diff --git a/.dockerignore b/.dockerignore index 118a67059fde2..601e43c1cbc92 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,22 @@ .git mlruns +docs +apidocs +mlflow.Rcheck outputs +examples +travis +tests +node_modules +coverage +build +npm-debug.log* +yarn-debug.log* +yarn-error.log* __pycache__ .* ~* *.swp *.pyc + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000000..498bc4c12c839 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,42 @@ +## What changes are proposed in this pull request? + +(Please fill in changes proposed in this fix) + +## How is this patch tested? + +(Details) + +## Release Notes + +### Is this a user-facing change? + +- [ ] No. You can skip the rest of this section. +- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users. + +(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.) + +### What component(s) does this PR affect? + +- [ ] UI +- [ ] CLI +- [ ] API +- [ ] REST-API +- [ ] Examples +- [ ] Docs +- [ ] Tracking +- [ ] Projects +- [ ] Artifacts +- [ ] Models +- [ ] Scoring +- [ ] Serving +- [ ] R +- [ ] Java +- [ ] Python + +### How should the PR be classified in the release notes? Choose one: + +- [ ] `rn/breaking-change` - The PR will be mentioned in the "Breaking Changes" section +- [ ] `rn/none` - No description will be included. The PR will be mentioned only by the PR number in the "Small Bugfixes and Documentation Updates" section +- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes +- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes +- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes diff --git a/.travis.yml b/.travis.yml index 1d7946b7a9179..71617cbef740a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,33 +1,70 @@ -sudo: true language: python +dist: trusty + services: - docker matrix: include: - - python: 2.7 - - python: 3.6 + - stage: small + - language: python + name: "Lint (Python 3.6)" + python: 3.6 + install: + - INSTALL_SMALL_PYTHON_DEPS=true INSTALL_LARGE_PYTHON_DEPS=true source ./travis/install-common-deps.sh + - pip install -r ./travis/lint-requirements.txt + script: + - ./lint.sh + - language: python + python: 2.7 + install: + - INSTALL_SMALL_PYTHON_DEPS=true source ./travis/install-common-deps.sh + script: + - ./travis/run-small-python-tests.sh + - os: windows + name: "Windows" + language: sh + before_install: + - cinst -y python3 + install: + - export PATH="/c/Python37:/c/Python37/Scripts:$PATH" + - pip install -r dev-requirements.txt + - pip install -r travis/small-requirements.txt + - pip install -e . 
+ script: + - pytest --verbose --ignore=tests/h2o --ignore=tests/keras --ignore=tests/pytorch --ignore=tests/pyfunc --ignore=tests/sagemaker --ignore=tests/sklearn --ignore=tests/spark --ignore=tests/tensorflow --ignore=tests/autologging --ignore tests/azureml --ignore tests/onnx --ignore tests/projects tests - language: r - dist: trusty + name: "R" cache: packages before_install: + # cache packages dramatically decreases build time, but it must not include mlflow + - rm -rf /home/travis/R/Library/mlflow - export NOT_CRAN=true - cd mlflow/R/mlflow - Rscript -e 'install.packages("devtools")' - - Rscript -e 'devtools::install_deps(dependencies = TRUE)' + - Rscript -e 'devtools::install_deps(dependencies = TRUE, upgrade = "always")' - cd ../../.. + install: + - source ./travis/install-common-deps.sh script: - cd mlflow/R/mlflow + # Building the package here populates the /home/travis/R/Library cache, + # and is also used when python forks into R (e.g., rfunc via models CLI). - R CMD build . - R CMD check --no-build-vignettes --no-manual --no-tests mlflow*tar.gz - - cd tests - export LINTR_COMMENT_BOT=false + - cd tests - Rscript ../.travis.R after_success: - export COVR_RUNNING=true - Rscript -e 'covr::codecov()' + after_failure: + - "[ -r /home/travis/build/mlflow/mlflow/mlflow/R/mlflow/mlflow.Rcheck/00check.log ] && cat /home/travis/build/mlflow/mlflow/mlflow/R/mlflow/mlflow.Rcheck/00check.log" - language: java + name: "Java" + install: + - source ./travis/install-common-deps.sh script: - cd mlflow/java - mvn clean package -q @@ -35,70 +72,39 @@ matrix: node_js: - "node" # Use latest NodeJS: https://docs.travis-ci.com/user/languages/javascript-with-nodejs/#specifying-nodejs-versions install: + name: "Node.js" script: - cd mlflow/server/js - npm i - ./lint.sh - npm test -- --coverage + - stage: large + - language: python + python: 2.7 + install: + - INSTALL_LARGE_PYTHON_DEPS=true source ./travis/install-common-deps.sh + script: + - ./travis/run-large-python-tests.sh +# Travis runs an extra top-level job for each build stage - depending on the build stage, we either +# run small or large Python tests below. install: - - sudo mkdir -p /travis-install - - sudo chown travis /travis-install - # (The conda installation steps below are taken from http://conda.pydata.org/docs/travis.html) - # We do this conditionally because it saves us some downloading if the - # version is the same. 
- - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then - wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O /travis-install/miniconda.sh; + - echo "Build stage $TRAVIS_BUILD_STAGE_NAME" + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then + echo "skipping this step on windows."; + elif [[ "$TRAVIS_BUILD_STAGE_NAME" == "Small" ]]; then + INSTALL_SMALL_PYTHON_DEPS=true source ./travis/install-common-deps.sh && + wget https://github.com/google/protobuf/releases/download/v3.6.0/protoc-3.6.0-linux-x86_64.zip -O /travis-install/protoc.zip && + sudo unzip /travis-install/protoc.zip -d /usr; else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /travis-install/miniconda.sh; + INSTALL_LARGE_PYTHON_DEPS=true source ./travis/install-common-deps.sh; fi - - bash /travis-install/miniconda.sh -b -p $HOME/miniconda - - export PATH="$HOME/miniconda/bin:$PATH" - - hash -r - - conda config --set always_yes yes --set changeps1 no - # Useful for debugging any issues with conda - - conda info -a - - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION - - source activate test-environment - - python --version - - pip install --upgrade pip - # Install Python test dependencies only if we're running Python tests - - if [[ ! -z "$TRAVIS_PYTHON_VERSION" ]]; then - travis_wait pip install -r dev-requirements.txt -q; - travis_wait pip install -r test-requirements.txt -q; - fi - - pip install . - - export MLFLOW_HOME=$(pwd) - # Remove boto config present in Travis VMs (https://github.com/travis-ci/travis-ci/issues/7940) - - sudo rm -f /etc/boto.cfg - # Install protoc - - wget https://github.com/google/protobuf/releases/download/v3.6.0/protoc-3.6.0-linux-x86_64.zip -O /travis-install/protoc.zip - - sudo unzip /travis-install/protoc.zip -d /usr script: - - ./lint.sh - - sudo ./test-generate-protos.sh - - pip list - - which mlflow - - echo $MLFLOW_HOME - - SAGEMAKER_OUT=$(mktemp) - - if mlflow sagemaker build-and-push-container --no-push --mlflow-home . > $SAGEMAKER_OUT 2>&1; then - echo "Sagemaker container build succeeded."; + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then + echo "skipping this step on windows."; + elif [[ "$TRAVIS_BUILD_STAGE_NAME" == "Small" ]]; then + ./travis/run-small-python-tests.sh && ./test-generate-protos.sh; else - echo "Sagemaker container build failed, output:"; - cat $SAGEMAKER_OUT; + ./travis/run-large-python-tests.sh; fi - # Run tests that don't leverage specific ML frameworks - - pytest --cov=mlflow --verbose --large --ignore=tests/h2o --ignore=tests/keras - --ignore=tests/pytorch --ignore=tests/pyfunc--ignore=tests/sagemaker --ignore=tests/sklearn - --ignore=tests/spark --ignore=tests/tensorflow - # Run ML framework tests in their own Python processes. TODO: find a better method of isolating - # tests. - - pytest --verbose tests/h2o --large - - pytest --verbose tests/keras --large - - pytest --verbose tests/pytorch --large - - pytest --verbose tests/pyfunc --large - - pytest --verbose tests/sagemaker --large - - pytest --verbose tests/sklearn --large - - pytest --verbose tests/spark --large - - pytest --verbose tests/tensorflow --large diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c07a71f71e009..8b72031b3e75b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,320 @@ Changelog ========= +1.0 (2019-06-03) +---------------- +MLflow 1.0 includes many significant features and improvements. 
From this version, MLflow is no longer beta, and all APIs except those marked as experimental are intended to be stable until the next major version. As such, this release includes a number of breaking changes. + +Major features, improvements, and breaking changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Support for recording, querying, and visualizing metrics along a new “step” axis (x coordinate), providing increased flexibility for examining model performance relative to training progress. For example, you can now record performance metrics as a function of the number of training iterations or epochs. MLflow 1.0’s enhanced metrics UI enables you to visualize the change in a metric’s value as a function of its step, augmenting MLflow’s existing UI for plotting a metric’s value as a function of wall-clock time. (#1202, #1237, @dbczumar; #1132, #1142, #1143, @smurching; #1211, #1225, @Zangr; #1372, @stbof) +- Search improvements. MLflow 1.0 includes additional support in both the API and UI for searching runs within a single experiment or a group of experiments. The search filter API supports a simplified version of the ``SQL WHERE`` clause. In addition to searching using a run's metrics and params, the API has been enhanced to support a subset of run attributes as well as user and `system tags `_. For details see `Search syntax `_ and `examples for programmatically searching runs `_. (#1245, #1272, #1323, #1326, @mparkhe; #1052, @Zangr; #1363, @aarondav) +- Logging metrics in batches. MLflow 1.0 now has a ``runs/log-batch`` REST API endpoint for logging multiple metrics, params, and tags in a single API request. The endpoint is useful for performant logging of multiple metrics at the end of a model training epoch (see `example `_), or logging of many input model parameters at the start of training. You can call this batched-logging endpoint from Python (``mlflow.log_metrics``, ``mlflow.log_params``, ``mlflow.set_tags``), R (``mlflow_log_batch``), and Java (``MlflowClient.logBatch``); a usage sketch appears after the feature lists below. (#1214, @dbczumar; see 0.9.1 and 0.9.0 for other changes) +- Windows support for MLflow Tracking. The Tracking portion of the MLflow client is now supported on Windows. (#1171, @eedeleon, @tomasatdatabricks) +- HDFS support for artifacts. A Hadoop artifact repository with Kerberos authorization support was added, so you can use HDFS to log and retrieve models and other artifacts. (#1011, @jaroslawk) +- CLI command to build Docker images for serving. Added an ``mlflow models build-docker`` CLI command for building a Docker image capable of serving an MLflow model. The model is served at port 8080 within the container by default. Note that this API is experimental and does not guarantee that the arguments or the format of the Docker container will remain the same. (#1329, @smurching, @tomasatdatabricks) +- New ``onnx`` model flavor for saving, loading, and evaluating ONNX models with MLflow. ONNX flavor APIs are available in the ``mlflow.onnx`` module. (#1127, @avflor, @dbczumar; #1388, @dbczumar) +- Major breaking changes: + + - Some of the breaking changes involve database schema changes in the SQLAlchemy tracking store. If your database instance's schema is not up-to-date, MLflow will issue an error at the start-up of ``mlflow server`` or ``mlflow ui``. To migrate an existing database to the newest schema, you can use the ``mlflow db upgrade`` CLI command.
(#1155, #1371, @smurching; #1360, @aarondav) + - [Installation] The MLflow Python package no longer depends on ``scikit-learn``, ``mleap``, or ``boto3``. If you want to use the ``scikit-learn`` support, the ``MLeap`` support, or ``s3`` artifact repository / ``sagemaker`` support, you will have to install these respective dependencies explicitly. (#1223, @aarondav) + - [Artifacts] In the Models API, an artifact's location is now represented as a URI. See the `documentation `_ for the list of accepted URIs. (#1190, #1254, @dbczumar; #1174, @dbczumar, @sueann; #1206, @tomasatdatabricks; #1253, @stbof) + + - The affected methods are: + + - Python: ``.load_model``, ``azureml.build_image``, ``sagemaker.deploy``, ``sagemaker.run_local``, ``pyfunc._load_model_env``, ``pyfunc.load_pyfunc``, and ``pyfunc.spark_udf`` + - R: ``mlflow_load_model``, ``mlflow_rfunc_predict``, ``mlflow_rfunc_serve`` + - CLI: ``mlflow models serve``, ``mlflow models predict``, ``mlflow sagemaker``, ``mlflow azureml`` (with the new ``--model-uri`` option) + + - To allow referring to artifacts in the context of a run, MLflow introduces a new URI scheme of the form ``runs:/<run_id>/relative/path/to/artifact``. (#1169, #1175, @sueann) + + - [CLI] ``mlflow pyfunc`` and ``mlflow rfunc`` commands have been unified as ``mlflow models`` (#1257, @tomasatdatabricks; #1321, @dbczumar) + - [CLI] ``mlflow artifacts download``, ``mlflow artifacts download-from-uri`` and ``mlflow download`` commands have been consolidated into ``mlflow artifacts download`` (#1233, @sueann) + - [Runs] Expose ``RunData`` fields (``metrics``, ``params``, ``tags``) as dictionaries. Note that the ``mlflow.entities.RunData`` constructor still accepts lists of ``metric``/``param``/``tag`` entities. (#1078, @smurching) + - [Runs] Rename ``run_uuid`` to ``run_id`` in Python, Java, and REST API. Where necessary, MLflow will continue to accept ``run_uuid`` until MLflow 1.1. (#1187, @aarondav) + +Other breaking changes +~~~~~~~~~~~~~~~~~~~~~~ + +CLI: + +- The ``--file-store`` option is deprecated in ``mlflow server`` and ``mlflow ui`` commands. (#1196, @smurching) +- The ``--host`` and ``--gunicorn-opts`` options are removed in the ``mlflow ui`` command. (#1267, @aarondav) +- Arguments to ``mlflow experiments`` subcommands, notably ``--experiment-name`` and ``--experiment-id``, are now options (#1235, @sueann) +- ``mlflow sagemaker list-flavors`` has been removed (#1233, @sueann) + +Tracking: + +- The ``user`` property of ``Run``s has been moved to tags (similarly, the ``run_name``, ``source_type``, ``source_name`` properties were moved to tags in 0.9.0). (#1230, @acroz; #1275, #1276, @aarondav) +- In R, the return values of experiment CRUD APIs have been updated to more closely match the REST API. In particular, ``mlflow_create_experiment`` now returns a string experiment ID instead of an experiment, and the other APIs return NULL. (#1246, @smurching) +- ``RunInfo.status``'s type is now string. (#1264, @mparkhe) +- Remove deprecated ``RunInfo`` properties from ``start_run``. (#1220, @aarondav) +- As deprecated in 0.9.1 and before, the ``RunInfo`` fields ``run_name``, ``source_name``, ``source_version``, ``source_type``, and ``entry_point_name`` and the ``SearchRuns`` field ``anded_expressions`` have been removed from the REST API and Python, Java, and R tracking client APIs. They are still available as tags, documented in the REST API documentation.
(#1188, @aarondav) + +Models and deployment: + +- In Python, require arguments as keywords in ``log_model``, ``save_model`` and ``add_to_model`` methods in the ``tensorflow`` and ``mleap`` modules to avoid breaking changes in the future (#1226, @sueann) +- Remove the unsupported ``jars`` argument from ``spark.log_model`` in Python (#1222, @sueann) +- Introduce ``pyfunc.load_model`` to be consistent with other Models modules. ``pyfunc.load_pyfunc`` will be deprecated in the near future. (#1222, @sueann) +- Rename ``dst_path`` parameter in ``pyfunc.save_model`` to ``path`` (#1221, @aarondav) +- R flavors refactor (#1299, @kevinykuo) + + - ``mlflow_predict()`` has been added in favor of ``mlflow_predict_model()`` and ``mlflow_predict_flavor()`` which have been removed. + - ``mlflow_save_model()`` is now a generic and ``mlflow_save_flavor()`` is no longer needed and has been removed. + - ``mlflow_predict()`` takes ``...`` to pass to underlying predict methods. + - ``mlflow_load_flavor()`` now has the signature ``function(flavor, model_path)`` and flavor authors should implement ``mlflow_load_flavor.mlflow_flavor_{FLAVORNAME}``. The flavor argument is inferred from the inputs of user-facing ``mlflow_load_model()`` and does not need to be explicitly provided by the user. + +Projects: + +- Remove and rename some ``projects.run`` parameters for generality and consistency. (#1222, @sueann) +- In R, the ``mlflow_run`` API for running MLflow projects has been modified to more closely reflect the Python ``mlflow.run`` API. In particular, the order of the ``uri`` and ``entry_point`` arguments has been reversed and the ``param_list`` argument has been renamed to ``parameters``. (#1265, @smurching) + +R: + +- Remove ``mlflow_snapshot`` and ``mlflow_restore_snapshot`` APIs. Also, the ``r_dependencies`` argument used to specify the path to a packrat r-dependencies.txt file has been removed from all APIs. (#1263, @smurching) +- The ``mlflow_cli`` and ``crate`` APIs are now private. (#1246, @smurching) + +Environment variables: + +- Prefix environment variables with "MLFLOW_" (#1268, @aarondav). Affected variables are: + + - [Tracking] ``_MLFLOW_SERVER_FILE_STORE``, ``_MLFLOW_SERVER_ARTIFACT_ROOT``, ``_MLFLOW_STATIC_PREFIX`` + - [SageMaker] ``MLFLOW_SAGEMAKER_DEPLOY_IMG_URL``, ``MLFLOW_DEPLOYMENT_FLAVOR_NAME`` + - [Scoring] ``MLFLOW_SCORING_SERVER_MIN_THREADS``, ``MLFLOW_SCORING_SERVER_MAX_THREADS`` + +More features and improvements +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- [Tracking] Non-default driver support for SQLAlchemy backends: ``db+driver`` is now a valid tracking backend URI scheme (#1297, @drewmcdonald; #1374, @mparkhe) +- [Tracking] Validate backend store URI before starting tracking server (#1218, @luke-zhu, @sueann) +- [Tracking] Add ``GetMetricHistory`` client API in Python and Java corresponding to the REST API. (#1178, @smurching) +- [Tracking] Add ``view_type`` argument to ``MlflowClient.list_experiments()`` in Python. (#1212, @smurching) +- [Tracking] Dictionary values provided to ``mlflow.log_params`` and ``mlflow.set_tags`` in Python can now be non-string types (e.g., numbers), and they are automatically converted to strings.
(#1364, @aarondav) +- [Tracking] R API additions to be at parity with REST API and Python (#1122, @kevinykuo) +- [Tracking] Limit number of results returned from ``SearchRuns`` API and UI for faster load (#1125, @mparkhe; #1154, @andrewmchen) +- [Artifacts] To avoid having many copies of large model files in serving, ``ArtifactRepository.download_artifacts`` no longer copies local artifacts (#1307, @andrewmchen; #1383, @dbczumar) +- [Artifacts][Projects] Support GCS in download utilities. ``gs://bucket/path`` files are now supported by the ``mlflow artifacts download`` CLI command and as parameters of type ``path`` in MLProject files. (#1168, @drewmcdonald) +- [Models] All Python models exported by MLflow now declare ``mlflow`` as a dependency by default. In addition, we introduce a flag ``--install-mlflow`` users can pass to ``mlflow models serve`` and ``mlflow models predict`` methods to force installation of the latest version of MLflow into the model's environment. (#1308, @tomasatdatabricks) +- [Models] Update model flavors to lazily import dependencies in Python. Modules that define Model flavors now import extra dependencies such as ``tensorflow``, ``scikit-learn``, and ``pytorch`` inside individual *methods*, ensuring that these modules can be imported and explored even if the dependencies have not been installed on your system. Also, the ``DEFAULT_CONDA_ENVIRONMENT`` module variable has been replaced with a ``get_default_conda_env()`` function for each flavor. +- [Models] It is now possible to pass extra arguments to ``mlflow.keras.load_model`` that will be passed through to ``keras.load_model``. (#1330, @yorickvP) +- [Serving] For better performance, switch to ``gunicorn`` for serving Python models. This does not change the user interface. (#1322, @tomasatdatabricks) +- [Deployment] For SageMaker, use the uniquely-generated model name as the S3 bucket prefix instead of requiring one. (#1183, @dbczumar) +- [REST API] Add support for API paths without the ``preview`` component. The ``preview`` paths will be deprecated in a future version of MLflow. (#1236, @mparkhe)
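A minimal sketch (an editor's illustration, not part of the original changelog) of the step-based metrics, batched logging, and query-string search APIs described above. It assumes a default local tracking setup; the experiment ID ``"0"``, metric names, and tag values are purely illustrative:

.. code-block:: python

    import mlflow
    from mlflow.tracking import MlflowClient

    with mlflow.start_run():
        # Batched logging: one runs/log-batch REST call per dict, rather than
        # one call per key.
        mlflow.log_params({"alpha": 0.4, "l1_ratio": 0.1})
        mlflow.set_tags({"team": "forecasting"})
        for epoch in range(5):
            # `step` is the new x-coordinate introduced in MLflow 1.0.
            mlflow.log_metric("loss", 1.0 / (epoch + 1), step=epoch)

    # Search using the simplified SQL WHERE syntax over metrics and tags.
    client = MlflowClient()
    runs = client.search_runs(
        ["0"], filter_string="metrics.loss < 0.5 and tags.team = 'forecasting'"
    )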
+ +Bug fixes and documentation updates +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- [Tracking] Log metric timestamps in milliseconds by default (#1177, @smurching; #1333, @dbczumar) +- [Tracking] Fix bug when deserializing integer experiment ID for runs in ``SQLAlchemyStore`` (#1167, @smurching) +- [Tracking] Ensure unique constraint names in MLflow tracking database (#1292, @smurching) +- [Tracking] Fix base64 encoding for basic auth in R tracking client (#1126, @freefrag) +- [Tracking] Correctly handle ``file:`` URIs for the ``--backend-store-uri`` option in ``mlflow server`` and ``mlflow ui`` CLI commands (#1171, @eedeleon, @tomasatdatabricks) +- [Artifacts] Update artifact repository download methods to return absolute paths (#1179, @dbczumar) +- [Artifacts] Make FileStore respect the default artifact location (#1332, @dbczumar) +- [Artifacts] Fix ``log_artifact`` failures due to existing directory on FTP server (#1327, @kafendt) +- [Artifacts] Fix GCS artifact logging of subdirectories (#1285, @jason-huling) +- [Projects] Fix bug not sharing ``SQLite`` database file with Docker container (#1347, @tomasatdatabricks; #1375, @aarondav) +- [Java] Mark ``sendPost`` and ``sendGet`` as experimental (#1186, @aarondav) +- [Python][CLI] Mark ``azureml.build_image`` as experimental (#1222, #1233 @sueann) +- [Docs] Document public MLflow environment variables (#1343, @aarondav) +- [Docs] Document MLflow system tags for runs (#1342, @aarondav) +- [Docs] Autogenerate CLI documentation to include subcommands and descriptions (#1231, @sueann) +- [Docs] Update run selection description in ``mlflow_get_run`` in R documentation (#1258, @dbczumar) +- [Examples] Update examples to reflect API changes (#1361, @tomasatdatabricks; #1367, @mparkhe) + +Small bug fixes and doc updates (#1359, #1350, #1331, #1301, #1270, #1271, #1180, #1144, #1135, #1131, #1358, #1369, #1368, #1387, @aarondav; #1373, @akarloff; #1287, #1344, #1309, @stbof; #1312, @hchiuzhuo; #1348, #1349, #1294, #1227, #1384, @tomasatdatabricks; #1345, @withsmilo; #1316, @ancasarb; #1313, #1310, #1305, #1289, #1256, #1124, #1097, #1162, #1163, #1137, #1351, @smurching; #1319, #1244, #1224, #1195, #1194, #1328, @dbczumar; #1213, #1200, @Kublai-Jing; #1304, #1320, @andrewmchen; #1311, @Zangr; #1306, #1293, #1147, @mateiz; #1303, @gliptak; #1261, #1192, @eedeleon; #1273, #1259, @kevinykuo; #1277, #1247, #1243, #1182, #1376, @mparkhe; #1210, @vgod-dbx; #1199, @ashtuchkin; #1176, #1138, #1365, @sueann; #1157, @cclauss; #1156, @clemens-db; #1152, @pogil; #1146, @srowen; #875, #1251, @jimthompson5802) + + +0.9.1 (2019-04-21) +------------------ +MLflow 0.9.1 is a patch release on top of 0.9.0 containing mostly bug fixes and internal improvements. We have also included one breaking API change in preparation for additions in MLflow 1.0 and later. This release also includes significant improvements to the Search API. + +Breaking changes: + +- [Tracking] Generalized experiment_id to string (from a long) to be more permissive of different ID types in different backend stores. While breaking for the REST API, this change is backwards compatible for Python and R clients. (#1067, #1034 @eedeleon) + +More features and improvements: + +- [Search][API] Moving search filters into a query-string-based syntax, with Java client, Python client, and UI support. This also improves quote, period, and special character handling in query strings and adds the ability to search on tags in the filter string.
(#1042, #1055, #1063, #1068, #1099, #1106 @mparkhe; #1025 @andrewmchen; #1060 @smurching) +- [Tracking] Limits and validations to batch-logging APIs in OSS server (#958 @smurching) +- [Tracking][Java] Java client API for batch-logging (#1081 @mparkhe) +- [Tracking] Improved consistency of handling multiple metric values per timestamp across tracking stores (#972, #999 @dbczumar) + +Bug fixes and documentation updates: + +- [Tracking][Python] Reintroduces the parent_run_id argument to MlflowClient.create_run. This API is planned for removal in MLflow 1.0 (#1137 @smurching) +- [Tracking][Python] Provide default implementations of AbstractStore log methods (#1051 @acroz) +- [R] (Released on CRAN as MLflow 0.9.0.1) Small bug fixes with R (#1123 @smurching; #1045, #1017, #1019, #1039, #1048, #1098, #1101, #1107, #1108, #1119 @tomasatdatabricks) + +Small bug fixes and doc updates (#1024, #1029 @bayethiernodiop; #1075 @avflor; #968, #1010, #1070, #1091, #1092 @smurching; #1004, #1085 @dbczumar; #1033, #1046 @sueann; #1053 @tomasatdatabricks; #987 @hanyucui; #935, #941 @jimthompson5802; #963 @amilbourne; #1016 @andrewmchen; #991 @jaroslawk; #1007 @mparkhe) + + +0.9.0.1 (2019-04-09) +-------------------- +Bugfix release (PyPI only) with the following changes: + +- Rebuilt MLflow JS assets to fix an issue where form input was broken in MLflow 0.9.0 (identified + in #1056, #1113 by @shu-yusa, @timothyjlaurent) + + +0.9.0 (2019-03-13) +------------------ + +Major features: + +- Support for running MLflow Projects in Docker containers. This allows you to include non-Python dependencies in your project environments and provides stronger isolation when running projects. See the `Projects documentation `_ for more information. (#555, @marcusrehm; #819, @mparkhe; #970, @dbczumar) +- Database stores for the MLflow Tracking Server. Support for a scalable and performant backend store was one of the top community requests. This feature enables you to connect to local or remote SQLAlchemy-compatible databases (currently supported flavors include MySQL, PostgreSQL, SQLite, and MS SQL) and is compatible with the file-backed store. See the `Tracking Store documentation `_ for more information. (#756, @AndersonReyes; #800, #844, #847, #848, #860, #868, #975, @mparkhe; #980, @dbczumar) +- Simplified custom Python model packaging. You can easily include custom preprocessing and postprocessing logic, as well as data dependencies in models with the ``python_function`` flavor using updated ``mlflow.pyfunc`` Python APIs. For more information, see the `Custom Python Models documentation `_. (#791, #792, #793, #830, #910, @dbczumar) +- Plugin systems allowing third-party libraries to extend MLflow functionality. The `proposal document `_ gives the full detail of the three main changes: + + - You can register additional providers of tracking stores using the ``mlflow.tracking_store`` entrypoint. (#881, @zblz) + - You can register additional providers of artifact repositories using the ``mlflow.artifact_repository`` entrypoint. (#882, @mociarain) + - The logic generating run metadata from the run context (e.g. ``source_name``, ``source_version``) has been refactored into an extendable system of run context providers. Plugins can register additional providers using the ``mlflow.run_context_provider`` entrypoint, which add to or overwrite tags set by the base library. (#913, #926, #930, #978, @acroz) + +- Support for HTTP authentication to the Tracking Server in the R client. Now you can connect to secure Tracking Servers using credentials set in environment variables, or provide custom plugins for setting the credentials. As an example, this release contains a Databricks plugin that can detect existing Databricks credentials to allow you to connect to the Databricks Tracking Server. (#938, #959, #992, @tomasatdatabricks)
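As a rough sketch (an editor's illustration, not from the release notes) of how a third-party package might use the ``mlflow.tracking_store`` entrypoint described above; the package, module, scheme, and class names are all hypothetical:

.. code-block:: python

    # setup.py of a hypothetical plugin package
    from setuptools import setup

    setup(
        name="my-mlflow-plugin",
        packages=["my_plugin"],
        install_requires=["mlflow"],
        entry_points={
            # The entrypoint name is the tracking URI scheme, so tracking URIs
            # like "my-scheme://host/path" resolve to MyTrackingStore.
            "mlflow.tracking_store": "my-scheme=my_plugin.store:MyTrackingStore",
        },
    )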
+ + +Breaking changes: + +- [Scoring] The ``pyfunc`` scoring server now expects requests with the ``application/json`` content type to contain json-serialized pandas dataframes in the split format, rather than the records format. See the `documentation on deployment `_ for more detail. (#960, @dbczumar) Also, when reading the pandas dataframes from JSON, the scoring server no longer automatically infers data types as it can result in unintentional conversion of data types (#916, @mparkhe). +- [API] Remove ``GetMetric`` & ``GetParam`` from the REST API as they are subsumed by ``GetRun``. (#879, @aarondav) + + +More features and improvements: + +- [UI] Add a button for downloading artifacts (#967, @mateiz) +- [CLI] Add CLI commands for runs: now you can ``list``, ``delete``, ``restore``, and ``describe`` runs through the CLI (#720, @DorIndivo) +- [CLI] The ``run`` command can now take ``--experiment-name`` as an argument, as an alternative to the ``--experiment-id`` argument. You can also choose to set the ``_EXPERIMENT_NAME_ENV_VAR`` environment variable instead of passing in the value explicitly. (#889, #894, @mparkhe) +- [Examples] Add Image classification example with Keras. (#743, @tomasatdatabricks) +- [Artifacts] Add ``get_artifact_uri()`` and ``_download_artifact_from_uri`` convenience functions (#779) +- [Artifacts] Allow writing Spark models directly to the target artifact store when possible (#808, @smurching) +- [Models] PyTorch model persistence improvements to allow persisting definitions and dependencies outside the immediate scope: + - Add a ``code_paths`` parameter to ``mlflow.pytorch.save_model`` and ``mlflow.pytorch.log_model`` to allow external module dependencies to be specified as paths to python files. (#842, @dbczumar) + - Improve ``mlflow.pytorch.save_model`` to capture class definitions from notebooks and the ``__main__`` scope (#851, #861, @dbczumar) +- [Runs][R] Allow client to infer context info when creating new run in fluent API (#958, @tomasatdatabricks) +- [Runs][UI] Support Git commit hyperlinks for GitLab and Bitbucket. Previously the clickable hyperlink was generated only for GitHub pages. (#901) +- [Search][API] Allow param value to have any content, not just alphanumeric characters, ``.``, and ``-`` (#788, @mparkhe) +- [Search][API] Support "filter" string in the ``SearchRuns`` API. Corresponding UI improvements are planned for the future (#905, @mparkhe) +- [Logging] Basic support for LogBatch. NOTE: The feature is currently experimental and the behavior is expected to change in the near future.
(#950, #951, #955, #1001, @smurching) + + +Bug fixes and documentation updates: + +- [Artifacts] Fix empty-file upload to DBFS in ``log_artifact`` and ``log_artifacts`` (#895, #818, @smurching) +- [Artifacts] S3 artifact store: fix path resolution error when artifact root is bucket root (#928, @dbczumar) +- [UI] Fix a bug with Databricks notebook URL links (#891, @smurching) +- [Export] Fix for missing run name in csv export (#864, @jimthompson5802) +- [Example] Correct missing tensorboardX module error in PyTorch example when running in MLflow Docker container (#809, @jimthompson5802) +- [Scoring][R] Fix local serving of rfunc models (#874, @kevinykuo) +- [Docs] Improve flavor-specific documentation in Models documentation (#909, @dbczumar) + +Small bug fixes and doc updates (#822, #899, #787, #785, #780, #942, @hanyucui; #862, #904, #954, #806, #857, #845, @stbof; #907, #872, @smurching; #896, #858, #836, #859, #923, #939, #933, #931, #952, @dbczumar; #880, @zblz; #876, @acroz; #827, #812, #816, #829, @jimthompson5802; #837, #790, #897, #974, #900, @mparkhe; #831, #798, @aarondav; #814, @sueann; #824, #912, @mateiz; #922, #947, @tomasatdatabricks; #795, @KevYuen; #676, @mlaradji; #906, @4n4nd; #777, @tmielika; #804, @alkersan) + + +0.8.2 (2019-01-28) +------------------ + +MLflow 0.8.2 is a patch release on top of 0.8.1 containing only bug fixes and no breaking changes or features. + +Bug fixes: + +- [Python API] CloudPickle has been added to the set of MLflow library dependencies, fixing missing import errors when attempting to save models (#777, @tmielika) +- [Python API] Fixed a malformed logging call that prevented ``mlflow.sagemaker.push_image_to_ecr()`` invocations from succeeding (#784, @jackblandin) +- [Models] PyTorch models can now be saved with code dependencies, allowing model classes to be loaded successfully in new environments (#842, #836, @dbczumar) +- [Artifacts] Fixed a timeout when logging zero-length files to DBFS artifact stores (#818, @smurching) + +Small docs updates (#845, @stbof; #840, @grahamhealy20; #839, @wilderrodrigues) + + +0.8.1 (2018-12-21) +------------------ + +MLflow 0.8.1 introduces several significant improvements: + +- Improved UI responsiveness and load time, especially when displaying experiments containing hundreds to thousands of runs. +- Improved visualizations, including interactive scatter plots for MLflow run comparisons +- Expanded support for scoring Python models as Spark UDFs. For more information, see the `updated documentation for this feature `_. +- By default, saved models will now include a Conda environment specifying all of the dependencies necessary for loading them in a new environment. + +Features: + +- [API/CLI] Support for running MLflow projects from ZIP files (#759, @jmorefieldexpe) +- [Python API] Support for passing model conda environments as dictionaries to ``save_model`` and ``log_model`` functions (#748, @dbczumar) +- [Models] Default Anaconda environments have been added to many Python model flavors. By default, models produced by `save_model` and `log_model` functions will include an environment that specifies all of the versioned dependencies necessary to load and serve the models. Previously, users had to specify these environments manually. 
(#705, #707, #708, #749, @dbczumar) +- [Scoring] Support for synchronous deployment of models to SageMaker (#717, @dbczumar) +- [Tracking] Include the Git repository URL as a tag when tracking an MLflow run within a Git repository (#741, @whiletruelearn, @mateiz) +- [UI] Improved runs UI performance by using a react-virtualized table to optimize row rendering (#765, #762, #745, @smurching) +- [UI] Significant performance improvements for rendering run metrics, tags, and parameter information (#764, #747, @smurching) +- [UI] Scatter plots, including run comparison plots, are now interactive (#737, @mateiz) +- [UI] Extended CSRF support by allowing the MLflow UI server to specify a set of expected headers that clients should set when making AJAX requests (#733, @aarondav) + +Bug fixes and documentation updates: + +- [Python/Scoring] MLflow Python models that produce Pandas DataFrames can now be evaluated as Spark UDFs correctly. Spark UDF outputs containing multiple columns of primitive types are now supported (#719, @tomasatdatabricks) +- [Scoring] Fixed a serialization error that prevented models served with Azure ML from returning Pandas DataFrames (#754, @dbczumar) +- [Docs] New example demonstrating how the MLflow REST API can be used to create experiments and log run information (#750, @kjahan) +- [Docs] R documentation has been updated for clarity and style consistency (#683, @stbof) +- [Docs] Added clarification about user setup requirements for executing remote MLflow runs on Databricks (#736, @andyk) + +Small bug fixes and doc updates (#768, #715, @smurching; #728, dodysw; #730, mshr-h; #725, @kryptec; #769, #721, @dbczumar; #714, @stbof) + + +0.8.0 (2018-11-08) +------------------ + +MLflow 0.8.0 introduces several major features: + +- Dramatically improved UI for comparing experiment run results: + + - Metrics and parameters are by default grouped into a single column, to avoid an explosion of mostly-empty columns. Individual metrics and parameters can be moved into their own column to help compare across rows. + - Runs that are "nested" inside other runs (e.g., as part of a hyperparameter search or multistep workflow) now show up grouped by their parent run, and can be expanded or collapsed altogether. Runs can be nested by calling ``mlflow.start_run`` or ``mlflow.run`` while already within a run. + - Run names (as opposed to automatically generated run UUIDs) now show up instead of the run ID, making comparing runs in graphs easier. + - The state of the run results table, including filters, sorting, and expanded rows, is persisted in browser local storage, making it easier to go back and forth between an individual run view and the table. + +- Support for deploying models as Docker containers directly to Azure Machine Learning Service Workspace (as opposed to the previously-recommended solution of Azure ML Workbench). + + +Breaking changes: + +- [CLI] ``mlflow sklearn serve`` has been removed in favor of ``mlflow pyfunc serve``, which takes the same arguments but works against any pyfunc model (#690, @dbczumar) + + +Features: + +- [Scoring] pyfunc server and SageMaker now support the pandas "split" JSON format in addition to the "records" format. The split format allows the client to specify the order of columns, which is necessary for some model formats. We recommend switching client code over to use this new format (by sending the Content-Type header ``application/json; format=pandas-split``), as it will become the default JSON format in MLflow 0.9.0.
(#690, @dbczumar) +- [UI] Add compact experiment view (#546, #620, #662, #665, @smurching) +- [UI] Add support for viewing & tracking nested runs in experiment view (#588, @andrewmchen; #618, #619, @aarondav) +- [UI] Persist experiments view filters and sorting in browser local storage (#687, @smurching) +- [UI] Show run name instead of run ID when present (#476, @smurching) +- [Scoring] Support for deploying Models directly to Azure Machine Learning Service Workspace (#631, @dbczumar) +- [Server/Python/Java] Add ``rename_experiment`` to Tracking API (#570, @aarondav) +- [Server] Add ``get_experiment_by_name`` to RestStore (#592, @dmarkhas) +- [Server] Allow passing gunicorn options when starting mlflow server (#626, @mparkhe) +- [Python] Cloudpickle support for sklearn serialization (#653, @dbczumar) +- [Artifacts] FTP artifactory store added (#287, @Shenggan) + + +Bug fixes and documentation updates: + +- [Python] Update TensorFlow integration to match API provided by other flavors (#612, @dbczumar; #670, @mlaradji) +- [Python] Support for TensorFlow 1.12 (#692, @smurching) +- [R] Explicitly loading Keras module at predict time no longer required (#586, @kevinykuo) +- [R] pyfunc serve can correctly load models saved with the R Keras support (#634, @tomasatdatabricks) +- [R] Increase network timeout of calls to the RestStore from 1 second to 60 seconds (#704, @aarondav) +- [Server] Improve errors returned by RestStore (#582, @andrewmchen; #560, @smurching) +- [Server] Deleting the default experiment no longer causes it to be immediately recreated (#604, @andrewmchen; #641, @schipiga) +- [Server] Azure Blob Storage artifact repo supports Windows paths (#642, @marcusrehm) +- [Server] Improve behavior when environment and run files are corrupted (#632, #654, #661, @mparkhe) +- [UI] Improve error page when viewing nonexistent runs or views (#600, @andrewmchen; #560, @andrewmchen) +- [UI] UI no longer throws an error if all experiments are deleted (#605, @andrewmchen) +- [Docs] Include diagram of workflow for multistep example (#581, @dennyglee) +- [Docs] Add reference tags and R and Java APIs to tracking documentation (#514, @stbof) +- [Docs/R] Use CRAN installation (#686, @javierluraschi) + +Small bug fixes and doc updates (#576, #594, @javierluraschi; #585, @kevinykuo; #593, #601, #611, #650, #669, #671, #679, @dbczumar; #607, @suzil; #583, #615, @andrewmchen; #622, #681, @aarondav; #625, @pogil; #589, @tomasatdatabricks; #529, #635, #684, @stbof; #657, @mvsusp; #682, @mateiz; #678, vfdev-5; #596, @yutannihilation; #663, @smurching) + + 0.7.0 (2018-10-01) ----------------- @@ -97,7 +411,7 @@ Bug fixes and documentation updates: - [Python] ``log_artifact`` in FileStore now requires a relative path as the artifact path (#439, @mparkhe) - [Python] Fixed string representation of Python entities, so they now display both their type and serialized fields (#371, @smurching) - [UI] Entry point name is now shown in MLflow UI (#345, @aarondav) -- [Models] Keras model export now includes Tensorflow graph explicitly to ensure the model can always be loaded at deployment time (#440, @tomasatdatabricks) +- [Models] Keras model export now includes TensorFlow graph explicitly to ensure the model can always be loaded at deployment time (#440, @tomasatdatabricks) - [Python] Fixed issue where FileStore ignored provided Run Name (#358, @adrian555) - [Python] Fixed an issue where any ``mlflow run`` failing printed an extraneous exception (#365, @smurching) - [Python] uuid dependency removed (#351, @antonpaquin) 
@@ -295,7 +609,7 @@ This is a patch release fixing some smaller issues after the 0.2.0 release. - ``--artifact-root`` to allow storing artifacts at a remote location, S3 only right now (#78, @mateiz) - Server now runs behind gunicorn to allow concurrent requests to be made (#61, @mateiz) -- Tensorflow integration: we now support logging Tensorflow Models directly in the log_model API, model format, and serving APIs (#28, @juntai-zheng) +- TensorFlow integration: we now support logging TensorFlow Models directly in the log_model API, model format, and serving APIs (#28, @juntai-zheng) - Added ``experiments.list_experiments`` as part of experiments API (#37, @mparkhe) - Improved support for unicode strings (#79, @smurching) - Diabetes progression example dataset and training code (#56, @dennyglee) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 60af139572832..638b7bfd94564 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -3,8 +3,6 @@ Contributing to MLflow We welcome community contributions to MLflow. This page describes how to develop/test your changes to MLflow locally. -Python ------- The majority of the MLflow codebase is in Python. This includes the CLI, Tracking Server, Artifact Repositories (e.g., S3 or Azure Blob Storage backends), and of course the Python fluent, tracking, and model APIs. @@ -13,27 +11,40 @@ tracking, and model APIs. Prerequisites ~~~~~~~~~~~~~ -We recommend installing MLflow in its own conda environment for development, as follows: +First, install the Python MLflow package from source - this is required for developing & testing +changes across all languages and APIs. We recommend installing MLflow in its own conda environment +by running the following from your checkout of MLflow: -.. code:: bash +.. code-block:: bash - conda create --name mlflow-dev-env + conda create --name mlflow-dev-env python=3.6 source activate mlflow-dev-env pip install -r dev-requirements.txt pip install -r test-requirements.txt pip install -e . # installs mlflow from current checkout +Ensure `Docker `_ is installed. -``npm`` is required to run the Javascript dev server. +``npm`` is required to run the Javascript dev server and the tracking UI. You can verify that ``npm`` is on the PATH by running ``npm -v``, and `install npm `_ if needed. +If contributing to MLflow's R APIs, install `R `_. For changes to R +documentation, also install `pandoc `_ 2.2.1 or above, +verifying the version of your installation via ``pandoc --version``. If using Mac OSX, note that +the homebrew installation of pandoc may be out of date - you can find newer pandoc versions at +https://github.com/jgm/pandoc/releases. + +If contributing to MLflow's Java APIs or modifying Java documentation, +install `Java `_ and `Apache Maven `_. + + Install Node Modules ~~~~~~~~~~~~~~~~~~~~ Before running the Javascript dev server or building a distributable wheel, install Javascript dependencies via: -.. code:: bash +.. code-block:: bash cd mlflow/server/js npm install @@ -55,11 +66,15 @@ the Python package in a conda environment as described above. the Java 8 JDK (or above), and `download `_ and `install `_ Maven. You can then build and run tests via: -.. code:: bash +.. code-block:: bash cd mlflow/java mvn compile test +If opening a PR that makes API changes, please regenerate API documentation as described in +`Writing Docs`_ and commit the updated docs to your PR branch. + + R - @@ -67,7 +82,7 @@ The ``mlflow/R/mlflow`` directory contains R wrappers for the Projects, Tracking components. 
These wrappers depend on the Python package, so first install the Python package in a conda environment: -.. code:: bash +.. code-block:: bash # Note that we don't pass the -e flag to pip, as the R tests attempt to run the MLflow UI # via the CLI, which will not work if we run against the development tracking server @@ -77,7 +92,7 @@ the Python package in a conda environment: `Install R `_, then run the following to install dependencies for building MLflow locally: -.. code:: bash +.. code-block:: bash cd mlflow/R/mlflow NOT_CRAN=true Rscript -e 'install.packages("devtools", repos = "https://cloud.r-project.org")' @@ -85,13 +100,13 @@ building MLflow locally: Build the R client via: -.. code:: bash +.. code-block:: bash R CMD build . Run tests: -.. code:: bash +.. code-block:: bash R CMD check --no-build-vignettes --no-manual --no-tests mlflow*tar.gz cd tests @@ -100,20 +115,95 @@ Run tests: Run linter: -.. code:: bash +.. code-block:: bash Rscript -e 'lintr::lint_package()' +If opening a PR that makes API changes, please regenerate API documentation as described in +`Writing Docs`_ and commit the updated docs to your PR branch. + When developing, you can make Python changes available in R by running (from mlflow/R/mlflow): -.. code:: bash +.. code-block:: bash Rscript -e 'reticulate::conda_install("r-mlflow", "../../../.", pip = TRUE)' Please also follow the recommendations from the `Advanced R - Style Guide `_ regarding naming and styling. +Python +------ +Verify that the unit tests & linter pass before submitting a pull request by running: + +.. code-block:: bash + + ./lint.sh + ./travis/run-small-python-tests.sh + # Optionally, run large tests as well. Travis will run large tests on your pull request once + # small tests pass. Note: models and model deployment tests are considered "large" tests. If + # making changes to these components, we recommend running the relevant tests (e.g. tests under + # tests/keras for changes to Keras model support) locally before submitting a pull request. + ./travis/run-large-python-tests.sh + +Python tests are split into "small" & "large" categories, with new tests falling into the "small" +category by default. Tests that take 10 or more seconds to run should be marked as large tests +via the @pytest.mark.large annotation. Dependencies for small and large tests can be added to +travis/small-requirements.txt and travis/large-requirements.txt, respectively. + +We use `pytest `_ to run Python tests. +You can run tests for one or more test directories or files via +``pytest [--large] [file_or_dir] ... [file_or_dir]``, where specifying ``--large`` tells pytest to +run tests annotated with @pytest.mark.large. For example, to run all pyfunc tests +(including large tests), you can run: + +.. code-block:: bash + + pytest tests/pyfunc --large + +Note: Certain model tests are not well-isolated (can result in OOMs when run in the same Python +process), so simply invoking ``pytest`` or ``pytest tests`` may not work. If you'd like to +run multiple model tests, we recommend doing so via separate ``pytest`` invocations, e.g. +``pytest --verbose tests/sklearn --large && pytest --verbose tests/tensorflow --large`` + +Note also that some tests do not run as part of PR builds on Travis. In particular, PR builds +exclude: + + - Tests marked with @pytest.mark.requires_ssh. These tests require that passwordless SSH access to + localhost be enabled, and can be run via ``pytest --requires-ssh``. + - Tests marked with @pytest.mark.release. These tests can be run via ``pytest --release``.
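To illustrate the @pytest.mark.large convention described above, a slow test would be annotated as follows so that it runs only when ``--large`` is passed (a hypothetical test, not one from the repository):

.. code-block:: python

    import time

    import pytest


    @pytest.mark.large
    def test_train_model_end_to_end():
        # Simulates an expensive end-to-end run; anything taking 10 or more
        # seconds belongs in the "large" category and is skipped without --large.
        time.sleep(15)
        assert True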
+ +If opening a PR that changes or adds new APIs, please update or add Python documentation as +described in `Writing Docs`_ and commit the docs to your PR branch. + + +Building Protobuf Files +----------------------- +To build protobuf files, simply run ``generate-protos.sh``. The required ``protoc`` version is ``3.6.0``. +Verify that .proto files and autogenerated code are in sync by running ``./test-generate-protos.sh``. + + +Database Schema Changes +----------------------- +MLflow's Tracking component supports storing experiment and run data in a SQL backend. To +make changes to the tracking database schema, run the following from your +checkout of MLflow: + +.. code-block:: bash + + # starting at the root of the project + $ pwd + ~/mlflow + $ cd mlflow + # MLflow relies on Alembic (https://alembic.sqlalchemy.org) for schema migrations. + $ alembic -c mlflow/store/db_migrations/alembic.ini revision -m "add new field to db" + Generating ~/mlflow/mlflow/store/db_migrations/versions/b446d3984cfa_add_new_field_to_db.py + + +These commands generate a new migration script (e.g. at +``~/mlflow/mlflow/store/db_migrations/versions/12341123_add_new_field_to_db.py``) that you should then edit to add +migration logic.
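For reference, the edited migration script typically defines ``upgrade``/``downgrade`` functions built from Alembic operations. A minimal hand-written sketch (the ``new_field`` column is hypothetical, and the revision identifiers are filled in by Alembic in the generated file):

.. code-block:: python

    import sqlalchemy as sa
    from alembic import op

    # Revision identifiers, auto-populated by Alembic in the generated script.
    revision = "b446d3984cfa"
    down_revision = None


    def upgrade():
        # Add the new (hypothetical) column to the existing "runs" table.
        op.add_column("runs", sa.Column("new_field", sa.String(100), nullable=True))


    def downgrade():
        op.drop_column("runs", "new_field")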
+ Launching the Development UI ---------------------------- @@ -123,33 +213,19 @@ Alternatively, you can generate the necessary files in ``mlflow/server/js/build`` `Building a Distributable Artifact`_. -Tests and Lint -------------- -Verify that the unit tests & linter pass before submitting a pull request by running: - -.. code:: bash - - pytest - ./lint.sh - -When running ``pytest --requires-ssh`` it is necessary that passwordless SSH access to localhost -is available. This can be achieved by adding the SSH public key to authorized keys: -``cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys``. - - Running the Javascript Dev Server --------------------------------- `Install Node Modules`_, then run the following: In one shell: -.. code:: bash +.. code-block:: bash mlflow ui In another shell: -.. code:: bash +.. code-block:: bash cd mlflow/server/js npm start @@ -162,28 +238,61 @@ Building a Distributable Artifact Generate JS files in ``mlflow/server/js/build``: -.. code:: bash +.. code-block:: bash cd mlflow/server/js npm run build Build a pip-installable wheel in ``dist/``: -.. code:: bash +.. code-block:: bash cd - python setup.py bdist_wheel -Building Protobuf Files ------------------------ -To build protobuf files, simply run ``generate-protos.sh``. The required ``protoc`` version is ``3.6.0``. - Writing Docs ------------ -Install the necessary Python dependencies via ``pip install -r dev-requirements.txt``. Then run +First, install dependencies for building docs as described in `Prerequisites`_. -.. code:: bash +To generate a live preview of Python & other rst documentation, run the following snippet. Note +that R & Java API docs must be regenerated separately after each change and are not live-updated; +see subsequent sections for instructions on generating R and Java docs. + +.. code-block:: bash cd docs make livehtml + + +Generate R API rst doc files via: + +.. code-block:: bash + + cd docs + make rdocs + +Generate Java API rst doc files via: + +.. code-block:: bash + + cd docs + make javadocs + + +Generate API docs for all languages via: + +.. code-block:: bash + + cd docs + make html + + +If changing existing Python APIs or adding new APIs under existing modules, ensure that references +to the modified APIs are updated in existing docs under ``docs/source``. Note that the Python doc +generation process will automatically produce updated API docs, but you should still audit for +usages of the modified APIs in guides and examples. + +If adding a new public Python module, create a corresponding doc file for the module under +``docs/source/python_api`` - `see here `_ +for an example. diff --git a/Dockerfile b/Dockerfile index 74d934450c624..2f6518cc90389 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,10 +4,12 @@ WORKDIR /app ADD . /app -RUN pip install -r dev-requirements.txt && \ +RUN apt-get update && apt-get install -y default-libmysqlclient-dev build-essential && \ + pip install -r dev-requirements.txt && \ pip install -r test-requirements.txt && \ pip install -e . && \ - apt-get update && apt-get install -y gnupg && \ + apt-get install -y gnupg && \ + apt-get install -y openjdk-8-jre-headless && \ curl -sL https://deb.nodesource.com/setup_10.x | bash - && \ apt-get update && apt-get install -y nodejs && \ cd mlflow/server/js && \ diff --git a/README.rst b/README.rst index dcda54f0cfa20..b520945c310ba 100644 --- a/README.rst +++ b/README.rst @@ -1,12 +1,43 @@ -==================== -MLflow Alpha Release -==================== - -**Note:** The current version of MLflow is an alpha release. This means that APIs and data formats -are subject to change! - -**Note 2:** We do not currently support running MLflow on Windows. Despite this, we would appreciate any contributions -to make MLflow work better on Windows. +============================================= +MLflow: A Machine Learning Lifecycle Platform +============================================= + +MLflow is a platform to streamline machine learning development, including tracking experiments, packaging code +into reproducible runs, and sharing and deploying models. MLflow offers a set of lightweight APIs that can be +used with any existing machine learning application or library (TensorFlow, PyTorch, XGBoost, etc), wherever you +currently run ML code (e.g. in notebooks, standalone applications or the cloud). MLflow's current components are: + +* `MLflow Tracking `_: An API to log parameters, code, and + results in machine learning experiments and compare them using an interactive UI. +* `MLflow Projects `_: A code packaging format for reproducible + runs using Conda and Docker, so you can share your ML code with others. +* `MLflow Models `_: A model packaging format and tools that let + you easily deploy the same model (from any ML library) to batch and real-time scoring on platforms such as + Docker, Apache Spark, Azure ML and AWS SageMaker. + +|docs| |travis| |pypi| |conda-forge| |cran| |maven| |license| + +.. |docs| image:: https://img.shields.io/badge/docs-latest-success.svg + :target: https://mlflow.org/docs/latest/index.html + :alt: Latest Docs +.. |travis| image:: https://img.shields.io/travis/mlflow/mlflow.svg + :target: https://travis-ci.org/mlflow/mlflow + :alt: Build Status +.. |pypi| image:: https://img.shields.io/pypi/v/mlflow.svg + :target: https://pypi.org/project/mlflow/ + :alt: Latest Python Release +.. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/mlflow.svg + :target: https://anaconda.org/conda-forge/mlflow + :alt: Latest Conda Release
+.. |cran| image:: https://img.shields.io/cran/v/mlflow.svg + :target: https://cran.r-project.org/package=mlflow + :alt: Latest CRAN Release +.. |maven| image:: https://img.shields.io/maven-central/v/org.mlflow/mlflow-parent.svg + :target: https://mvnrepository.com/artifact/org.mlflow + :alt: Maven Central +.. |license| image:: https://img.shields.io/badge/license-Apache%202-brightgreen.svg + :target: https://github.com/mlflow/mlflow/blob/master/LICENSE.txt + :alt: Apache 2 License Installing ---------- @@ -45,9 +76,9 @@ Start it with:: mlflow ui **Note:** Running ``mlflow ui`` from within a clone of MLflow is not recommended - doing so will -run the dev UI from source. We recommend running the UI from a different working directory, using the -``--file-store`` option to specify which log directory to run against. Alternatively, see instructions -for running the dev UI in the `contributor guide `_. +run the dev UI from source. We recommend running the UI from a different working directory, +specifying a backend store via the ``--backend-store-uri`` option. Alternatively, see +instructions for running the dev UI in the `contributor guide `_. Running a Project from a URI @@ -57,7 +88,7 @@ or a Git URI:: mlflow run examples/sklearn_elasticnet_wine -P alpha=0.4 - mlflow run git@github.com:mlflow/mlflow-example.git -P alpha=0.4 + mlflow run https://github.com/mlflow/mlflow-example.git -P alpha=0.4 See ``examples/sklearn_elasticnet_wine`` for a sample project with an MLproject file. @@ -66,18 +97,15 @@ Saving and Serving Models ------------------------- To illustrate managing models, the ``mlflow.sklearn`` package can log scikit-learn models as MLflow artifacts and then load them again for serving. There is an example training application in -``examples/sklearn_logisitic_regression/train.py`` that you can run as follows:: +``examples/sklearn_logistic_regression/train.py`` that you can run as follows:: - $ python examples/sklearn_logisitic_regression/train.py + $ python examples/sklearn_logistic_regression/train.py Score: 0.666 Model saved in run <run-id> - $ mlflow sklearn serve -r <run-id> -m model - - $ curl -d '[{"x": 1}, {"x": -1}]' -H 'Content-Type: application/json' -X POST localhost:5000/invocations - - + $ mlflow models serve --model-uri runs:/<run-id>/model + $ curl -d '{"columns":[0],"index":[0,1],"data":[[1],[-1]]}' -H 'Content-Type: application/json' localhost:5000/invocations Contributing diff --git a/conftest.py b/conftest.py index 5bc6553730f9d..0bd26400c5c19 100644 --- a/conftest.py +++ b/conftest.py @@ -1,6 +1,10 @@ def pytest_addoption(parser): + parser.addoption('--large-only', action='store_true', dest="large_only", + default=False, help="Run only tests decorated with 'large' annotation") parser.addoption('--large', action='store_true', dest="large", default=False, help="Run tests decorated with 'large' annotation") + parser.addoption('--release', action='store_true', dest="release", + default=False, help="Run tests decorated with 'release' annotation") parser.addoption("--requires-ssh", action='store_true', dest="requires_ssh", default=False, help="Run tests decorated with 'requires_ssh' annotation. 
" "These tests require keys to be configured locally " @@ -11,8 +15,12 @@ def pytest_configure(config): # Override the markexpr argument to pytest # See https://docs.pytest.org/en/latest/example/markers.html for more details markexpr = [] - if not config.option.large: + if not config.option.large and not config.option.large_only: markexpr.append('not large') + elif config.option.large_only: + markexpr.append('large') + if not config.option.release: + markexpr.append('not release') if not config.option.requires_ssh: markexpr.append('not requires_ssh') if len(markexpr) > 0: diff --git a/dev-requirements.txt b/dev-requirements.txt index 2041345c125a2..d5a7f78d456d7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,10 +1,12 @@ # Dev/Deployment sphinx==1.7.9 -sphinx_rtd_theme -sphinx-autobuild +sphinx_rtd_theme==0.4.3 +sphinx-autobuild==0.7.1 +sphinx-click==2.1.0 nose codecov coverage pypi-publisher scikit-learn scipy +kubernetes diff --git a/docs/Makefile b/docs/Makefile index 175a24b02d242..0f6cadded8485 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -56,13 +56,24 @@ help: clean: rm -rf $(BUILDDIR)/* -.PHONY: html -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html +.PHONY: javadocs +javadocs: ./build-javadoc.sh + +.PHONY: rdocs +rdocs: + ./build-rdoc.sh + +# Builds only the RST-based documentation (i.e., everything but Java & R docs) +.PHONY: rsthtml +rsthtml: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." +.PHONY: html +html: javadocs rdocs rsthtml + .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml diff --git a/docs/build-javadoc.sh b/docs/build-javadoc.sh index edd35befda989..083f6f2de799d 100755 --- a/docs/build-javadoc.sh +++ b/docs/build-javadoc.sh @@ -2,8 +2,9 @@ set -ex pushd ../mlflow/java/client/ -mvn javadoc:javadoc +mvn clean javadoc:javadoc popd +rm -rf build/html/java_api/ mkdir -p build/html/java_api/ cp -r ../mlflow/java/client/target/site/apidocs/* build/html/java_api/ echo "Copied JavaDoc into docs/build/html/java_api/" diff --git a/docs/build-rdoc.sh b/docs/build-rdoc.sh new file mode 100755 index 0000000000000..f29b8beea29f5 --- /dev/null +++ b/docs/build-rdoc.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -ex +pushd ../mlflow/R/mlflow +Rscript -e 'install.packages("devtools", repos = "https://cloud.r-project.org")' +Rscript -e 'devtools::install_dev_deps(dependencies = TRUE)' +# Install Rd2md from source as a temporary fix for the rendering of code examples, until +# a release is published including the fixes in https://github.com/quantsch/Rd2md/issues/1 +# Note that this commit is equivalent to commit 6b48255 of Rd2md master +# (https://github.com/quantsch/Rd2md/tree/6b4825579a2df8a22898316d93729384f92a756b) +# with a single extra commit to fix rendering of \link tags between methods in R documentation. +Rscript -e 'devtools::install_github("https://github.com/smurching/Rd2md", ref = "ac7b22bb")' +Rscript -e 'install.packages("rmarkdown", repos = "https://cloud.r-project.org")' +rm -rf man +Rscript -e "roxygen2::roxygenise()" +Rscript document.R +popd diff --git a/docs/source/R-api.rst b/docs/source/R-api.rst index 60876c32a0502..615622fe7d254 100644 --- a/docs/source/R-api.rst +++ b/docs/source/R-api.rst @@ -4,141 +4,53 @@ R API ======== -The MLflow R API allows you to use MLflow :doc:`Tracking `, :doc:`Projects ` and :doc:`Models `. 
+The MLflow `R `_ API allows you to use MLflow :doc:`Tracking `, :doc:`Projects ` and :doc:`Models `. -For instance, you can use the R API to `install MLflow`_, start the `user interface `_, `create `_ and `list experiments`_, `save models `_, `run projects `_ and `serve models `_ among many other functions available in the R API. +You can use the R API to `install MLflow `_, start the `user interface `_, `create `_ and `list experiments `_, `save models `_, `run projects `_ and `serve models `_ among many other functions available in the R API. .. contents:: Table of Contents :local: :depth: 1 -Crate a function to share with another process -============================================== - -``crate()`` creates functions in a self-contained environment -(technically, a child of the base environment). This has two advantages: - -- They can easily be executed in another process. +``install_mlflow`` +================== -- Their effects are reproducible. You can run them locally with the - same results as on a different process. +Install MLflow -Creating self-contained functions requires some care, see section below. +Installs auxiliary dependencies of MLflow (e.g. the MLflow CLI). As a +one-time setup step, you must run install_mlflow() to install these +dependencies before calling other MLflow APIs. .. code:: r - crate(.fn, ...) + install_mlflow() -Arguments ---------- +Details +------- -+-------------------------------+--------------------------------------+ -| Argument | Description | -+===============================+======================================+ -| ``.fn`` | A fresh formula or function. “Fresh” | -| | here means that they should be | -| | declared in the call to ``crate()``. | -| | See examples if you need to crate a | -| | function that is already defined. | -| | Formulas are converted to purrr-like | -| | lambda functions using | -| | [rlang::as_function()]. | -+-------------------------------+--------------------------------------+ -| ``...`` | Arguments to declare in the | -| | environment of ``.fn``. If a name is | -| | supplied, the object is assigned to | -| | that name. Otherwise the argument is | -| | automatically named after itself. | -+-------------------------------+--------------------------------------+ +install_mlflow() requires Python and Conda to be installed. See +https://www.python.org/getit/ and +https://docs.conda.io/projects/conda/en/latest/user-guide/install/ . Examples -------- .. code:: r - # You can create functions using the ordinary notation: - crate(function(x) stats::var(x)) - - # Or the formula notation: - crate(~stats::var(.x)) - - # Declare data by supplying named arguments. You can test you have - # declared all necessary data by calling your crated function: - na_rm <- TRUE - fn <- crate(~stats::var(.x, na.rm = na_rm)) - try(fn(1:10)) - - # Arguments are automatically named after themselves so that the - # following are equivalent: - crate(~stats::var(.x, na.rm = na_rm), na_rm = na_rm) - crate(~stats::var(.x, na.rm = na_rm), na_rm) - - # However if you supply a complex expression, do supply a name! - crate(~stats::var(.x, na.rm = na_rm), !na_rm) - crate(~stats::var(.x, na.rm = na_rm), na_rm = na_rm) - - # For small data it is handy to unquote instead. Unquoting inlines - # objects inside the function. 
This is less verbose if your - # function depends on many small objects: - fn <- crate(~stats::var(.x, na.rm = !!na_rm)) - fn(1:10) - - # One downside is that the individual sizes of unquoted objects - # won't be shown in the crate printout: - fn - - - # The function or formula you pass to crate() should defined inside - # the crate() call, i.e. you can't pass an already defined - # function: - fn <- function(x) toupper(x) - try(crate(fn)) - - # If you really need to crate an existing function, you can - # explicitly set its environment to the crate environment with the - # set_env() function from rlang: - crate(rlang::set_env(fn)) - -Is an object a crate? -===================== + library(mlflow) + install_mlflow() -Is an object a crate? - -.. code:: r - - is_crate(x) - -.. _arguments-1: - -Arguments ---------- - -+----------+--------------------+ -| Argument | Description | -+==========+====================+ -| ``x`` | An object to test. | -+----------+--------------------+ - -Active Run -========== - -Retrieves the active run. - -.. code:: r - - mlflow_active_run() +``mlflow_client`` +================= -MLflow Command -============== +Initialize an MLflow Client -Executes a generic MLflow command through the commmand line interface. +Initializes and returns an MLflow client that communicates with the +tracking server or store at the specified URI. .. code:: r - mlflow_cli(..., background = FALSE, echo = TRUE, - stderr_callback = NULL) - -.. _arguments-2: + mlflow_client(tracking_uri = NULL) Arguments --------- @@ -146,47 +58,23 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``...`` | The parameters to pass to the | -| | command line. | -+-------------------------------+--------------------------------------+ -| ``background`` | Should this command be triggered as | -| | a background task? Defaults to | -| | ``FALSE`` . | -+-------------------------------+--------------------------------------+ -| ``echo`` | Print the standard output and error | -| | to the screen? Defaults to ``TRUE`` | -| | , does not apply to background | -| | tasks. | -+-------------------------------+--------------------------------------+ -| ``stderr_callback`` | NULL, or a function to call for | -| | every chunk of the standard error. | +| ``tracking_uri`` | The tracking URI. If not provided, | +| | defaults to the service set by | +| | ``mlflow_set_tracking_uri()``. | +-------------------------------+--------------------------------------+ -Value ------ - -A ``processx`` task. - -.. _examples-1: - -Examples --------- +``mlflow_create_experiment`` +============================ -.. code:: r - - list("\n", "library(mlflow)\n", "mlflow_install()\n", "\n", "mlflow_cli(\"server\", \"--help\")\n") - - -Create Experiment - Tracking Client -=================================== +Create Experiment -Creates an MLflow experiment. +Creates an MLflow experiment and returns its id. .. code:: r - mlflow_client_create_experiment(client, name, artifact_location = NULL) + mlflow_create_experiment(name, artifact_location = NULL, client = NULL) -.. _arguments-3: +.. _arguments-1: Arguments --------- @@ -194,8 +82,6 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``client`` | An ``mlflow_client`` object. 
| -+-------------------------------+--------------------------------------+ | ``name`` | The name of the experiment to | | | create. | +-------------------------------+--------------------------------------+ @@ -204,29 +90,31 @@ Arguments | | provided, the remote server will | | | select an appropriate default. | +-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_delete_experiment`` +============================ -Create Run -========== +Delete Experiment -reate a new run within an experiment. A run is usually a single -execution of a machine learning or data ETL pipeline. +Marks an experiment and associated runs, params, metrics, etc. for +deletion. If the experiment uses FileStore, artifacts associated with +experiment are also deleted. .. code:: r - mlflow_client_create_run(client, experiment_id, user_id = NULL, - run_name = NULL, source_type = NULL, source_name = NULL, - entry_point_name = NULL, start_time = NULL, source_version = NULL, - tags = NULL) + mlflow_delete_experiment(experiment_id, client = NULL) -.. _arguments-4: +.. _arguments-2: Arguments --------- @@ -234,258 +122,216 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``client`` | An ``mlflow_client`` object. | -+-------------------------------+--------------------------------------+ -| ``experiment_id`` | Unique identifier for the associated | -| | experiment. | -+-------------------------------+--------------------------------------+ -| ``user_id`` | User ID or LDAP for the user | -| | executing the run. | -+-------------------------------+--------------------------------------+ -| ``run_name`` | Human readable name for run. | -+-------------------------------+--------------------------------------+ -| ``source_type`` | Originating source for this run. One | -| | of Notebook, Job, Project, Local or | -| | Unknown. | -+-------------------------------+--------------------------------------+ -| ``source_name`` | String descriptor for source. For | -| | example, name or description of the | -| | notebook, or job name. | -+-------------------------------+--------------------------------------+ -| ``entry_point_name`` | Name of the entry point for the run. | -+-------------------------------+--------------------------------------+ -| ``start_time`` | Unix timestamp of when the run | -| | started in milliseconds. | -+-------------------------------+--------------------------------------+ -| ``source_version`` | Git version of the source code used | -| | to create run. | +| ``experiment_id`` | ID of the associated experiment. | +| | This field is required. 
| +-------------------------------+--------------------------------------+ -| ``tags`` | Additional metadata for run in | -| | key-value pairs. | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _details-1: - -Details -------- - -MLflow uses runs to track Param, Metric, and RunTag, associated with a -single execution. - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_delete_run`` +===================== -Delete Experiment -================= +Delete a Run -Mark an experiment and associated runs, params, metrics, … etc for -deletion. If the experiment uses FileStore, artifacts associated with -experiment are also deleted. +Deletes the run with the specified ID. .. code:: r - mlflow_client_delete_experiment(client, experiment_id) + mlflow_delete_run(run_id, client = NULL) -.. _arguments-5: +.. _arguments-3: Arguments --------- -+-----------------------------------+-----------------------------------+ -| Argument | Description | -+===================================+===================================+ -| ``client`` | An ``mlflow_client`` object. | -+-----------------------------------+-----------------------------------+ -| ``experiment_id`` | ID of the associated experiment. | -| | This field is required. | -+-----------------------------------+-----------------------------------+ - -.. _details-2: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_delete_tag`` +===================== -Delete a Run -============ +Delete Tag -Delete a Run +Deletes a tag on a run. This is irreversible. Tags are run metadata that +can be updated during a run and after a run completes. .. code:: r - mlflow_client_delete_run(client, run_id) + mlflow_delete_tag(key, run_id = NULL, client = NULL) -.. _arguments-6: +.. _arguments-4: Arguments --------- -+------------+------------------------------+ -| Argument | Description | -+============+==============================+ -| ``client`` | An ``mlflow_client`` object. | -+------------+------------------------------+ -| ``run_id`` | Run ID. 
| -+------------+------------------------------+ - -.. _details-3: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``key`` | Name of the tag. Maximum size is 255 | +| | bytes. This field is required. | ++-------------------------------+--------------------------------------+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_download_artifacts`` +============================= Download Artifacts -================== Download an artifact file or directory from a run to a local directory if applicable, and return a local path for it. .. code:: r - mlflow_client_download_artifacts(client, run_id, path) + mlflow_download_artifacts(path, run_id = NULL, client = NULL) -.. _arguments-7: +.. _arguments-5: Arguments --------- -+------------+-----------------------------------------------+ -| Argument | Description | -+============+===============================================+ -| ``client`` | An ``mlflow_client`` object. | -+------------+-----------------------------------------------+ -| ``run_id`` | Run ID. | -+------------+-----------------------------------------------+ -| ``path`` | Relative source path to the desired artifact. | -+------------+-----------------------------------------------+ - -.. _details-4: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``path`` | Relative source path to the desired | +| | artifact. | ++-------------------------------+--------------------------------------+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_end_run`` +================== -Get Experiment by Name -====================== +End a Run -Get meta data for experiment by name. +Terminates a run. Attempts to end the current active run if ``run_id`` +is not specified. .. 
code:: r - mlflow_client_get_experiment_by_name(client, name) + mlflow_end_run(status = c("FINISHED", "FAILED", "KILLED"), + end_time = NULL, run_id = NULL, client = NULL) -.. _arguments-8: +.. _arguments-6: Arguments --------- -+------------+------------------------------+ -| Argument | Description | -+============+==============================+ -| ``client`` | An ``mlflow_client`` object. | -+------------+------------------------------+ -| ``name`` | The experiment name. | -+------------+------------------------------+ - -.. _details-5: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``status`` | Updated status of the run. Defaults | +| | to ``FINISHED``. Can also be set to | +| | “FAILED” or “KILLED”. | ++-------------------------------+--------------------------------------+ +| ``end_time`` | Unix timestamp of when the run ended | +| | in milliseconds. | ++-------------------------------+--------------------------------------+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_get_experiment`` +========================= Get Experiment -============== -Get meta data for experiment and a list of runs for this experiment. +Gets metadata for an experiment and a list of runs for the experiment. +Attempts to obtain the active experiment if both ``experiment_id`` and +``name`` are unspecified. .. code:: r - mlflow_client_get_experiment(client, experiment_id) - -.. _arguments-9: + mlflow_get_experiment(experiment_id = NULL, name = NULL, + client = NULL) -Arguments ---------- - -+-------------------+---------------------------------+ -| Argument | Description | -+===================+=================================+ -| ``client`` | An ``mlflow_client`` object. | -+-------------------+---------------------------------+ -| ``experiment_id`` | Identifer to get an experiment. | -+-------------------+---------------------------------+ - -.. _details-6: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. - -Get Run -======= - -Get meta data, params, tags, and metrics for run. Only last logged value -for each metric is returned. - -.. code:: r - - mlflow_client_get_run(client, run_id) - -.. _arguments-10: +.. _arguments-7: Arguments --------- -+------------+------------------------------+ -| Argument | Description | -+============+==============================+ -| ``client`` | An ``mlflow_client`` object. | -+------------+------------------------------+ -| ``run_id`` | Run ID. 
| -+------------+------------------------------+ - -.. _details-7: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``experiment_id`` | Identifer to get an experiment. | ++-------------------------------+--------------------------------------+ +| ``name`` | The experiment name. Only one of | +| | ``name`` or ``experiment_id`` should | +| | be specified. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_get_metric_history`` +============================= -List artifacts -============== +Get Metric History -List artifacts +Get a list of all values for the specified metric for a given run. .. code:: r - mlflow_client_list_artifacts(client, run_id, path = NULL) + mlflow_get_metric_history(metric_key, run_id = NULL, client = NULL) -.. _arguments-11: +.. _arguments-8: Arguments --------- @@ -493,36 +339,35 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``client`` | An ``mlflow_client`` object. | +| ``metric_key`` | Name of the metric. | +-------------------------------+--------------------------------------+ | ``run_id`` | Run ID. | +-------------------------------+--------------------------------------+ -| ``path`` | The run’s relative artifact path to | -| | list from. If not specified, it is | -| | set to the root artifact path | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _details-8: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_get_run`` +================== -List Experiments -================ +Get Run -Get a list of all experiments. +Gets metadata, params, tags, and metrics for a run. Returns a single +value for each metric key: the most recently logged metric value at the +largest step. .. code:: r - mlflow_client_list_experiments(client, view_type = c("ACTIVE_ONLY", - "DELETED_ONLY", "ALL")) + mlflow_get_run(run_id = NULL, client = NULL) -.. _arguments-12: +.. 
_arguments-9: Arguments --------- @@ -530,231 +375,66 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``client`` | An ``mlflow_client`` object. | +| ``run_id`` | Run ID. | +-------------------------------+--------------------------------------+ -| ``view_type`` | Qualifier for type of experiments to | -| | be returned. Defaults to | -| | ``ACTIVE_ONLY``. | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _details-9: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. - -Log Artifact -============ - -Logs an specific file or directory as an artifact. - -.. code:: r - - mlflow_client_log_artifact(client, run_id, path, artifact_path = NULL) - -.. _arguments-13: - -Arguments ---------- - -+-------------------+-------------------------------------------------+ -| Argument | Description | -+===================+=================================================+ -| ``client`` | An ``mlflow_client`` object. | -+-------------------+-------------------------------------------------+ -| ``run_id`` | Run ID. | -+-------------------+-------------------------------------------------+ -| ``path`` | The file or directory to log as an artifact. | -+-------------------+-------------------------------------------------+ -| ``artifact_path`` | Destination path within the run’s artifact URI. | -+-------------------+-------------------------------------------------+ - -.. _details-10: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. - -When logging to Amazon S3, ensure that the user has a proper policy -attach to it, for instance: - -\`\` - -Additionally, at least the ``AWS_ACCESS_KEY_ID`` and -``AWS_SECRET_ACCESS_KEY`` environment variables must be set to the -corresponding key and secrets provided by Amazon IAM. +``mlflow_get_tracking_uri`` +=========================== -Log Metric -========== +Get Remote Tracking URI -API to log a metric for a run. Metrics key-value pair that record a -single float measure. During a single execution of a run, a particular -metric can be logged several times. Backend will keep track of -historical values along with timestamps. +Gets the remote tracking URI. .. code:: r - mlflow_client_log_metric(client, run_id, key, value, timestamp = NULL) - -.. _arguments-14: - -Arguments ---------- - -+-----------------------------------+-----------------------------------+ -| Argument | Description | -+===================================+===================================+ -| ``client`` | An ``mlflow_client`` object. | -+-----------------------------------+-----------------------------------+ -| ``run_id`` | Run ID. 
| -+-----------------------------------+-----------------------------------+ -| ``key`` | Name of the metric. | -+-----------------------------------+-----------------------------------+ -| ``value`` | Float value for the metric being | -| | logged. | -+-----------------------------------+-----------------------------------+ -| ``timestamp`` | Unix timestamp in milliseconds at | -| | the time metric was logged. | -+-----------------------------------+-----------------------------------+ - -.. _details-11: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. + mlflow_get_tracking_uri() -Log Parameter +``mlflow_id`` ============= -API to log a parameter used for this run. Examples are params and -hyperparams used for ML training, or constant dates and values used in -an ETL pipeline. A params is a STRING key-value pair. For a run, a -single parameter is allowed to be logged only once. - -.. code:: r - - mlflow_client_log_param(client, run_id, key, value) - -.. _arguments-15: - -Arguments ---------- - -+------------+--------------------------------+ -| Argument | Description | -+============+================================+ -| ``client`` | An ``mlflow_client`` object. | -+------------+--------------------------------+ -| ``run_id`` | Run ID. | -+------------+--------------------------------+ -| ``key`` | Name of the parameter. | -+------------+--------------------------------+ -| ``value`` | String value of the parameter. | -+------------+--------------------------------+ +Get Run or Experiment ID -.. _details-12: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. - -Restore Experiment -================== - -Restore an experiment marked for deletion. This also restores associated -metadata, runs, metrics, and params. If experiment uses FileStore, -underlying artifacts associated with experiment are also restored. +Extracts the ID of the run or experiment. .. code:: r - mlflow_client_restore_experiment(client, experiment_id) - -.. _arguments-16: - -Arguments ---------- + mlflow_id(object) + list(list("mlflow_id"), list("mlflow_run"))(object) + list(list("mlflow_id"), list("mlflow_experiment"))(object) -+-----------------------------------+-----------------------------------+ -| Argument | Description | -+===================================+===================================+ -| ``client`` | An ``mlflow_client`` object. | -+-----------------------------------+-----------------------------------+ -| ``experiment_id`` | ID of the associated experiment. | -| | This field is required. | -+-----------------------------------+-----------------------------------+ - -.. _details-13: - -Details -------- - -Throws RESOURCE_DOES_NOT_EXIST if experiment was never created or was -permanently deleted. - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. - -Restore a Run -============= - -Restore a Run - -.. code:: r - - mlflow_client_restore_run(client, run_id) - -.. 
_arguments-17: +.. _arguments-10: Arguments --------- -+------------+------------------------------+ -| Argument | Description | -+============+==============================+ -| ``client`` | An ``mlflow_client`` object. | -+------------+------------------------------+ -| ``run_id`` | Run ID. | -+------------+------------------------------+ +========== ================================================== +Argument Description +========== ================================================== +``object`` An ``mlflow_run`` or ``mlflow_experiment`` object. +========== ================================================== -.. _details-14: +``mlflow_list_artifacts`` +========================= -Details -------- +List Artifacts -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. - -Set Tag -======= - -Set a tag on a run. Tags are run metadata that can be updated during and -after a run completes. +Gets a list of artifacts. .. code:: r - mlflow_client_set_tag(client, run_id, key, value) + mlflow_list_artifacts(path = NULL, run_id = NULL, client = NULL) -.. _arguments-18: +.. _arguments-11: Arguments --------- @@ -762,77 +442,72 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``client`` | An ``mlflow_client`` object. | +| ``path`` | The run’s relative artifact path to | +| | list from. If not specified, it is | +| | set to the root artifact path | +-------------------------------+--------------------------------------+ | ``run_id`` | Run ID. | +-------------------------------+--------------------------------------+ -| ``key`` | Name of the tag. Maximum size is 255 | -| | bytes. This field is required. | -+-------------------------------+--------------------------------------+ -| ``value`` | String value of the tag being | -| | logged. Maximum size is 500 bytes. | -| | This field is required. | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _details-15: - -Details -------- - -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_list_experiments`` +=========================== -Terminate a Run -=============== +List Experiments -Terminate a Run +Gets a list of all experiments. .. code:: r - mlflow_client_set_terminated(client, run_id, status = c("FINISHED", - "SCHEDULED", "FAILED", "KILLED"), end_time = NULL) + mlflow_list_experiments(view_type = c("ACTIVE_ONLY", "DELETED_ONLY", + "ALL"), client = NULL) -.. _arguments-19: +.. _arguments-12: Arguments --------- -+--------------+-------------------------------------------------------+ -| Argument | Description | -+==============+=======================================================+ -| ``client`` | An ``mlflow_client`` object. 
| -+--------------+-------------------------------------------------------+ -| ``run_id`` | Unique identifier for the run. | -+--------------+-------------------------------------------------------+ -| ``status`` | Updated status of the run. Defaults to ``FINISHED``. | -+--------------+-------------------------------------------------------+ -| ``end_time`` | Unix timestamp of when the run ended in milliseconds. | -+--------------+-------------------------------------------------------+ -| ``run_id`` | Run ID. | -+--------------+-------------------------------------------------------+ - -.. _details-16: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``view_type`` | Qualifier for type of experiments to | +| | be returned. Defaults to | +| | ``ACTIVE_ONLY``. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -The Tracking Client family of functions require an MLflow client to be -specified explicitly. These functions allow for greater control of where -the operations take place in terms of services and runs, but are more -verbose compared to the Fluent API. +``mlflow_list_run_infos`` +========================= -Initialize an MLflow client -=========================== +List Run Infos -Initialize an MLflow client +Returns a tibble whose columns contain run metadata (run ID, etc) for +all runs under the specified experiment. .. code:: r - mlflow_client(tracking_uri = NULL) + mlflow_list_run_infos(run_view_type = c("ACTIVE_ONLY", "DELETED_ONLY", + "ALL"), experiment_id = NULL, client = NULL) -.. _arguments-20: +.. _arguments-13: Arguments --------- @@ -840,21 +515,38 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``tracking_uri`` | The tracking URI. If not provided, | -| | defaults to the service set by | -| | ``mlflow_set_tracking_uri()``. | +| ``run_view_type`` | Run view type. | ++-------------------------------+--------------------------------------+ +| ``experiment_id`` | Experiment ID. Attempts to use the | +| | active experiment if not specified. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -Create Experiment -================= +``mlflow_load_flavor`` +====================== + +Load MLflow Model Flavor -Creates an MLflow experiment. +Loads an MLflow model using a specific flavor. 
This method is called +internally by `mlflow_load_model <#mlflow-load-model>`__ , but is +exposed for package authors to extend the supported MLflow models. See +https://mlflow.org/docs/latest/models.html#storage-format for more info +on MLflow model flavors. .. code:: r - mlflow_create_experiment(name, artifact_location = NULL) + mlflow_load_flavor(flavor, model_path) -.. _arguments-21: +.. _arguments-14: Arguments --------- @@ -862,125 +554,88 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``name`` | The name of the experiment to | -| | create. | +| ``flavor`` | An MLflow flavor object loaded by | +| | `mlflow_load_model <#mlflow-load-mod | +| | el>`__ | +| | , with class loaded from the flavor | +| | field in an MLmodel file. | +-------------------------------+--------------------------------------+ -| ``artifact_location`` | Location where all artifacts for | -| | this experiment are stored. If not | -| | provided, the remote server will | -| | select an appropriate default. | +| ``model_path`` | The path to the MLflow model wrapped | +| | in the correct class. | +-------------------------------+--------------------------------------+ -.. _details-17: - -Details -------- +``mlflow_load_model`` +===================== -The fluent API family of functions operate with an implied MLflow client -determined by the service set by ``mlflow_set_tracking_uri()``. For -operations involving a run it adopts the current active run, or, if one -does not exist, starts one through the implied service. +Load MLflow Model -End a Run -========= - -End an active MLflow run (if there is one). +Loads an MLflow model. MLflow models can have multiple model flavors. +Not all flavors / models can be loaded in R. This method by default +searches for a flavor supported by R/MLflow. .. code:: r - mlflow_end_run(status = c("FINISHED", "SCHEDULED", "FAILED", "KILLED")) + mlflow_load_model(model_uri, flavor = NULL, client = mlflow_client()) -.. _arguments-22: +.. _arguments-15: Arguments --------- -+------------+------------------------------------------------------+ -| Argument | Description | -+============+======================================================+ -| ``status`` | Updated status of the run. Defaults to ``FINISHED``. | -+------------+------------------------------------------------------+ - -.. _details-18: - -Details -------- - -The fluent API family of functions operate with an implied MLflow client -determined by the service set by ``mlflow_set_tracking_uri()``. For -operations involving a run it adopts the current active run, or, if one -does not exist, starts one through the implied service. - -Get Remote Tracking URI -======================= - -Get Remote Tracking URI - -.. code:: r - - mlflow_get_tracking_uri() - -Install MLflow -============== - -Installs MLflow for individual use. - -.. code:: r - - mlflow_install() ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``model_uri`` | The location, in URI format, of the | +| | MLflow model. | ++-------------------------------+--------------------------------------+ +| ``flavor`` | Optional flavor specification | +| | (string). Can be used to load a | +| | particular flavor in case there are | +| | multiple flavors available. 
| ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -.. _details-19: +.. _details-1: Details ------- -Notice that MLflow requires Python and Conda to be installed, see -https://www.python.org/getit/ and -https://conda.io/docs/installation.html . - -.. _examples-2: - -Examples --------- - -.. code:: r - - list("\n", "library(mlflow)\n", "mlflow_install()\n") - - -Load MLflow Model Flavor -======================== +The URI scheme must be supported by MLflow - i.e. there has to be an +MLflow artifact repository corresponding to the scheme of the URI. The +content is expected to point to a directory containing MLmodel. The +following are examples of valid model uris: -Loads an MLflow model flavor, to be used by package authors to extend -the supported MLflow models. +- ``file:///absolute/path/to/local/model`` +- ``file:relative/path/to/local/model`` +- ``s3://my_bucket/path/to/model`` +- ``runs://run-relative/path/to/model`` -.. code:: r - - mlflow_load_flavor(model_path) - -.. _arguments-23: +For more information about supported URI schemes, see the Artifacts +Documentation at +https://www.mlflow.org/docs/latest/tracking.html#supported-artifact-stores. -Arguments ---------- - -+----------------+------------------------------------------------------------+ -| Argument | Description | -+================+============================================================+ -| ``model_path`` | The path to the MLflow model wrapped in the correct class. | -+----------------+------------------------------------------------------------+ +``mlflow_log_artifact`` +======================= -Load MLflow Model. -================== +Log Artifact -MLflow models can have multiple model flavors. Not all flavors / models -can be loaded in R. This method will by default search for a flavor -supported by R/mlflow. +Logs a specific file or directory as an artifact for a run. .. code:: r - mlflow_load_model(model_path, flavor = NULL, run_id = NULL) + mlflow_log_artifact(path, artifact_path = NULL, run_id = NULL, + client = NULL) -.. _arguments-24: +.. _arguments-16: Arguments --------- @@ -988,116 +643,157 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``model_path`` | "Path to the MLflow model. The path | -| | is relative to the run with the | -| | given run-id or local filesystem | -| | path without run-id. | +| ``path`` | The file or directory to log as an | +| | artifact. | +-------------------------------+--------------------------------------+ -| ``flavor`` | Optional flavor specification. Can | -| | be used to load a particular flavor | -| | in case there are multiple flavors | -| | available. | +| ``artifact_path`` | Destination path within the run’s | +| | artifact URI. | +-------------------------------+--------------------------------------+ -| ``run_id`` | Optional MLflow run-id. If supplied | -| | model will be fetched from MLflow | -| | tracking server. | +| ``run_id`` | Run ID. 
| ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _log-artifact-1: - -Log Artifact -============ - -Logs an specific file or directory as an artifact. - -.. code:: r +.. _details-2: - mlflow_log_artifact(path, artifact_path = NULL) +Details +------- -.. _arguments-25: +When logging to Amazon S3, ensure that you have the s3:PutObject, +s3:GetObject, s3:ListBucket, and s3:GetBucketLocation permissions on +your bucket. -Arguments ---------- +Additionally, at least the ``AWS_ACCESS_KEY_ID`` and +``AWS_SECRET_ACCESS_KEY`` environment variables must be set to the +corresponding key and secrets provided by Amazon IAM. -+-------------------+-------------------------------------------------+ -| Argument | Description | -+===================+=================================================+ -| ``path`` | The file or directory to log as an artifact. | -+-------------------+-------------------------------------------------+ -| ``artifact_path`` | Destination path within the run’s artifact URI. | -+-------------------+-------------------------------------------------+ +``mlflow_log_batch`` +==================== -.. _details-20: +Log Batch -Details -------- +Log a batch of metrics, params, and/or tags for a run. The server will +respond with an error (non-200 status code) if any data failed to be +persisted. In case of error (due to internal server error or an invalid +request), partial data may be written. -The fluent API family of functions operate with an implied MLflow client -determined by the service set by ``mlflow_set_tracking_uri()``. For -operations involving a run it adopts the current active run, or, if one -does not exist, starts one through the implied service. +.. code:: r -When logging to Amazon S3, ensure that the user has a proper policy -attach to it, for instance: + mlflow_log_batch(metrics = NULL, params = NULL, tags = NULL, + run_id = NULL, client = NULL) -\`\` +.. _arguments-17: -Additionally, at least the ``AWS_ACCESS_KEY_ID`` and -``AWS_SECRET_ACCESS_KEY`` environment variables must be set to the -corresponding key and secrets provided by Amazon IAM. +Arguments +--------- + ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``metrics`` | A dataframe of metrics to log, | +| | containing the following columns: | +| | “key”, “value”, “step”, “timestamp”. | +| | This dataframe cannot contain any | +| | missing (‘NA’) entries. | ++-------------------------------+--------------------------------------+ +| ``params`` | A dataframe of params to log, | +| | containing the following columns: | +| | “key”, “value”. This dataframe | +| | cannot contain any missing (‘NA’) | +| | entries. | ++-------------------------------+--------------------------------------+ +| ``tags`` | A dataframe of tags to log, | +| | containing the following columns: | +| | “key”, “value”. This dataframe | +| | cannot contain any missing (‘NA’) | +| | entries. 
| ++-------------------------------+--------------------------------------+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -.. _log-metric-1: +``mlflow_log_metric`` +===================== Log Metric -========== -API to log a metric for a run. Metrics key-value pair that record a -single float measure. During a single execution of a run, a particular -metric can be logged several times. Backend will keep track of -historical values along with timestamps. +Logs a metric for a run. Metrics key-value pair that records a single +float measure. During a single execution of a run, a particular metric +can be logged several times. The MLflow Backend keeps track of +historical metric values along two axes: timestamp and step. .. code:: r - mlflow_log_metric(key, value, timestamp = NULL) + mlflow_log_metric(key, value, timestamp = NULL, step = NULL, + run_id = NULL, client = NULL) -.. _arguments-26: +.. _arguments-18: Arguments --------- -+-----------------------------------+-----------------------------------+ -| Argument | Description | -+===================================+===================================+ -| ``key`` | Name of the metric. | -+-----------------------------------+-----------------------------------+ -| ``value`` | Float value for the metric being | -| | logged. | -+-----------------------------------+-----------------------------------+ -| ``timestamp`` | Unix timestamp in milliseconds at | -| | the time metric was logged. | -+-----------------------------------+-----------------------------------+ - -.. _details-21: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``key`` | Name of the metric. | ++-------------------------------+--------------------------------------+ +| ``value`` | Float value for the metric being | +| | logged. | ++-------------------------------+--------------------------------------+ +| ``timestamp`` | Timestamp at which to log the | +| | metric. Timestamp is rounded to the | +| | nearest integer. If unspecified, the | +| | number of milliseconds since the | +| | Unix epoch is used. | ++-------------------------------+--------------------------------------+ +| ``step`` | Step at which to log the metric. | +| | Step is rounded to the nearest | +| | integer. If unspecified, the default | +| | value of zero is used. | ++-------------------------------+--------------------------------------+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. 
| ++-------------------------------+--------------------------------------+ -The fluent API family of functions operate with an implied MLflow client -determined by the service set by ``mlflow_set_tracking_uri()``. For -operations involving a run it adopts the current active run, or, if one -does not exist, starts one through the implied service. +``mlflow_log_model`` +==================== Log Model -========= -Logs a model in the given run. Similar to ``mlflow_save_model()`` but -stores model as an artifact within the active run. +Logs a model for this run. Similar to ``mlflow_save_model()`` but stores +model as an artifact within the active run. .. code:: r - mlflow_log_model(fn, artifact_path) + mlflow_log_model(model, artifact_path, ...) -.. _arguments-27: +.. _arguments-19: Arguments --------- @@ -1105,60 +801,76 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``fn`` | The serving function that will | -| | perform a prediction. | +| ``model`` | The model that will perform a | +| | prediction. | +-------------------------------+--------------------------------------+ | ``artifact_path`` | Destination path where this MLflow | | | compatible model will be saved. | +-------------------------------+--------------------------------------+ +| ``...`` | Optional additional arguments passed | +| | to ``mlflow_save_model()`` when | +| | persisting the model. For example, | +| | ``conda_env = /path/to/conda.yaml`` | +| | may be passed to specify a conda | +| | dependencies file for flavors | +| | (e.g. keras) that support conda | +| | environments. | ++-------------------------------+--------------------------------------+ -.. _log-parameter-1: +``mlflow_log_param`` +==================== Log Parameter -============= -API to log a parameter used for this run. Examples are params and -hyperparams used for ML training, or constant dates and values used in -an ETL pipeline. A params is a STRING key-value pair. For a run, a -single parameter is allowed to be logged only once. +Logs a parameter for a run. Examples are params and hyperparams used for +ML training, or constant dates and values used in an ETL pipeline. A +param is a STRING key-value pair. For a run, a single parameter is +allowed to be logged only once. .. code:: r - mlflow_log_param(key, value) + mlflow_log_param(key, value, run_id = NULL, client = NULL) -.. _arguments-28: +.. _arguments-20: Arguments --------- -+-----------+--------------------------------+ -| Argument | Description | -+===========+================================+ -| ``key`` | Name of the parameter. | -+-----------+--------------------------------+ -| ``value`` | String value of the parameter. | -+-----------+--------------------------------+ - -.. _details-22: - -Details -------- ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``key`` | Name of the parameter. | ++-------------------------------+--------------------------------------+ +| ``value`` | String value of the parameter. | ++-------------------------------+--------------------------------------+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . 
-The fluent API family of functions operate with an implied MLflow client
-determined by the service set by ``mlflow_set_tracking_uri()``. For
-operations involving a run it adopts the current active run, or, if one
-does not exist, starts one through the implied service.
+``mlflow_param``
+================

-Read Command Line Parameter
-===========================
+Read Command-Line Parameter

-Reads a command line parameter.
+Reads a command-line parameter passed to an MLflow project. MLflow
+allows you to define named, typed input parameters to your R scripts via
+the ``mlflow_param`` API. This is useful for experimentation, e.g.
+tracking multiple invocations of the same script with different
+parameters.

.. code:: r

   mlflow_param(name, default = NULL, type = NULL, description = NULL)

-.. _arguments-29:
+.. _arguments-21:

Arguments
---------

@@ -1166,86 +878,117 @@ Arguments
+-------------------------------+--------------------------------------+
| Argument | Description |
+===============================+======================================+
-| ``name`` | The name for this parameter. |
+| ``name`` | The name of the parameter. |
+-------------------------------+--------------------------------------+
-| ``default`` | The default value for this |
-| | parameter. |
+| ``default`` | The default value of the parameter. |
+-------------------------------+--------------------------------------+
| ``type`` | Type of this parameter. Required if |
| | ``default`` is not set. If |
| | specified, must be one of “numeric”, |
| | “integer”, or “string”. |
+-------------------------------+--------------------------------------+
-| ``description`` | Optional description for this |
+| ``description`` | Optional description for the |
| | parameter. |
+-------------------------------+--------------------------------------+

-Predict over MLflow Model Flavor
-================================
+.. _examples-1:
+
+Examples
+--------
+
+.. code:: r
+
+   # This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow
+   # project. You can run this script (assuming it's saved at /some/directory/params_example.R)
+   # with custom parameters via:
+   # mlflow_run(entry_point = "params_example.R", uri = "/some/directory",
+   #   parameters = list(num_trees = 200, learning_rate = 0.1))
+   install.packages("gbm")
+   library(mlflow)
+   library(gbm)
+   # define and read input parameters
+   num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer")
+   lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric")
+   # use params to fit a model
+   ir.adaboost <- gbm(Species ~., data=iris, n.trees=num_trees, shrinkage=lr)
+
+``mlflow_predict``
+==================
+
+Generate Prediction with MLflow Model

Performs prediction over a model loaded using ``mlflow_load_model()``,
to be used by package authors to extend the supported MLflow models.

.. code:: r

-   mlflow_predict_flavor(model, data)
+   mlflow_predict(model, data, ...)
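+
+As an illustrative sketch (the model path is hypothetical), a model
+previously saved with ``mlflow_save_model()`` can be loaded and scored
+against a data frame:
+
+.. code:: r
+
+   library(mlflow)
+
+   # load a saved model and generate predictions for a data frame
+   model <- mlflow_load_model("/path/to/model")
+   predictions <- mlflow_predict(model, iris)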
-.. _arguments-30:
+.. _arguments-22:

Arguments
---------

-+-----------+----------------------------------+
-| Argument | Description |
-+===========+==================================+
-| ``model`` | The loaded MLflow model flavor. |
-+-----------+----------------------------------+
-| ``data`` | A data frame to perform scoring. |
-+-----------+----------------------------------+
+========= ===================================================================
+Argument Description
+========= ===================================================================
+``model`` The loaded MLflow model flavor.
+``data`` A data frame to perform scoring.
+``...`` Optional additional arguments passed to underlying predict methods.
+========= ===================================================================

-Generate prediction with MLflow model.
-======================================
+``mlflow_rename_experiment``
+============================

-Generate prediction with MLflow model.
+Rename Experiment
+
+Renames an experiment.

.. code:: r

-   mlflow_predict_model(model, data)
+   mlflow_rename_experiment(new_name, experiment_id = NULL, client = NULL)

-.. _arguments-31:
+.. _arguments-23:

Arguments
---------

-+-----------+-------------------------+
-| Argument | Description |
-+===========+=========================+
-| ``model`` | MLflow model. |
-+-----------+-------------------------+
-| ``data`` | Dataframe to be scored. |
-+-----------+-------------------------+
-
-Restore Snapshot
-================
-
-Restores a snapshot of all dependencies required to run the files in the
-current directory
-
-.. code:: r
++-------------------------------+--------------------------------------+
+| Argument | Description |
++===============================+======================================+
+| ``new_name`` | The experiment’s name will be |
+| | changed to this. The new name must |
+| | be unique. |
++-------------------------------+--------------------------------------+
+| ``experiment_id`` | ID of the associated experiment. |
+| | This field is required. |
++-------------------------------+--------------------------------------+
+| ``client`` | (Optional) An MLflow client object |
+| | returned from |
+| | `mlflow_client <#mlflow-client>`__ . |
+| | If specified, MLflow will use the |
+| | tracking server associated with the |
+| | passed-in client. If unspecified |
+| | (the common case), MLflow will use |
+| | the tracking server associated with |
+| | the current tracking URI. |
++-------------------------------+--------------------------------------+

-   mlflow_restore_snapshot()
+``mlflow_restore_experiment``
+=============================

-Predict using RFunc MLflow Model
-================================
+Restore Experiment

-Predict using an RFunc MLflow Model from a file or data frame.
+Restores an experiment marked for deletion. This also restores
+associated metadata, runs, metrics, and params. If the experiment uses
+FileStore, underlying artifacts associated with the experiment are also
+restored.

.. code:: r

-   mlflow_rfunc_predict(model_path, run_uuid = NULL, input_path = NULL,
-     output_path = NULL, data = NULL, restore = FALSE)
+   mlflow_restore_experiment(experiment_id, client = NULL)

-.. _arguments-32:
+.. _arguments-24:

Arguments
---------

@@ -1253,49 +996,73 @@ Arguments
+-------------------------------+--------------------------------------+
| Argument | Description |
+===============================+======================================+
-| ``model_path`` | The path to the MLflow model, as a |
-| | string. |
-+-------------------------------+--------------------------------------+
-| ``run_uuid`` | Run ID of run to grab the model |
-| | from.
| -+-------------------------------+--------------------------------------+ -| ``input_path`` | Path to ‘JSON’ or ‘CSV’ file to be | -| | used for prediction. | -+-------------------------------+--------------------------------------+ -| ``output_path`` | ‘JSON’ or ‘CSV’ file where the | -| | prediction will be written to. | -+-------------------------------+--------------------------------------+ -| ``data`` | Data frame to be scored. This can be | -| | utilized for testing purposes and | -| | can only be specified when | -| | ``input_path`` is not specified. | +| ``experiment_id`` | ID of the associated experiment. | +| | This field is required. | +-------------------------------+--------------------------------------+ -| ``restore`` | Should ``mlflow_restore_snapshot()`` | -| | be called before serving? | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _examples-3: +.. _details-3: -Examples --------- +Details +------- + +Throws ``RESOURCE_DOES_NOT_EXIST`` if the experiment was never created +or was permanently deleted. + +``mlflow_restore_run`` +====================== + +Restore a Run + +Restores the run with the specified ID. .. code:: r - list("\n", "library(mlflow)\n", "\n", "# save simple model which roundtrips data as prediction\n", "mlflow_save_model(function(df) df, \"mlflow_roundtrip\")\n", "\n", "# save data as json\n", "jsonlite::write_json(iris, \"iris.json\")\n", "\n", "# predict existing model from json data\n", "mlflow_rfunc_predict(\"mlflow_roundtrip\", \"iris.json\")\n") - + mlflow_restore_run(run_id, client = NULL) + +.. _arguments-25: + +Arguments +--------- + ++-------------------------------+--------------------------------------+ +| Argument | Description | ++===============================+======================================+ +| ``run_id`` | Run ID. | ++-------------------------------+--------------------------------------+ +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ + +``mlflow_rfunc_serve`` +====================== Serve an RFunc MLflow Model -=========================== -Serve an RFunc MLflow Model as a local web api. +Serves an RFunc MLflow model as a local web API. .. code:: r - mlflow_rfunc_serve(model_path, run_uuid = NULL, host = "127.0.0.1", - port = 8090, daemonized = FALSE, browse = !daemonized, - restore = FALSE) + mlflow_rfunc_serve(model_uri, host = "127.0.0.1", port = 8090, + daemonized = FALSE, browse = !daemonized, ...) -.. _arguments-33: +.. _arguments-26: Arguments --------- @@ -1303,10 +1070,8 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``model_path`` | The path to the MLflow model, as a | -| | string. 
|
-+-------------------------------+--------------------------------------+
-| ``run_uuid`` | ID of run to grab the model from. |
+| ``model_uri`` | The location, in URI format, of the |
+| | MLflow model. |
+-------------------------------+--------------------------------------+
| ``host`` | Address to use to serve model, as a |
| | string. |
@@ -1314,43 +1079,74 @@ Arguments
| ``port`` | Port to use to serve model, as |
| | numeric. |
+-------------------------------+--------------------------------------+
-| ``daemonized`` | Makes ‘httpuv’ server daemonized so |
-| | R interactive sessions are not |
+| ``daemonized`` | Makes ``httpuv`` server daemonized |
+| | so R interactive sessions are not |
| | blocked to handle requests. To |
| | terminate a daemonized server, call |
-| | ‘httpuv::stopDaemonizedServer()’ |
+| | ``httpuv::stopDaemonizedServer()`` |
| | with the handle returned from this |
| | call. |
+-------------------------------+--------------------------------------+
| ``browse`` | Launch browser with serving landing |
| | page? |
+-------------------------------+--------------------------------------+
-| ``restore`` | Should ``mlflow_restore_snapshot()`` |
-| | be called before serving? |
+| ``...`` | Optional arguments passed to |
+| | ``mlflow_predict()``. |
+-------------------------------+--------------------------------------+

-.. _examples-4:
+.. _details-4:
+
+Details
+-------
+
+The URI scheme must be supported by MLflow, i.e., there has to be an
+MLflow artifact repository corresponding to the scheme of the URI. The
+content is expected to point to a directory containing an MLmodel file.
+The following are examples of valid model URIs:
+
+- ``file:///absolute/path/to/local/model``
+- ``file:relative/path/to/local/model``
+- ``s3://my_bucket/path/to/model``
+- ``runs:/<run_id>/run-relative/path/to/model``
+
+For more information about supported URI schemes, see the Artifacts
+Documentation at
+https://www.mlflow.org/docs/latest/tracking.html#supported-artifact-stores.
+
+.. _examples-2:

Examples
--------

.. code:: r

-   list("\n", "library(mlflow)\n", "\n", "# save simple model with constant prediction\n", "mlflow_save_model(function(df) 1, \"mlflow_constant\")\n", "\n", "# serve an existing model over a web interface\n", "mlflow_rfunc_serve(\"mlflow_constant\")\n", "\n", "# request prediction from server\n", "httr::POST(\"http://127.0.0.1:8090/predict/\")\n")
+   library(mlflow)

-Run in MLflow
-=============
+   # save simple model with constant prediction
+   mlflow_save_model(function(df) 1, "mlflow_constant")
+
+   # serve an existing model over a web interface
+   mlflow_rfunc_serve("mlflow_constant")

-Wrapper for ``mlflow run``.
+   # request prediction from server
+   httr::POST("http://127.0.0.1:8090/predict/")
+
+``mlflow_run``
+==============
+
+Run an MLflow Project
+
+Wrapper for the ``mlflow run`` CLI command. See
+https://www.mlflow.org/docs/latest/cli.html#run for more info.

.. code:: r

-   mlflow_run(entry_point = NULL, uri = ".", version = NULL,
-     param_list = NULL, experiment_id = NULL, mode = NULL,
-     cluster_spec = NULL, git_username = NULL, git_password = NULL,
-     no_conda = FALSE, storage_dir = NULL)
+   mlflow_run(uri = ".", entry_point = NULL, version = NULL,
+     parameters = NULL, experiment_id = NULL, experiment_name = NULL,
+     backend = NULL, backend_config = NULL, no_conda = FALSE,
+     storage_dir = NULL)

-.. _arguments-34:
+.. 
_arguments-27: Arguments --------- @@ -1358,149 +1154,119 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``entry_point`` | Entry point within project, defaults | -| | to ``main`` if not specified. | -+-------------------------------+--------------------------------------+ | ``uri`` | A directory containing modeling | | | scripts, defaults to the current | | | directory. | +-------------------------------+--------------------------------------+ +| ``entry_point`` | Entry point within project, defaults | +| | to ``main`` if not specified. | ++-------------------------------+--------------------------------------+ | ``version`` | Version of the project to run, as a | | | Git commit reference for Git | | | projects. | +-------------------------------+--------------------------------------+ -| ``param_list`` | A list of parameters. | +| ``parameters`` | A list of parameters. | +-------------------------------+--------------------------------------+ | ``experiment_id`` | ID of the experiment under which to | | | launch the run. | +-------------------------------+--------------------------------------+ -| ``mode`` | Execution mode to use for run. | +| ``experiment_name`` | Name of the experiment under which | +| | to launch the run. | +-------------------------------+--------------------------------------+ -| ``cluster_spec`` | Path to JSON file describing the | -| | cluster to use when launching a run | -| | on Databricks. | +| ``backend`` | Execution backend to use for run. | +-------------------------------+--------------------------------------+ -| ``git_username`` | Username for HTTP(S) Git | -| | authentication. | -+-------------------------------+--------------------------------------+ -| ``git_password`` | Password for HTTP(S) Git | -| | authentication. | +| ``backend_config`` | Path to JSON file which will be | +| | passed to the backend. For the | +| | Databricks backend, it should | +| | describe the cluster to use when | +| | launching a run on Databricks. | +-------------------------------+--------------------------------------+ | ``no_conda`` | If specified, assume that MLflow is | | | running within a Conda environment | | | with the necessary dependencies for | | | the current project instead of | -| | attempting to create a new conda | +| | attempting to create a new Conda | | | environment. Only valid if running | | | locally. | +-------------------------------+--------------------------------------+ -| ``storage_dir`` | Only valid when ``mode`` is local. | -| | MLflow downloads artifacts from | -| | distributed URIs passed to | -| | parameters of type ‘path’ to | -| | subdirectories of storage_dir. | +| ``storage_dir`` | Valid only when ``backend`` is | +| | local. MLflow downloads artifacts | +| | from distributed URIs passed to | +| | parameters of type ``path`` to | +| | subdirectories of ``storage_dir``. | +-------------------------------+--------------------------------------+ -.. _value-1: - Value ----- The run associated with this run. -Save MLflow Keras Model Flavor -============================== +.. _examples-3: -Saves model in MLflow’s Keras flavor. +Examples +-------- .. code:: r - list(list("mlflow_save_flavor"), list("keras.engine.training.Model"))(x, - path = "model", r_dependencies = NULL, conda_env = NULL) - -.. 
_arguments-35:
-
-Arguments
----------
-
-+-------------------------------+--------------------------------------+
-| Argument | Description |
-+===============================+======================================+
-| ``x`` | The serving function or model that |
-| | will perform a prediction. |
-+-------------------------------+--------------------------------------+
-| ``path`` | Destination path where this MLflow |
-| | compatible model will be saved. |
-+-------------------------------+--------------------------------------+
-| ``r_dependencies`` | Optional vector of paths to |
-| | dependency files to include in the |
-| | model, as in ``r-dependencies.txt`` |
-| | or ``conda.yaml`` . |
-+-------------------------------+--------------------------------------+
-| ``conda_env`` | Path to Conda dependencies file. |
-+-------------------------------+--------------------------------------+
-
-.. _value-2:
-
-Value
------
-
-This funciton must return a list of flavors that conform to the MLmodel
-specification.
+   # This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow
+   # project. You can run this script (assuming it's saved at /some/directory/params_example.R)
+   # with custom parameters via:
+   # mlflow_run(entry_point = "params_example.R", uri = "/some/directory",
+   #   parameters = list(num_trees = 200, learning_rate = 0.1))
+   install.packages("gbm")
+   library(mlflow)
+   library(gbm)
+   # define and read input parameters
+   num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer")
+   lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric")
+   # use params to fit a model
+   ir.adaboost <- gbm(Species ~., data=iris, n.trees=num_trees, shrinkage=lr)
+
+``mlflow_save_model.crate``
+===========================

-Save MLflow Model Flavor
-========================
+Save Model for MLflow

-Saves model in MLflow’s flavor, to be used by package authors to extend
-the supported MLflow models.
+Saves a model in MLflow format that can later be used for prediction and
+serving. This method is generic to allow package authors to save custom
+model types.

.. code:: r

-   mlflow_save_flavor(x, path = "model", r_dependencies = NULL,
-     conda_env = NULL)
+   ## S3 method for class 'crate'
+   mlflow_save_model(model, path, ...)
+   ## S3 method for class 'keras.engine.training.Model'
+   mlflow_save_model(model, path,
+     conda_env = NULL, ...)
+   mlflow_save_model(model, path, ...)

-.. _arguments-36:
+.. _arguments-28:

Arguments
---------

-+-------------------------------+--------------------------------------+
-| Argument | Description |
-+===============================+======================================+
-| ``x`` | The serving function or model that |
-| | will perform a prediction. |
-+-------------------------------+--------------------------------------+
-| ``path`` | Destination path where this MLflow |
-| | compatible model will be saved. |
-+-------------------------------+--------------------------------------+
-| ``r_dependencies`` | Optional vector of paths to |
-| | dependency files to include in the |
-| | model, as in ``r-dependencies.txt`` |
-| | or ``conda.yaml`` . |
-+-------------------------------+--------------------------------------+
-| ``conda_env`` | Path to Conda dependencies file. |
-+-------------------------------+--------------------------------------+

-.. 
_value-3:
-
-Value
------
+============= ==================================================================
+Argument Description
+============= ==================================================================
+``model`` The model that will perform a prediction.
+``path`` Destination path where this MLflow compatible model will be saved.
+``...`` Optional additional arguments.
+``conda_env`` Path to Conda dependencies file.
+============= ==================================================================

-This funciton must return a list of flavors that conform to the MLmodel
-specification.
+``mlflow_search_runs``
+======================

-Save Model for MLflow
-=====================
+Search Runs

-Saves model in MLflow’s format that can later be used for prediction and
-serving.
+Searches for runs that satisfy expressions. Search expressions can use
+Metric and Param keys.

.. code:: r

-   mlflow_save_model(x, path = "model", r_dependencies = NULL,
-     conda_env = NULL)
+   mlflow_search_runs(filter = NULL, run_view_type = c("ACTIVE_ONLY",
+     "DELETED_ONLY", "ALL"), experiment_ids = NULL, order_by = list(),
+     client = NULL)

-.. _arguments-37:
+.. _arguments-29:

Arguments
---------

@@ -1508,22 +1274,39 @@ Arguments
+-------------------------------+--------------------------------------+
| Argument | Description |
+===============================+======================================+
-| ``x`` | The serving function or model that |
-| | will perform a prediction. |
-+-------------------------------+--------------------------------------+
-| ``path`` | Destination path where this MLflow |
-| | compatible model will be saved. |
-+-------------------------------+--------------------------------------+
-| ``r_dependencies`` | Optional vector of paths to |
-| | dependency files to include in the |
-| | model, as in ``r-dependencies.txt`` |
-| | or ``conda.yaml`` . |
-+-------------------------------+--------------------------------------+
-| ``conda_env`` | Path to Conda dependencies file. |
-+-------------------------------+--------------------------------------+
+| ``filter`` | A filter expression over params, |
+| | metrics, and tags, allowing |
+| | returning a subset of runs. The |
+| | syntax is a subset of SQL which |
+| | allows only ANDing together binary |
+| | operations between a |
+| | param/metric/tag and a constant. |
++-------------------------------+--------------------------------------+
+| ``run_view_type`` | Run view type. |
++-------------------------------+--------------------------------------+
+| ``experiment_ids`` | List of string experiment IDs (or a |
+| | single string experiment ID) to |
+| | search over. Attempts to use active |
+| | experiment if not specified. |
++-------------------------------+--------------------------------------+
+| ``order_by`` | List of properties to order by. |
+| | Example: “metrics.acc DESC”. |
++-------------------------------+--------------------------------------+
+| ``client`` | (Optional) An MLflow client object |
+| | returned from |
+| | `mlflow_client <#mlflow-client>`__ . |
+| | If specified, MLflow will use the |
+| | tracking server associated with the |
+| | passed-in client. If unspecified |
+| | (the common case), MLflow will use |
+| | the tracking server associated with |
+| | the current tracking URI. |
++-------------------------------+--------------------------------------+
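+
+As a hedged example (the metric name ``acc`` is hypothetical and assumes
+runs that logged it), the best-performing active runs can be retrieved
+as follows:
+
+.. code:: r
+
+   library(mlflow)
+
+   # find runs with accuracy above 0.9, highest accuracy first
+   runs <- mlflow_search_runs(
+     filter = "metrics.acc > 0.9",
+     order_by = list("metrics.acc DESC")
+   )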
+
+``mlflow_server``
+=================

-Run the MLflow Tracking Server
-==============================
+Run MLflow Tracking Server

Wrapper for ``mlflow server``.

@@ -1533,7 +1316,7 @@ Wrapper for ``mlflow server``.
     host = "127.0.0.1", port = 5000, workers = 4,
     static_prefix = NULL)

-.. _arguments-38:
+.. _arguments-30:

Arguments
---------

@@ -1560,50 +1343,52 @@ Arguments
| | the path of all static paths. |
+-------------------------------+--------------------------------------+

+``mlflow_set_experiment``
+=========================
+
Set Experiment
-==============

-Set given experiment as active experiment. If experiment does not exist,
-create an experiment with provided name.
+Sets an experiment as the active experiment. Either the name or ID of
+the experiment can be provided. If a name is provided but the
+experiment does not exist, this function creates an experiment with the
+provided name. Returns the ID of the active experiment.

.. code:: r

-   mlflow_set_experiment(experiment_name)
+   mlflow_set_experiment(experiment_name = NULL, experiment_id = NULL,
+     artifact_location = NULL)

-.. _arguments-39:
+.. _arguments-31:

Arguments
---------

-+---------------------+-------------------------------------+
-| Argument | Description |
-+=====================+=====================================+
-| ``experiment_name`` | Name of experiment to be activated. |
-+---------------------+-------------------------------------+
-
-.. _details-23:
-
-Details
--------
-
-The fluent API family of functions operate with an implied MLflow client
-determined by the service set by ``mlflow_set_tracking_uri()``. For
-operations involving a run it adopts the current active run, or, if one
-does not exist, starts one through the implied service.
++-------------------------------+--------------------------------------+
+| Argument | Description |
++===============================+======================================+
+| ``experiment_name`` | Name of experiment to be activated. |
++-------------------------------+--------------------------------------+
+| ``experiment_id`` | ID of experiment to be activated. |
++-------------------------------+--------------------------------------+
+| ``artifact_location`` | Location where all artifacts for |
+| | this experiment are stored. If not |
+| | provided, the remote server will |
+| | select an appropriate default. |
++-------------------------------+--------------------------------------+

-.. _set-tag-1:
+``mlflow_set_tag``
+==================

Set Tag
-=======

-Set a tag on a run. Tags are run metadata that can be updated during and
-after a run completes.
+Sets a tag on a run. Tags are run metadata that can be updated during a
+run and after a run completes.

.. code:: r

-   mlflow_set_tag(key, value)
+   mlflow_set_tag(key, value, run_id = NULL, client = NULL)

-.. _arguments-40:
+.. _arguments-32:

Arguments
---------

@@ -1618,19 +1403,23 @@ Arguments
| | logged. Maximum size is 500 bytes. |
| | This field is required. |
+-------------------------------+--------------------------------------+
+| ``run_id`` | Run ID. |
++-------------------------------+--------------------------------------+
+| ``client`` | (Optional) An MLflow client object |
+| | returned from |
+| | `mlflow_client <#mlflow-client>`__ . |
+| | If specified, MLflow will use the |
+| | tracking server associated with the |
+| | passed-in client. 
If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | ++-------------------------------+--------------------------------------+ -.. _details-24: - -Details -------- - -The fluent API family of functions operate with an implied MLflow client -determined by the service set by ``mlflow_set_tracking_uri()``. For -operations involving a run it adopts the current active run, or, if one -does not exist, starts one through the implied service. +``mlflow_set_tracking_uri`` +=========================== Set Remote Tracking URI -======================= Specifies the URI to the remote MLflow server that will be used to track experiments. @@ -1639,29 +1428,21 @@ experiments. mlflow_set_tracking_uri(uri) -.. _arguments-41: +.. _arguments-33: Arguments --------- -+----------+--------------------------------------+ -| Argument | Description | -+==========+======================================+ -| ``uri`` | The URI to the remote MLflow server. | -+----------+--------------------------------------+ - -Dependencies Snapshot -===================== - -Creates a snapshot of all dependencies required to run the files in the -current directory. +======== ==================================== +Argument Description +======== ==================================== +``uri`` The URI to the remote MLflow server. +======== ==================================== -.. code:: r - - mlflow_snapshot() +``mlflow_source`` +================= Source a Script with MLflow Params -================================== This function should not be used interactively. It is designed to be called via ``Rscript`` from the terminal or through the MLflow CLI. @@ -1670,30 +1451,34 @@ called via ``Rscript`` from the terminal or through the MLflow CLI. mlflow_source(uri) -.. _arguments-42: +.. _arguments-34: Arguments --------- -+----------+----------------------------------------------------------+ -| Argument | Description | -+==========+==========================================================+ -| ``uri`` | Path to an R script, can be a quoted or unquoted string. | -+----------+----------------------------------------------------------+ +======== ======================================================== +Argument Description +======== ======================================================== +``uri`` Path to an R script, can be a quoted or unquoted string. +======== ======================================================== + +``mlflow_start_run`` +==================== Start Run -========= -Starts a new run within an experiment, should be used within a ``with`` -block. +Starts a new run. If ``client`` is not provided, this function infers +contextual information such as source name and version, and also +registers the created run as the active run. If ``client`` is provided, +no inference is done, and additional arguments such as ``start_time`` +can be provided. .. code:: r - mlflow_start_run(run_uuid = NULL, experiment_id = NULL, - source_name = NULL, source_version = NULL, entry_point_name = NULL, - source_type = "LOCAL") + mlflow_start_run(run_id = NULL, experiment_id = NULL, + start_time = NULL, tags = NULL, client = NULL) -.. _arguments-43: +.. 
_arguments-35: Arguments --------- @@ -1701,66 +1486,62 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``run_uuid`` | If specified, get the run with the | +| ``run_id`` | If specified, get the run with the | | | specified UUID and log metrics and | | | params under that run. The run’s end | | | time is unset and its status is set | | | to running, but the run’s other | | | attributes remain unchanged. | +-------------------------------+--------------------------------------+ -| ``experiment_id`` | Used only when ``run_uuid`` is | +| ``experiment_id`` | Used only when ``run_id`` is | | | unspecified. ID of the experiment | | | under which to create the current | | | run. If unspecified, the run is | | | created under a new experiment with | | | a randomly generated name. | +-------------------------------+--------------------------------------+ -| ``source_name`` | Name of the source file or URI of | -| | the project to be associated with | -| | the run. Defaults to the current | -| | file if none provided. | -+-------------------------------+--------------------------------------+ -| ``source_version`` | Optional Git commit hash to | -| | associate with the run. | +| ``start_time`` | Unix timestamp of when the run | +| | started in milliseconds. Only used | +| | when ``client`` is specified. | +-------------------------------+--------------------------------------+ -| ``entry_point_name`` | Optional name of the entry point for | -| | to the current run. | +| ``tags`` | Additional metadata for run in | +| | key-value pairs. Only used when | +| | ``client`` is specified. | +-------------------------------+--------------------------------------+ -| ``source_type`` | Integer enum value describing the | -| | type of the run (“local”, “project”, | -| | etc.). | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . | +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ -.. _details-25: - -Details -------- - -The fluent API family of functions operate with an implied MLflow client -determined by the service set by ``mlflow_set_tracking_uri()``. For -operations involving a run it adopts the current active run, or, if one -does not exist, starts one through the implied service. - -.. _examples-5: +.. _examples-4: Examples -------- .. code:: r - list("\n", "with(mlflow_start_run(), {\n", " mlflow_log(\"test\", 10)\n", "})\n") - + with(mlflow_start_run(), { + mlflow_log_metric("test", 10) + }) -MLflow User Interface -===================== +``mlflow_ui`` +============= -Launches MLflow user interface. +Run MLflow User Interface + +Launches the MLflow user interface. .. code:: r - mlflow_ui(x, ...) + mlflow_ui(client, ...) -.. _arguments-44: +.. _arguments-36: Arguments --------- @@ -1768,38 +1549,34 @@ Arguments +-------------------------------+--------------------------------------+ | Argument | Description | +===============================+======================================+ -| ``x`` | An ``mlflow_client`` object. | +| ``client`` | (Optional) An MLflow client object | +| | returned from | +| | `mlflow_client <#mlflow-client>`__ . 
| +| | If specified, MLflow will use the | +| | tracking server associated with the | +| | passed-in client. If unspecified | +| | (the common case), MLflow will use | +| | the tracking server associated with | +| | the current tracking URI. | +-------------------------------+--------------------------------------+ | ``...`` | Optional arguments passed to | | | ``mlflow_server()`` when ``x`` is a | | | path to a file store. | +-------------------------------+--------------------------------------+ -.. _examples-6: +.. _examples-5: Examples -------- .. code:: r - list("\n", "library(mlflow)\n", "mlflow_install()\n", "\n", "# launch mlflow ui locally\n", "mlflow_ui()\n", "\n", "# launch mlflow ui for existing mlflow server\n", "mlflow_set_tracking_uri(\"http://tracking-server:5000\")\n", "mlflow_ui()\n") - - -Uninstalls MLflow. -================== - -Uninstalls MLflow by removing the Conda environment. - -.. code:: r - - mlflow_uninstall() - -.. _examples-7: + library(mlflow) + install_mlflow() -Examples --------- - -.. code:: r + # launch mlflow ui locally + mlflow_ui() - list("\n", "library(mlflow)\n", "mlflow_install()\n", "mlflow_uninstall()\n") - + # launch mlflow ui for existing mlflow server + mlflow_set_tracking_uri("http://tracking-server:5000") + mlflow_ui() diff --git a/docs/source/_static/images/metrics-step.png b/docs/source/_static/images/metrics-step.png new file mode 100644 index 0000000000000..04be57ab80e91 Binary files /dev/null and b/docs/source/_static/images/metrics-step.png differ diff --git a/docs/source/_static/images/metrics-time-relative.png b/docs/source/_static/images/metrics-time-relative.png new file mode 100644 index 0000000000000..18af8fc3dbf8e Binary files /dev/null and b/docs/source/_static/images/metrics-time-relative.png differ diff --git a/docs/source/_static/images/metrics-time-wall.png b/docs/source/_static/images/metrics-time-wall.png new file mode 100644 index 0000000000000..c73205f3a432e Binary files /dev/null and b/docs/source/_static/images/metrics-time-wall.png differ diff --git a/docs/source/cli.rst b/docs/source/cli.rst index 3ff8bfc11a37a..722a28c629bdc 100644 --- a/docs/source/cli.rst +++ b/docs/source/cli.rst @@ -3,145 +3,17 @@ Command-Line Interface ====================== -The MLflow command-line interface (CLI) provides a simple interface to various functionality in MLflow. You can use the CLI to -run projects, start the tracking UI, create and list experiments, download run artifacts, +The MLflow command-line interface (CLI) provides a simple interface to various functionality in MLflow. You can use the CLI to run projects, start the tracking UI, create and list experiments, download run artifacts, serve MLflow Python Function and scikit-learn models, and serve models on `Microsoft Azure Machine Learning `_ and `Amazon SageMaker `_. -.. code:: - - $ mlflow --help - Usage: mlflow [OPTIONS] COMMAND [ARGS]... - - Options: - --version Show the version and exit. - --help Show this message and exit. - - Commands: - azureml Serve models on Azure Machine Learning. - download Download the artifact at the specified DBFS or S3 URI. - experiments Manage experiments. - pyfunc Serve Python models locally. - run Run an MLflow project from the given URI. - sagemaker Serve models on Amazon SageMaker. - sklearn Serve scikit-learn models. - ui Run the MLflow tracking UI. - - Each individual command has a detailed help screen accessible via ``mlflow command_name --help``. .. 
contents:: Table of Contents :local: :depth: 2 - -Azure Machine Learning Models ------------------------------ - -Subcommands to serve models on Azure Machine Learning. - - -Download --------- - -Download the artifact at the specified DBFS or S3 URI into the specified -local output path, or the current directory if no output path is -specified. - - -Experiments ------------ - -Subcommands to manage experiments. - - -.. contents:: In this section: - :local: - :depth: 1 - -Create -~~~~~~ - -Create an experiment. The command has required argument for experiment name. -Additionally, you can provide an artifact location using ``-l`` or ``--artifact-location`` -option. If not provided, backend store will pick default location. Backend store will generate a -unique ID for each experiment. - -All artifacts generated by runs related to this experiment will be stored under artifact location, -organized under specific run_uuid sub-directories. - -Implementation of experiment and metadata store is dependent on backend storage. ``FileStore`` -creates a folder for each experiment ID and stores metadata in ``meta.yaml``. Runs are stored as -subfolders. - - -List -~~~~ - -Lists all experiments managed by backend store. Command takes an optional ``--view`` or ``-v`` -argument. Valid arguments are ``active_only`` (default), ``deleted_only``, or ``all``. - - -Delete -~~~~~~ - -Mark an active experiment for deletion. This also applies to experiment's metadata, runs and -associated data, and artifacts if they are store in default location. Use ``list`` command to view -artifact location. Command takes a required argument for experiment ID. Command will thrown -an error if experiment is not found or already marked for deletion. - -Experiments marked for deletion can be restored using ``restore`` command, unless they are -permanently deleted. - -Specific implementation of deletion is dependent on backend stores. ``FileStore`` moves -experiments marked for deletion under a ``.trash`` folder under the main folder used to -instantiate ``FileStore``. Experiments marked for deletion can be permanently deleted by clearing -the ``.trash`` folder. It is recommended to use a ``cron`` job or an alternate workflow mechanism -to clear ``.trash`` folder. - - -Restore -~~~~~~~ - -Restore a deleted experiment. This also applies to experiment's metadata, runs and associated data. -The command has a required argument for experiment ID. The command throws an error if the experiment is -already active, cannot be found, or permanently deleted. - - -Python Function Models ----------------------- - -Subcommands to serve Python models and apply them for inference. - - -Run ---- - -Run an MLflow project from the given URI. - -If running locally (the default), the URI can be either a Git repository -URI or a local path. If running on Databricks, the URI must be a Git -repository. - -By default, Git projects will run in a new working directory with the -given parameters, while local projects will run from the project's root -directory. - - -SageMaker Models ----------------- - -Subcommands to serve models on SageMaker. - - -scikit-learn Models -------------------- - -Subcommands to serve scikit-learn models and apply them for inference. - - -UI --- - -Run the MLflow tracking UI. The UI is served at http://localhost:5000. +.. 
click:: mlflow.cli:cli + :prog: mlflow + :show-nested: diff --git a/docs/source/concepts.rst b/docs/source/concepts.rst index 094d76431814c..47adad7f306f5 100644 --- a/docs/source/concepts.rst +++ b/docs/source/concepts.rst @@ -58,7 +58,7 @@ is simply a directory with code or a Git repository, and uses a descriptor file convention to specify its dependencies and how to run the code. For example, projects can contain a ``conda.yaml`` file for specifying a Python `Conda `_ environment. When you use the MLflow Tracking API in a Project, MLflow automatically remembers the project -version executed (for example, Git commit) and any parameters. You can easily run existing MLflow +version (for example, Git commit) and any parameters. You can easily run existing MLflow Projects from GitHub or your own Git repository, and chain them into multi-step workflows. **MLflow Models** offer a convention for packaging machine learning models in multiple flavors, and @@ -68,8 +68,8 @@ TensorFlow model can be loaded as a TensorFlow DAG, or as a Python function to a MLflow provides tools to deploy many common model types to diverse platforms: for example, any model supporting the "Python function" flavor can be deployed to a Docker-based REST server, to cloud platforms such as Azure ML and AWS SageMaker, and as a user-defined function in Apache Spark for -batch and streaming inference. If you output MLflow Models using the Tracking API, MLflow will also -automatically remember which Project and run they came from. +batch and streaming inference. If you output MLflow Models using the Tracking API, MLflow also +automatically remembers which Project and run they came from. .. TODO: example app and data diff --git a/docs/source/conf.py b/docs/source/conf.py index 9e995b2d825a6..1d93e8c7d40ad 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -35,6 +35,7 @@ extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', + 'sphinx_click.ext', ] # Add any paths that contain templates here, relative to this directory. @@ -53,7 +54,7 @@ # General information about the project. project = 'MLflow' -copyright = 'Databricks 2018. All rights reserved' +copyright = 'Databricks 2019. All rights reserved' author = 'Databricks' # The version info for the project you're documenting, acts as replacement for diff --git a/docs/source/index.rst b/docs/source/index.rst index 540b51e74ffa8..eae6a0bcf3370 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -12,7 +12,8 @@ It tackles three primary functions: MLflow is library-agnostic. You can use it with any machine learning library, and in any programming language, since all functions are accessible through a :ref:`rest-api` -and :ref:`CLI`. For convenience, the project also includes a :ref:`python-api`. +and :ref:`CLI`. For convenience, the project also includes a :ref:`python-api`, :ref:`R-api`, +and :ref:`java_api`. Get started using the :ref:`quickstart` or by reading about the :ref:`key concepts`. @@ -26,12 +27,8 @@ Get started using the :ref:`quickstart` or by reading about the :ref:`key concep projects models cli + search-syntax python_api/index R-api java_api/index rest-api - -.. warning:: - - The current version of MLflow is an alpha release. This means that APIs and storage formats - are subject to breaking change. 
diff --git a/docs/source/languagesections/languagesections.js b/docs/source/languagesections/languagesections.js index fa42ac1fe29cb..c990be39ee05e 100644 --- a/docs/source/languagesections/languagesections.js +++ b/docs/source/languagesections/languagesections.js @@ -62,10 +62,10 @@ $(function() { var my_sel_class = sel_class; // When the target language is not available, default to bash or python. if (!$('div.' + sel_class, parent).length) { - if ($('div.bash', parent).length) - my_sel_class = 'bash'; + if ($('div.highlight-bash', parent).length) + my_sel_class = 'highlight-bash'; else - my_sel_class = 'python'; + my_sel_class = 'highlight-python'; } $('div.example', parent).hide(); diff --git a/docs/source/models.rst b/docs/source/models.rst index 6b1d61fa46c61..f3c2c0e0a737a 100644 --- a/docs/source/models.rst +++ b/docs/source/models.rst @@ -5,7 +5,7 @@ MLflow Models An MLflow Model is a standard format for packaging machine learning models that can be used in a variety of downstream tools---for example, real-time serving through a REST API or batch inference -on Apache Spark. The format defines a convention that lets you save a model in different "flavors" +on Apache Spark. The format defines a convention that lets you save a model in different "flavors" that can be understood by different downstream tools. .. contents:: Table of Contents @@ -13,6 +13,8 @@ that can be understood by different downstream tools. :depth: 1 +.. _model-storage-format: + Storage Format -------------- @@ -42,7 +44,7 @@ format. For example, :py:mod:`mlflow.sklearn` outputs models as follows: And its ``MLmodel`` file describes two flavors: -.. code:: yaml +.. code-block:: yaml time_created: 2018-05-25T17:28:53.35 @@ -54,17 +56,17 @@ And its ``MLmodel`` file describes two flavors: loader_module: mlflow.sklearn This model can then be used with any tool that supports *either* the ``sklearn`` or -``python_function`` model flavor. For example, the ``mlflow sklearn`` command can serve a +``python_function`` model flavor. For example, the ``mlflow models serve`` command can serve a model with the ``sklearn`` flavor: -.. code:: +.. code-block:: bash - mlflow sklearn serve my_model + mlflow models serve my_model In addition, the ``mlflow sagemaker`` command-line tool can package and deploy models to AWS SageMaker as long as they support the ``python_function`` flavor: -.. code:: bash +.. code-block:: bash mlflow sagemaker deploy -m my_model [other options] @@ -79,6 +81,8 @@ time_created run_id ID of the run that created the model, if the model was saved using :ref:`tracking`. +.. _model-api: + Model API --------- @@ -103,7 +107,11 @@ Built-In Model Flavors MLflow provides several standard flavors that might be useful in your applications. Specifically, many of its deployment tools support these flavors, so you can export your own model in one of these -flavors to benefit from all these tools. +flavors to benefit from all these tools: + +.. contents:: + :local: + :depth: 1 Python Function (``python_function``) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -113,240 +121,609 @@ for saving and loading models to and from this format. The format is self-contai that it includes all the information necessary to load and use a model. Dependencies are stored either directly with the model or referenced via Conda environment. +Many MLflow Model persistence modules, such as :mod:`mlflow.sklearn`, :mod:`mlflow.keras`, +and :mod:`mlflow.pytorch`, produce models with the ``python_function`` (``pyfunc``) flavor. 
This
+means that they adhere to the :ref:`python_function filesystem format `
+and can be interpreted as generic Python classes that implement the specified
+:ref:`inference API `. Therefore, any tool that operates on these ``pyfunc``
+classes can operate on any MLflow Model containing the ``pyfunc`` flavor, regardless of which
+persistence module or framework was used to produce the model. This interoperability is very
+powerful because it allows any Python model to be productionized in a variety of environments.
+
The convention for ``python_function`` models is to have a ``predict`` method or function
with the following signature:

-.. code:: python
+.. code-block:: py

-    predict(data: pandas.DataFrame) -> [pandas.DataFrame | numpy.array]
+    predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.Series | pandas.DataFrame]

Other MLflow components expect ``python_function`` models to follow this convention.

-The ``python_function`` model format is defined as a directory structure containing all required data, code, and
-configuration:
-
-.. code:: bash
-
-  ./dst-path/
-      ./MLmodel: configuration
-      : code packaged with the model (specified in the MLmodel file)
-      : data packaged with the model (specified in the MLmodel file)
-      : Conda environment definition (specified in the MLmodel file)
-
-A ``python_function`` model directory must contain an ``MLmodel`` file in its root with "python_function" format and the following parameters:
+The ``python_function`` :ref:`model format ` is defined as a directory
+structure containing all required data, code, and configuration.

-- loader_module [required]:
-  Python module that can load the model. Expected to be a module identifier
-  (for example, ``mlflow.sklearn``) importable via ``importlib.import_module``.
-  The imported module must contain a function with the following signature:
+The :py:mod:`mlflow.pyfunc` module defines functions for saving and loading MLflow Models with the
+``python_function`` flavor. This module also includes utilities for creating custom Python models.
+For more information, see the :ref:`custom Python models documentation `
+and the :mod:`mlflow.pyfunc` documentation.

-      _load_pyfunc(path: string) ->
+R Function (``crate``)
+^^^^^^^^^^^^^^^^^^^^^^

-  The path argument is specified by the ``data`` parameter and may refer to a file or directory.
-
-- code [optional]:
-  A relative path to a directory containing the code packaged with this model.
-  All files and directories inside this directory are added to the Python path
-  prior to importing the model loader.
-
-- data [optional]:
-  A relative path to a file or directory containing model data.
-  The path is passed to the model loader.
-
-- env [optional]:
-  A relative path to an exported Conda environment. If present this environment
-  is activated prior to running the model.
-
-.. rubric:: Example
-
-.. code:: bash
-
-  tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
-
-::
-
-  ├── MLmodel
-  ├── code
-  │   ├── sklearn_iris.py
-  │
-  ├── data
-  │   └── model.pkl
-  └── mlflow_env.yml
-
-.. code:: bash
-
-  cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
-
-::
-
-  python_function:
-    code: code
-    data: data/model.pkl
-    loader_module: mlflow.sklearn
-    env: mlflow_env.yml
-    main: sklearn_iris
+The ``crate`` model flavor defines a generic model format for representing an arbitrary R prediction
+function as an MLflow model. 
The prediction function is expected to take a dataframe as input and +produce a dataframe, a vector or a list with the predictions as output. -For more information, see :py:mod:`mlflow.pyfunc`. +This flavor requires R to be installed in order to be used. H\ :sub:`2`\ O (``h2o``) ^^^^^^^^^^^^^^^^^^^^^^^^ -The H2O model flavor enables logging and loading H2O models. These models will be saved by using the :py:mod:`mlflow.h2o.save_model`. Using :py:mod:`mlflow.h2o.log_model` will also enable a valid ``Python Function`` flavor. +The ``h2o`` model flavor enables logging and loading H2O models. + +The :py:mod:`mlflow.h2o` module defines :py:func:`save_model() ` and +:py:func:`log_model() ` methods for saving H2O models in MLflow Model format. +These methods produce MLflow Models with the ``python_function`` flavor, allowing you to load them +as generic Python functions for inference via :py:func:`mlflow.pyfunc.load_pyfunc()`. When you load +MLflow Models with the ``h2o`` flavor using :py:func:`load_pyfunc() `, +the `h2o.init() `_ method is +called. Therefore, the correct version of ``h2o(-py)`` must be installed in the loader's +environment. You can customize the arguments given to +`h2o.init() `_ by modifying the +``init`` entry of the persisted H2O model's YAML configuration file: ``model.h2o/h2o.yaml``. + +Finally, you can use the :py:func:`mlflow.h2o.load_model()` method to load MLflow Models with the +``h2o`` flavor as H2O model objects. -When loading a H2O model as a PyFunc model, :py:mod:`h2o.init(...)` will be called. Therefore, the right version of h2o(-py) has to be in the environment. The arguments given to :py:mod:`h2o.init(...)` can be customized in ``model.h2o/h2o.yaml`` under the key ``init``. For more information, see :py:mod:`mlflow.h2o`. +For more information, see :py:mod:`mlflow.h2o`. Keras (``keras``) ^^^^^^^^^^^^^^^^^ -The ``keras`` model flavor enables logging and loading Keras models. This model will be saved in a HDF5 file format, via the model_save functionality provided by Keras. Additionally, model can be loaded back as ``Python Function``. For more information, see :py:mod:`mlflow.keras`. +The ``keras`` model flavor enables logging and loading Keras models. It is available in both Python +and R clients. The :py:mod:`mlflow.keras` module defines :py:func:`save_model()` +and :py:func:`log_model() ` functions that you can use to save Keras models +in MLflow Model format in Python. Similarly, in R, you can save or log the model using +`mlflow_save_model `__ and `mlflow_log_model `__. These functions serialize Keras +models as HDF5 files using the Keras library's built-in model persistence functions. MLflow Models +produced by these functions also contain the ``python_function`` flavor, allowing them to be interpreted +as generic Python functions for inference via :py:func:`mlflow.pyfunc.load_pyfunc()`. Finally, you +can use the :py:func:`mlflow.keras.load_model()` function in Python or `mlflow_load_model `__ +function in R to load MLflow Models with the ``keras`` flavor as +`Keras Model objects `_. + +For more information, see :py:mod:`mlflow.keras`. MLeap (``mleap``) ^^^^^^^^^^^^^^^^^ -The ``mleap`` model flavor supports saving models using the MLeap persistence mechanism. A companion module for loading MLflow models with the MLeap flavor format is available in the ``mlflow/java`` package. For more information, see :py:mod:`mlflow.mleap`. +The ``mleap`` model flavor supports saving Spark models in MLflow format using the +`MLeap `_ persistence mechanism. 
MLeap is an inference-optimized
+format and execution engine for Spark models that does not depend on
+`SparkContext `_
+to evaluate inputs.
+
+You can save Spark models in MLflow format with the ``mleap`` flavor by specifying the
+``sample_input`` argument of the :py:func:`mlflow.spark.save_model()` or
+:py:func:`mlflow.spark.log_model()` method (recommended). The :py:mod:`mlflow.mleap` module also
+defines :py:func:`save_model() ` and
+:py:func:`log_model() ` methods for saving MLeap models in MLflow format,
+but these methods do not include the ``python_function`` flavor in the models they produce.
+
+A companion module for loading MLflow Models with the MLeap flavor is available in the
+``mlflow/java`` package.
+
+For more information, see :py:mod:`mlflow.spark`, :py:mod:`mlflow.mleap`, and the
+`MLeap documentation `_.

PyTorch (``pytorch``)
^^^^^^^^^^^^^^^^^^^^^

-The ``pytorch`` model flavor enables logging and loading PyTorch models. Model is completely stored in `.pth` format using `torch.save(model)` method. Given a directory containing a saved model, you can log the model to MLflow via ``log_saved_model``. The saved model can then be loaded for inference via ``mlflow.pyfunc.load_pyfunc()``. For more information, see :py:mod:`mlflow.pytorch`.
+The ``pytorch`` model flavor enables logging and loading PyTorch models.
+
+The :py:mod:`mlflow.pytorch` module defines utilities for saving and loading MLflow Models with the
+``pytorch`` flavor. You can use the :py:func:`mlflow.pytorch.save_model()` and
+:py:func:`mlflow.pytorch.log_model()` methods to save PyTorch models in MLflow format; both of these
+functions use the `torch.save() `_ method to
+serialize PyTorch models. Additionally, you can use the :py:func:`mlflow.pytorch.load_model()`
+method to load MLflow Models with the ``pytorch`` flavor as PyTorch model objects. Finally, models
+produced by :py:func:`mlflow.pytorch.save_model()` and :py:func:`mlflow.pytorch.log_model()` contain
+the ``python_function`` flavor, allowing you to load them as generic Python functions for inference
+via :py:func:`mlflow.pyfunc.load_pyfunc()`.
+
+For more information, see :py:mod:`mlflow.pytorch`.

Scikit-learn (``sklearn``)
^^^^^^^^^^^^^^^^^^^^^^^^^^

-The ``sklearn`` model flavor provides an easy to use interface for handling scikit-learn models with no
-external dependencies. It saves and loads models using Python's pickle module and also generates a valid
-``python_function`` flavor model. For more information, see :py:mod:`mlflow.sklearn`.
+The ``sklearn`` model flavor provides an easy-to-use interface for saving and loading scikit-learn
+models. The :py:mod:`mlflow.sklearn` module defines
+:py:func:`save_model() ` and
+:py:func:`log_model() ` functions that save scikit-learn models in
+MLflow format, using either Python's pickle module (Pickle) or CloudPickle for model serialization.
+These functions produce MLflow Models with the ``python_function`` flavor, allowing them to
+be loaded as generic Python functions for inference via :py:func:`mlflow.pyfunc.load_pyfunc()`.
+Finally, you can use the :py:func:`mlflow.sklearn.load_model()` method to load MLflow Models with
+the ``sklearn`` flavor as scikit-learn model objects.
+For more information, see :py:mod:`mlflow.sklearn`.
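+
+As a brief hedged sketch (the destination path is arbitrary), a
+scikit-learn model can be saved and reloaded with these APIs as follows:
+
+.. code-block:: py
+
+    import mlflow.sklearn
+    from sklearn.datasets import load_iris
+    from sklearn.linear_model import LogisticRegression
+
+    # train a simple scikit-learn classifier
+    X, y = load_iris(return_X_y=True)
+    sk_model = LogisticRegression().fit(X, y)
+
+    # save the model in MLflow format, then load it back
+    mlflow.sklearn.save_model(sk_model, "sklearn_iris_model")
+    loaded_model = mlflow.sklearn.load_model("sklearn_iris_model")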
Exported models are
-saved using Spark MLLib's native serialization, and can then be loaded back as MLlib models or
-deployed as ``python_function`` models. When deployed as a ``python_function``, the model creates its own
-SparkContext and converts pandas DataFrame input to a Spark DataFrame before scoring. While this is not
-the most efficient solution, especially for real-time scoring, it enables you to easily deploy any MLlib PipelineModel
-(as long as the PipelineModel has no external JAR dependencies) to any endpoint supported by
-MLflow. For more information, see :py:mod:`mlflow.spark`.
+The ``spark`` model flavor enables exporting Spark MLlib models as MLflow Models.
+
+The :py:mod:`mlflow.spark` module defines :py:func:`save_model() ` and
+:py:func:`log_model() ` methods that save Spark MLlib pipelines in MLflow
+model format. MLflow Models produced by these functions contain the ``python_function`` flavor,
+allowing you to load them as generic Python functions via :py:func:`mlflow.pyfunc.load_pyfunc()`.
+When a model with the ``spark`` flavor is loaded as a Python function via
+:py:func:`load_pyfunc() `, a new
+`SparkContext `_
+is created for model inference; additionally, the function converts all Pandas DataFrame inputs to
+Spark DataFrames before scoring. While this initialization overhead and format-translation latency
+are not ideal for high-performance use cases, this approach enables you to easily deploy any
+`MLlib PipelineModel `_ to any production environment supported by MLflow
+(SageMaker, AzureML, etc.).
+
+Finally, the :py:func:`mlflow.spark.load_model()` method is used to load MLflow Models with
+the ``spark`` flavor as Spark MLlib pipelines.
+
+For more information, see :py:mod:`mlflow.spark`.
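+As a minimal sketch (``pipeline_model`` is assumed to be an already-fitted ``PipelineModel``, and
+``pandas_df`` a pandas DataFrame of features; the paths are illustrative), saving and reloading a
+Spark MLlib model might look like this:
+
+.. code-block:: py
+
+    import mlflow.pyfunc
+    import mlflow.spark
+
+    # Save the fitted PipelineModel in MLflow format
+    mlflow.spark.save_model(pipeline_model, "spark_model_path")
+
+    # Load it back as a Spark MLlib pipeline for batch scoring...
+    reloaded_pipeline = mlflow.spark.load_model("spark_model_path")
+
+    # ...or as a generic Python function that scores pandas DataFrames
+    pyfunc_model = mlflow.pyfunc.load_pyfunc("spark_model_path")
+    predictions = pyfunc_model.predict(pandas_df)
+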
 TensorFlow (``tensorflow``)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``tensorflow`` model flavor enables logging TensorFlow ``Saved Models`` and loading them back as ``Python Function`` models for inference on pandas DataFrames. Given a directory containing a saved model, you can log the model to MLflow via ``log_saved_model`` and then load the saved model for inference using ``mlflow.pyfunc.load_pyfunc``. For more information, see :py:mod:`mlflow.tensorflow`.
+The ``tensorflow`` model flavor allows serialized TensorFlow models in
+`SavedModel format `_
+to be logged in MLflow format via the :py:func:`mlflow.tensorflow.save_model()` and
+:py:func:`mlflow.tensorflow.log_model()` methods. These methods also add the ``python_function``
+flavor to the MLflow Models that they produce, allowing the models to be interpreted as generic
+Python functions for inference via :py:func:`mlflow.pyfunc.load_pyfunc()`. Finally, you can use the
+:py:func:`mlflow.tensorflow.load_model()` method to load MLflow Models with the ``tensorflow``
+flavor as TensorFlow graphs.
+
+For more information, see :py:mod:`mlflow.tensorflow`.
+
+ONNX (``onnx``)
+^^^^^^^^^^^^^^^
+The ``onnx`` model flavor enables logging of `ONNX models `_ in MLflow format via
+the :py:func:`mlflow.onnx.save_model()` and :py:func:`mlflow.onnx.log_model()` methods. These
+methods also add the ``python_function`` flavor to the MLflow Models that they produce, allowing the
+models to be interpreted as generic Python functions for inference via
+:py:func:`mlflow.pyfunc.load_pyfunc()`. The ``python_function`` representation of an MLflow
+ONNX model uses the `ONNX Runtime execution engine `_ for
+evaluation. Finally, you can use the :py:func:`mlflow.onnx.load_model()` method to load MLflow
+Models with the ``onnx`` flavor in native ONNX format.
+
+For more information, see :py:mod:`mlflow.onnx`.
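+As a minimal sketch (``onnx_model`` is assumed to be an in-memory ONNX model, such as one produced
+by an ONNX converter, and ``input_df`` a pandas DataFrame; the paths are illustrative):
+
+.. code-block:: py
+
+    import mlflow.onnx
+    import mlflow.pyfunc
+
+    # Save the ONNX model in MLflow format
+    mlflow.onnx.save_model(onnx_model, "onnx_model_path")
+
+    # Load it back in native ONNX format...
+    native_onnx_model = mlflow.onnx.load_model("onnx_model_path")
+
+    # ...or as a generic Python function backed by the ONNX Runtime
+    pyfunc_model = mlflow.pyfunc.load_pyfunc("onnx_model_path")
+    predictions = pyfunc_model.predict(input_df)
+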
+Model Customization
+-------------------
+
+While MLflow's built-in model persistence utilities are convenient for packaging models from various
+popular ML libraries in MLflow Model format, they do not cover every use case. For example, you may
+want to use a model from an ML library that is not explicitly supported by MLflow's built-in
+flavors. Alternatively, you may want to package custom inference code and data to create an
+MLflow Model. Fortunately, MLflow provides two solutions that can be used to accomplish these
+tasks: :ref:`custom-python-models` and :ref:`custom-flavors`.
+
+.. contents:: In this section:
+  :local:
+  :depth: 2
+
+.. _custom-python-models:
+
+Custom Python Models
+^^^^^^^^^^^^^^^^^^^^
+The :py:mod:`mlflow.pyfunc` module provides :py:func:`save_model() ` and
+:py:func:`log_model() ` utilities for creating MLflow Models with the
+``python_function`` flavor that contain user-specified code and *artifact* (file) dependencies.
+These artifact dependencies may include serialized models produced by any Python ML library.
+
+Because these custom models contain the ``python_function`` flavor, they can be deployed
+to any of MLflow's supported production environments, such as SageMaker, AzureML, or local
+REST endpoints.
+
+The following examples demonstrate how you can use the :py:mod:`mlflow.pyfunc` module to create
+custom Python models. For additional information about model customization with MLflow's
+``python_function`` utilities, see the
+:ref:`python_function custom models documentation `.
+
+Example: Creating a custom "add n" model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example defines a class for a custom model that adds a specified numeric value, ``n``, to all
+columns of a Pandas DataFrame input. Then, it uses the :py:mod:`mlflow.pyfunc` APIs to save an
+instance of this model with ``n = 5`` in MLflow Model format. Finally, it loads the model in
+``python_function`` format and uses it to evaluate a sample input.
+
+.. code-block:: py
+
+    import mlflow.pyfunc
+
+    # Define the model class
+    class AddN(mlflow.pyfunc.PythonModel):
+
+        def __init__(self, n):
+            self.n = n
+
+        def predict(self, context, model_input):
+            return model_input.apply(lambda column: column + self.n)
+
+    # Construct and save the model
+    model_path = "add_n_model"
+    add5_model = AddN(n=5)
+    mlflow.pyfunc.save_model(path=model_path, python_model=add5_model)
+
+    # Load the model in `python_function` format
+    loaded_model = mlflow.pyfunc.load_pyfunc(model_path)
+
+    # Evaluate the model
+    import pandas as pd
+    model_input = pd.DataFrame([range(10)])
+    model_output = loaded_model.predict(model_input)
+    assert model_output.equals(pd.DataFrame([range(5, 15)]))
+
+Example: Saving an XGBoost model in MLflow format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example begins by training and saving a gradient boosted tree model using the XGBoost
+library. Next, it defines a wrapper class around the XGBoost model that conforms to MLflow's
+``python_function`` :ref:`inference API `. Then, it uses the wrapper class and
+the saved XGBoost model to construct an MLflow Model that performs inference using the gradient
+boosted tree. Finally, it loads the MLflow Model in ``python_function`` format and uses it to
+evaluate test data.
+
+.. code-block:: py
+
+    # Load training and test datasets
+    import xgboost as xgb
+    from sklearn import datasets
+    from sklearn.model_selection import train_test_split
+
+    iris = datasets.load_iris()
+    x = iris.data[:, 2:]
+    y = iris.target
+    x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.2, random_state=42)
+    dtrain = xgb.DMatrix(x_train, label=y_train)
+
+    # Train and save an XGBoost model
+    xgb_model = xgb.train(params={'max_depth': 10}, dtrain=dtrain, num_boost_round=10)
+    xgb_model_path = "xgb_model.pth"
+    xgb_model.save_model(xgb_model_path)
+
+    # Create an `artifacts` dictionary that assigns a unique name to the saved XGBoost model file.
+    # This dictionary will be passed to `mlflow.pyfunc.save_model`, which will copy the model file
+    # into the new MLflow Model's directory.
+    artifacts = {
+        "xgb_model": xgb_model_path
+    }
+
+    # Define the model class
+    import mlflow.pyfunc
+    class XGBWrapper(mlflow.pyfunc.PythonModel):
+
+        def load_context(self, context):
+            import xgboost as xgb
+            self.xgb_model = xgb.Booster()
+            self.xgb_model.load_model(context.artifacts["xgb_model"])
+
+        def predict(self, context, model_input):
+            input_matrix = xgb.DMatrix(model_input.values)
+            return self.xgb_model.predict(input_matrix)
+
+    # Create a Conda environment for the new MLflow Model that contains the XGBoost library
+    # as a dependency, as well as the required CloudPickle library
+    import cloudpickle
+    conda_env = {
+        'channels': ['defaults'],
+        'dependencies': [
+          'xgboost={}'.format(xgb.__version__),
+          'cloudpickle={}'.format(cloudpickle.__version__),
+        ],
+        'name': 'xgb_env'
+    }
+
+    # Save the MLflow Model
+    mlflow_pyfunc_model_path = "xgb_mlflow_pyfunc"
+    mlflow.pyfunc.save_model(
+        path=mlflow_pyfunc_model_path, python_model=XGBWrapper(), artifacts=artifacts,
+        conda_env=conda_env)
+
+    # Load the model in `python_function` format
+    loaded_model = mlflow.pyfunc.load_pyfunc(mlflow_pyfunc_model_path)
+
+    # Evaluate the model
+    import pandas as pd
+    test_predictions = loaded_model.predict(pd.DataFrame(x_test))
+    print(test_predictions)
+
+.. _custom-flavors:
+
 Custom Flavors
---------------
-You can add a flavor in MLmodel files, either by writing it directly or
-building it with the :py:class:`mlflow.models.Model` class. Choose an arbitrary string name
-for your flavor. MLflow tools ignore flavors in the MLmodel file that they do not understand.
+^^^^^^^^^^^^^^
+You can also create custom MLflow Models by writing a custom *flavor*.
+
+As discussed in the :ref:`model-api` and :ref:`model-storage-format` sections, an MLflow Model
+is defined by a directory of files that contains an ``MLmodel`` configuration file. This ``MLmodel``
+file describes various model attributes, including the flavors in which the model can be
+interpreted. The ``MLmodel`` file contains an entry for each flavor name; each entry is
+a YAML-formatted collection of flavor-specific attributes.
+
+To create a new flavor to support a custom model, you define the set of flavor-specific attributes
+to include in the ``MLmodel`` configuration file, as well as the code that can interpret the
+contents of the model directory and the flavor's attributes.
+
+As an example, let's examine the :py:mod:`mlflow.pytorch` module corresponding to MLflow's
+``pytorch`` flavor. In the :py:func:`mlflow.pytorch.save_model()` method, a PyTorch model is saved
+to a specified output directory. Additionally, :py:func:`mlflow.pytorch.save_model()` leverages the
+:py:func:`mlflow.models.Model.add_flavor()` and :py:func:`mlflow.models.Model.save()` functions to
+produce an ``MLmodel`` configuration containing the ``pytorch`` flavor. The resulting configuration
+has several flavor-specific attributes, such as ``pytorch_version``, which denotes the version of the
+PyTorch library that was used to train the model. To interpret model directories produced by
+:py:func:`save_model() `, the :py:mod:`mlflow.pytorch` module also
+defines a :py:func:`load_model() ` method.
+:py:func:`mlflow.pytorch.load_model()` reads the ``MLmodel`` configuration from a specified
+model directory and uses the configuration attributes of the ``pytorch`` flavor to load
+and return a PyTorch model from its serialized representation.
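+The following sketch illustrates the same pattern for a hypothetical custom flavor named
+``my_flavor``; the flavor name, its attributes, and the serialization call are placeholders, not
+part of the MLflow API:
+
+.. code-block:: py
+
+    import os
+
+    from mlflow.models import Model
+
+    def save_model(my_model, path, mlflow_model=None):
+        os.makedirs(path)
+        # Serialize the model itself (library-specific logic; `serialize` is a placeholder)
+        model_data_subpath = "model.bin"
+        my_model.serialize(os.path.join(path, model_data_subpath))
+        # Record the flavor and its attributes in the MLmodel configuration
+        mlflow_model = mlflow_model or Model()
+        mlflow_model.add_flavor("my_flavor", my_flavor_version="0.1.0", data=model_data_subpath)
+        mlflow_model.save(os.path.join(path, "MLmodel"))
+
+A corresponding ``load_model()`` would read the ``MLmodel`` configuration, look up the
+``my_flavor`` entry, and use its attributes to locate, deserialize, and return the model.
+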
 Built-In Deployment Tools
 -------------------------
-MLflow provides tools for deploying models on a local machine and several production environments.
-You can use these tools to easily apply your models in a production environment. Not all deployment
-methods are available for all model flavors. Deployment is supported for the Python function format and all compatible formats.
+MLflow provides tools for deploying MLflow models on a local machine and to several production environments.
+Not all deployment methods are available for all model flavors.
 
-Local
-^^^^^
-MLflow can deploy models locally as local REST API endpoints or to directly score CSV files.
-This functionality is a convenient way of testing models before uploading to a remote model server.
-You deploy the Python Function flavor locally via the CLI interface to the :py:mod:`mlflow.pyfunc` module.
+.. contents:: In this section:
+  :local:
+  :depth: 1
 
-* :py:func:`serve ` deploys the model as a local REST API server.
-* :py:func:`predict ` uses the model to generate a prediction for a local
-  CSV file.
+.. _local_model_deployment:
 
-For more info, see:
+Deploy MLflow models
+^^^^^^^^^^^^^^^^^^^^
+MLflow can deploy models locally as local REST API endpoints or to directly score files. In addition,
+MLflow can package models as self-contained Docker images with the REST API endpoint. The image can
+be used to safely deploy the model to various environments such as Kubernetes.
 
-.. code:: bash
+You deploy MLflow models locally or generate a Docker image using the CLI interface to the
+:py:mod:`mlflow.models` module.
 
-  mlflow pyfunc --help
-  mlflow pyfunc serve --help
-  mlflow pyfunc predict --help
+The REST API server accepts the following data formats as POST input to the ``/invocations`` path:
 
-Microsoft AzureML
-^^^^^^^^^^^^^^^^^
-The :py:mod:`mlflow.azureml` module can export ``python_function`` models as Azure ML compatible models. It
-can also be used to directly deploy and serve models on Azure ML, provided the environment has
-been correctly set up.
+* JSON-serialized pandas DataFrames in the ``split`` orientation. For example,
+  ``data = pandas_df.to_json(orient='split')``. This format is specified using a ``Content-Type``
+  request header value of ``application/json`` or ``application/json; format=pandas-split``.
 
-* :py:func:`export ` exports the model in Azure ML-compatible format.
-  MLflow will output a directory with the dependencies necessary to deploy the model.
+* JSON-serialized pandas DataFrames in the ``records`` orientation. *We do not recommend using
+  this format because it is not guaranteed to preserve column ordering.* This format is
+  specified using a ``Content-Type`` request header value of
+  ``application/json; format=pandas-records``.
 
-* :py:func:`deploy ` deploys the model directly to Azure ML.
-  You first need to set up your environment to work with the Azure ML CLI. You can do this by
-  starting a shell from the Azure ML Workbench application. You also have to set up all accounts
-  required to run and deploy on Azure ML. Where the model is deployed is dependent on your
-  active Azure ML environment. If the active environment is set up for local deployment, the model
-  will be deployed locally in a Docker container (Docker is required).
+* CSV-serialized pandas DataFrames. For example, ``data = pandas_df.to_csv()``. This format is
+  specified using a ``Content-Type`` request header value of ``text/csv``.
 
-Model export example:
+Example requests:
 
-.. code:: bash
+.. code-block:: bash
 
-  mlflow azureml export -m  -o test-output
-  tree test-output
+  # split-oriented
+  curl http://127.0.0.1:5000/invocations -H 'Content-Type: application/json' -d '{
+      "columns": ["a", "b", "c"],
+      "data": [[1, 2, 3], [4, 5, 6]]
+  }'
 
-::
-
-  test-output
-  ├── create_service.sh - use this script to upload the model to Azure ML
-  ├── score.py - main module required by Azure ML
-  └── test-output - directory containing MLflow model in Python Function flavor
+  # record-oriented (fine for vector rows, loses ordering for JSON records)
+  curl http://127.0.0.1:5000/invocations -H 'Content-Type: application/json; format=pandas-records' -d '[[1, 2, 3], [4, 5, 6]]'
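+The following is a sketch of the same ``split``-oriented request sent from Python with the
+``requests`` library (it assumes a model server started with ``mlflow models serve`` and listening
+on the default port 5000):
+
+.. code-block:: py
+
+    import json
+
+    import requests
+
+    # JSON-serialized pandas DataFrame in the `split` orientation
+    payload = {"columns": ["a", "b", "c"], "data": [[1, 2, 3], [4, 5, 6]]}
+    response = requests.post(
+        "http://127.0.0.1:5000/invocations",
+        data=json.dumps(payload),
+        headers={"Content-Type": "application/json; format=pandas-split"})
+    print(response.json())
+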
-.. rubric:: Example workflow using the MLflow CLI
-
-.. code:: bash
+For more information about serializing pandas DataFrames, see
+`pandas.DataFrame.to_json `_.
+
+The ``predict`` command accepts the same input formats; the format is specified using
+command-line arguments.
 
-  az ml env set -n  -g  - set environment to local deployment
-  mlflow azureml deploy  - deploy locally to test the model
-  az ml env set -n  -g  - set environment to cluster
-  mlflow azureml deploy  - deploy to the cloud
+Commands
+~~~~~~~~
+
+* :py:func:`serve ` deploys the model as a local REST API server.
+* :py:func:`build_docker ` packages a REST API endpoint serving the
+  model as a Docker image.
+* :py:func:`predict ` uses the model to generate a prediction for a local
+  CSV or JSON file.
 
 For more info, see:
 
-.. code:: bash
+.. code-block:: bash
+
+  mlflow models --help
+  mlflow models serve --help
+  mlflow models predict --help
+  mlflow models build-docker --help
+
+.. _azureml_deployment:
+
+Deploy a ``python_function`` model on Microsoft Azure ML
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :py:mod:`mlflow.azureml` module can package ``python_function`` models into Azure ML container images.
+These images can be deployed to Azure Kubernetes Service (AKS) and the Azure Container Instances (ACI)
+platform for real-time serving. The resulting Azure ML ContainerImage contains a web server that
+accepts the following data formats as input:
+
+* JSON-serialized pandas DataFrames in the ``split`` orientation. For example, ``data = pandas_df.to_json(orient='split')``. This format is specified using a ``Content-Type`` request header value of ``application/json``.
+
+* :py:func:`build_image ` registers an MLflow Model with an existing Azure ML workspace and builds an Azure ML container image for deployment to AKS and ACI. The `Azure ML SDK`_ is required in order to use this function. *The Azure ML SDK requires Python 3. It cannot be installed with earlier versions of Python.*
+
+  .. _Azure ML SDK: https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py
+
+.. rubric:: Example workflow using the Python API
+
+.. code-block:: py
+
+    import mlflow.azureml
+
+    from azureml.core import Workspace
+    from azureml.core.webservice import AciWebservice, Webservice
+
+
+    # Create or load an existing Azure ML workspace. You can also load an existing workspace using
+    # Workspace.get(name="<workspace-name>")
+    workspace_name = "<workspace-name>"
+    subscription_id = "<subscription-id>"
+    resource_group = "<resource-group>"
+    location = "<location>"
+    azure_workspace = Workspace.create(name=workspace_name,
+                                       subscription_id=subscription_id,
+                                       resource_group=resource_group,
+                                       location=location,
+                                       create_resource_group=True,
+                                       exist_ok=True)
+
+    # Build an Azure ML container image for deployment
+    azure_image, azure_model = mlflow.azureml.build_image(model_uri="<model-uri>",
+                                                          workspace=azure_workspace,
+                                                          description="Wine regression model 1",
+                                                          synchronous=True)
+    # If your image build failed, you can access build logs at the following URI:
+    print("Access the following URI for build logs: {}".format(azure_image.image_build_log_uri))
+
+    # Deploy the container image to ACI
+    webservice_deployment_config = AciWebservice.deploy_configuration()
+    webservice = Webservice.deploy_from_image(
+        image=azure_image, workspace=azure_workspace, name="<deployment-name>")
+    webservice.wait_for_deployment()
+
+    # After the image deployment completes, requests can be posted via HTTP to the new ACI
+    # webservice's scoring URI. The following example posts a sample input from the wine dataset
+    # used in the MLflow ElasticNet example:
+    # https://github.com/mlflow/mlflow/tree/master/examples/sklearn_elasticnet_wine
+    print("Scoring URI is: %s" % webservice.scoring_uri)
+
+    import requests
+    import json
+
+    # `sample_input` is a JSON-serialized pandas DataFrame with the `split` orientation
+    sample_input = {
+        "columns": [
+            "alcohol",
+            "chlorides",
+            "citric acid",
+            "density",
+            "fixed acidity",
+            "free sulfur dioxide",
+            "pH",
+            "residual sugar",
+            "sulphates",
+            "total sulfur dioxide",
+            "volatile acidity"
+        ],
+        "data": [
+            [8.8, 0.045, 0.36, 1.001, 7, 45, 3, 20.7, 0.45, 170, 0.27]
+        ]
+    }
+    response = requests.post(
+        url=webservice.scoring_uri, data=json.dumps(sample_input),
+        headers={"Content-type": "application/json"})
+    response_json = json.loads(response.text)
+    print(response_json)
+
+.. rubric:: Example workflow using the MLflow CLI
+
+.. code-block:: bash
+
+  mlflow azureml build-image -w <workspace-name> -m <model-uri> -d "Wine regression model 1"
+
+  az ml service create aci -n <deployment-name> --image-id <image-name>:<image-version>
+
+  # After the image deployment completes, requests can be posted via HTTP to the new ACI
+  # webservice's scoring URI. The following example posts a sample input from the wine dataset
+  # used in the MLflow ElasticNet example:
+  # https://github.com/mlflow/mlflow/tree/master/examples/sklearn_elasticnet_wine
+
+  scoring_uri=$(az ml service show --name <deployment-name> -v | jq -r ".scoringUri")
+
+  # `sample_input` is a JSON-serialized pandas DataFrame with the `split` orientation
+  sample_input='
+  {
+      "columns": [
+          "alcohol",
+          "chlorides",
+          "citric acid",
+          "density",
+          "fixed acidity",
+          "free sulfur dioxide",
+          "pH",
+          "residual sugar",
+          "sulphates",
+          "total sulfur dioxide",
+          "volatile acidity"
+      ],
+      "data": [
+          [8.8, 0.045, 0.36, 1.001, 7, 45, 3, 20.7, 0.45, 170, 0.27]
+      ]
+  }'
+
+  echo $sample_input | curl -s -X POST $scoring_uri \
+  -H 'Cache-Control: no-cache' \
+  -H 'Content-Type: application/json' \
+  -d @-
+
+For more info, see:
+
+.. code-block:: bash
 
   mlflow azureml --help
-  mlflow azureml export --help
-  mlflow azureml deploy --help
-
-Amazon SageMaker
-^^^^^^^^^^^^^^^^
-The :py:mod:`mlflow.sagemaker` module can deploy ``python_function`` models on SageMaker
-or locally in a Docker container with SageMaker compatible environment.
-You have to set up your environment and user accounts first in order to
-deploy to SageMaker with MLflow. Also, in order to export a custom model to SageMaker, you need a
-MLflow-compatible Docker image to be available on Amazon ECR. MLflow provides a default Docker
-image definition; however, it is up to you to build the actual image and upload it to ECR.
-MLflow includes a utility function to perform this step. Once built and uploaded, the MLflow
-container can be used for all MLflow models.
+  mlflow azureml build-image --help
 
+.. _sagemaker_deployment:
+
+Deploy a ``python_function`` model on Amazon SageMaker
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :py:mod:`mlflow.sagemaker` module can deploy ``python_function`` models locally in a Docker
+container with a SageMaker-compatible environment and remotely on SageMaker.
+To deploy remotely to SageMaker you need to set up your environment and user accounts.
+To export a custom model to SageMaker, you need an MLflow-compatible Docker image to be available on Amazon ECR.
+MLflow provides a default Docker image definition; however, it is up to you to build the image and upload it to ECR.
+MLflow includes the utility function ``build_and_push_container`` to perform this step. Once built
+and uploaded, you can use the MLflow container for all MLflow Models. Model webservers deployed
+using the :py:mod:`mlflow.sagemaker`
+module accept the following data formats as input, depending on the deployment flavor:
+
+* ``python_function``: For this deployment flavor, the endpoint accepts the same formats described
+  in the :ref:`local model deployment documentation `.
+
+* ``mleap``: For this deployment flavor, the endpoint accepts `only`
+  JSON-serialized pandas DataFrames in the ``split`` orientation. For example,
+  ``data = pandas_df.to_json(orient='split')``. This format is specified using a ``Content-Type``
+  request header value of ``application/json``.
+
+Commands
+~~~~~~~~
 
+* :py:func:`run-local ` deploys the model locally in a Docker
+  container.
+  The image and the environment should be identical to how the model would be run remotely, so it
+  is useful for testing the model prior to deployment.
+
+* The :py:func:`build-and-push-container ` CLI command builds an MLflow
+  Docker image and uploads it to ECR. The caller must have the correct permissions set up. The image
+  is built locally and requires Docker to be present on the machine that performs this step.
+
+* :py:func:`deploy ` deploys the model on Amazon SageMaker. MLflow
+  uploads the Python Function model into S3 and starts an Amazon SageMaker endpoint serving
+  the model.
 
 .. rubric:: Example workflow using the MLflow CLI
 
-.. code:: bash
+.. code-block:: bash
 
   mlflow sagemaker build-and-push-container  - build the container (only needs to be called once)
   mlflow sagemaker run-local -m <path-to-model>  - test the model locally
-  mlflow sagemaker deploy  - deploy the model to the cloud
+  mlflow sagemaker deploy  - deploy the model remotely
 
 For more info, see:
 
-.. code:: bash
+.. code-block:: bash
 
   mlflow sagemaker --help
   mlflow sagemaker build-and-push-container --help
@@ -354,14 +731,51 @@ For more info, see:
   mlflow sagemaker deploy --help
 
-Apache Spark
-^^^^^^^^^^^^
-MLfLow can output a ``python_function`` model as an Apache Spark UDF, which can be uploaded to a Spark cluster and
-used to score the model.
+Export a ``python_function`` model as an Apache Spark UDF
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can output a ``python_function`` model as an Apache Spark UDF, which can be uploaded to a
+Spark cluster and used to score the model.
 
 .. rubric:: Example
 
-.. code:: python
+.. code-block:: py
 
   pyfunc_udf = mlflow.pyfunc.spark_udf(<path-to-model>)
   df = spark_df.withColumn("prediction", pyfunc_udf(<input-columns>))
+
+The resulting UDF is based on Spark's Pandas UDF and is currently limited to producing either a single
+value or an array of values of the same type per observation. By default, we return the first
+numeric column as a double. You can control what result is returned by supplying the ``result_type``
+argument. The following values are supported:
+
+* ``'int'`` or IntegerType_: The leftmost integer column that can fit in an
+  ``int32`` is returned, or an exception is raised if there is none.
+* ``'long'`` or LongType_: The leftmost long integer column that can fit in an ``int64``
+  is returned, or an exception is raised if there is none.
+* ArrayType_ (IntegerType_ | LongType_): Returns all integer columns that can fit
+  into the requested size.
+* ``'float'`` or FloatType_: The leftmost numeric column cast to
+  ``float32`` is returned, or an exception is raised if there is no numeric column.
+* ``'double'`` or DoubleType_: The leftmost numeric column cast to
+  ``double`` is returned, or an exception is raised if there is no numeric column.
+* ArrayType_ (FloatType_ | DoubleType_): Returns all numeric columns cast to the
+  requested type. An exception is raised if there are no numeric columns.
+* ``'string'`` or StringType_: The leftmost column converted to ``string``.
+* ArrayType_ (StringType_): Returns all columns converted to ``string``.
+
+.. _IntegerType: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.types.IntegerType
+.. _LongType: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.types.LongType
+.. _FloatType: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.types.FloatType
+.. _DoubleType: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.types.DoubleType
+.. _StringType: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.types.StringType
+.. _ArrayType: https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.types.ArrayType
+
+.. rubric:: Example
+
+.. code-block:: py
+
+  from pyspark.sql.types import ArrayType, FloatType
+  pyfunc_udf = mlflow.pyfunc.spark_udf(<path-to-model>, result_type=ArrayType(FloatType()))
+  # The prediction column will contain all the numeric columns returned by the model as floats
+  df = spark_df.withColumn("prediction", pyfunc_udf(<input-columns>))
diff --git a/docs/source/projects.rst b/docs/source/projects.rst
index 0cef5d7f993fd..3fcd777b69150 100644
--- a/docs/source/projects.rst
+++ b/docs/source/projects.rst
@@ -17,7 +17,7 @@ Overview
 At the core, MLflow Projects are just a convention for organizing and describing your code to let
 other data scientists (or automated tools) run it. Each project is simply a directory of files, or
 a Git repository, containing your code. MLflow can run some projects based on a convention for
-placing files in this directory (for example, a ``conda.yaml`` file will be treated as a
+placing files in this directory (for example, a ``conda.yaml`` file is treated as a
 `Conda `_ environment), but you can describe your project in more detail by
 adding a ``MLproject`` file, which is a `YAML `_ formatted
 text file. Each project can specify several properties:
@@ -25,57 +25,146 @@ text file. Each project can specify several properties:
 Name
   A human-readable name for the project.
 
-Dependencies
-  Libraries needed to run the project. MLflow currently uses the
-  `Conda `_ package manager, which supports both Python packages and native
-  libraries (for example, CuDNN or Intel MKL), to specify dependencies. MLflow will use the
-  Conda installation given by the ``MLFLOW_CONDA_HOME`` environment variable if specified
-  (e.g. running Conda commands by invoking ``$MLFLOW_CONDA_HOME/bin/conda``), and default to
-  running ``conda`` otherwise.
-
 Entry Points
-  Commands that can be executed within the project, and information about their
-  parameters. Most projects will contain at least one entry point that you want other users to
+  Commands that can be run within the project, and information about their
+  parameters. Most projects contain at least one entry point that you want other users to
   call. Some projects can also contain more than one entry point: for example, you might have a
   single Git repository containing multiple featurization algorithms. You can also call
   any ``.py`` or ``.sh`` file in the project as an entry point. If you list your entry points in
   a ``MLproject`` file, however, you can also specify *parameters* for them, including data
   types and default values.
 
+Environment
+  The software environment that should be used to execute project entry points. This includes all
+  library dependencies required by the project code. See :ref:`project-environments` for more
+  information about the software environments supported by MLflow Projects, including
+  :ref:`Conda environments ` and
+  :ref:`Docker containers `.
+
 You can run any project from a Git URI or from a local directory using the ``mlflow run``
 command-line tool, or the :py:func:`mlflow.projects.run` Python API. These APIs also allow submitting the
-project for remote execution on `Databricks `_.
+project for remote execution on :ref:`Databricks ` and
+:ref:`Kubernetes `.
 
-.. caution::
+.. important::
 
-  By default, MLflow will use a new, temporary working directory for Git projects.
+  By default, MLflow uses a new, temporary working directory for Git projects.
   This means that you should generally pass any file arguments to MLflow
   project using absolute, not relative, paths. If your project declares its parameters, MLflow
-  will automatically make paths absolute for parameters of type ``path``.
+  automatically makes paths absolute for parameters of type ``path``.
 
 Specifying Projects
 -------------------
 
-By default, any Git repository or local directory is treated as a project, and MLflow uses the
-following conventions to determine its parameters:
+By default, any Git repository or local directory can be treated as an MLflow project; you can
+invoke any bash or Python script contained in the directory as a project entry point. The
+:ref:`project-directories` section describes how MLflow interprets directories as projects.
+
+To provide additional control over a project's attributes, you can also include an :ref:`MLproject
+file ` in your project's repository or directory.
+
+Finally, MLflow projects allow you to specify the software :ref:`environment `
+that is used to execute project entry points.
+
+.. _project-environments:
+
+Project Environments
+^^^^^^^^^^^^^^^^^^^^
+MLflow currently supports the following project environments: Conda environment, Docker container environment, and system environment.
+
+.. _project-conda-environments:
+
+Conda environment
+  `Conda `_ environments support
+  both Python packages and native libraries (e.g., CuDNN or Intel MKL). When an MLflow Project
+  specifies a Conda environment, it is activated before project code is run.
+
+  By default, MLflow uses the system path to find and run the ``conda`` binary. You can use a
+  different Conda installation by setting the ``MLFLOW_CONDA_HOME`` environment variable; in this
+  case, MLflow attempts to run the binary at ``$MLFLOW_CONDA_HOME/bin/conda``.
+
+  You can specify a Conda environment for your MLflow project by including a ``conda.yaml``
+  file in the root of the project directory or by including a ``conda_env`` entry in your
+  ``MLproject`` file. For details, see the :ref:`project-directories` and :ref:`mlproject-specify-environment` sections.
+
+.. _project-docker-container-environments:
+
+Docker container environment
+  `Docker containers `_ allow you to capture
+  non-Python dependencies such as Java libraries.
+
+  When you run an MLflow project that specifies a Docker image, MLflow adds a new Docker layer
+  that copies the project's contents into the ``/mlflow/projects/code`` directory. This step produces
+  a new image. MLflow then runs the new image and invokes the project entry point in the resulting
+  container.
+
+  Environment variables, such as ``MLFLOW_TRACKING_URI``, are propagated inside the Docker container
+  during project execution. Additionally, :ref:`runs ` and
+  :ref:`experiments ` created by the project are saved to the
+  tracking server specified by your :ref:`tracking URI `. When running
+  against a local tracking URI, MLflow mounts the host system's tracking directory
+  (e.g., a local ``mlruns`` directory) inside the container so that metrics, parameters, and
+  artifacts logged during project execution are accessible afterwards.
+
+  See `Dockerized Model Training with MLflow
+  `_ for an example of an MLflow
+  project with a Docker environment.
+
+  To specify a Docker container environment, you *must* add an
+  :ref:`MLproject file ` to your project.
+  For information about specifying
+  a Docker container environment in an ``MLproject`` file, see
+  :ref:`mlproject-specify-environment`.
+
+System environment
+  You can also run MLflow Projects directly in your current system environment. All of the
+  project's dependencies must be installed on your system prior to project execution. The system
+  environment is supplied at runtime. It is not part of the MLflow Project's directory contents
+  or ``MLproject`` file. For information about using the system environment when running
+  a project, see the ``Environment`` parameter description in the :ref:`running-projects` section.
+
+.. _project-directories:
+
+Project Directories
+^^^^^^^^^^^^^^^^^^^
+
+When running an MLflow Project directory or repository that does *not* contain an ``MLproject``
+file, MLflow uses the following conventions to determine the project's attributes:
 
 * The project's name is the name of the directory.
+
 * The `Conda environment `_
   is specified in ``conda.yaml``, if present. If no ``conda.yaml`` file is present, MLflow
-  will use a Conda environment containing only Python (specifically, the latest Python available to
+  uses a Conda environment containing only Python (specifically, the latest Python available to
   Conda) when running the project.
 
-* Any ``.py`` and ``.sh`` file in the project can be an entry point, with no parameters explicitly
-  declared. When you execute such a command with a set of parameters, MLflow will pass each
-  parameter on the command line using ``--key value`` syntax.
-
-You can get more control over a project by adding a ``MLproject``, which is simply a text file in
-YAML syntax. The MLproject file looks like this:
+* Any ``.py`` and ``.sh`` file in the project can be an entry point. MLflow uses Python
+  to execute entry points with the ``.py`` extension, and it uses bash to execute entry points with
+  the ``.sh`` extension. For more information about specifying project entry points at runtime,
+  see :ref:`running-projects`.
+
+* By default, entry points do not have any parameters when an ``MLproject`` file is not included.
+  Parameters can be supplied at runtime via the ``mlflow run`` CLI or the
+  :py:func:`mlflow.projects.run` Python API. Runtime parameters are passed to the entry point on the
+  command line using ``--key value`` syntax. For more information about running projects
+  with runtime parameters, see :ref:`running-projects`.
 
-.. code:: yaml
+.. _mlproject-file:
+
+MLproject File
+^^^^^^^^^^^^^^
+
+You can get more control over an MLflow Project by adding an ``MLproject`` file, which is a text
+file in YAML syntax, to the project's root directory. The following is an example of an
+``MLproject`` file:
+
+.. code-block:: yaml
 
     name: My Project
 
     conda_env: my_env.yaml
+    # Can have a docker_env instead of a conda_env, e.g.
+    # docker_env:
+    #    image: mlflow-docker-example
 
     entry_points:
       main:
@@ -88,12 +177,68 @@ YAML syntax. The MLproject file looks like this:
           data_file: path
         command: "python validate.py {data_file}"
 
-As you can see, the file can specify a name and a different environment file, as well as more
-detailed information about each entry point. Specifically, each entry point has a *command* to
-run and *parameters* (including data types). We describe these two pieces next.
+The file can specify a name and :ref:`a Conda or Docker environment
+`, as well as more detailed information about each entry point.
+Specifically, each entry point defines a :ref:`command to run ` and +:ref:`parameters to pass to the command ` (including data types). + +.. _mlproject-specify-environment: + +Specifying an Environment +~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to specify Conda and Docker container environments in an ``MLproject`` file. +``MLproject`` files cannot specify *both* a Conda environment and a Docker environment. + +Conda environment + Include a top-level ``conda_env`` entry in the ``MLproject`` file. + The value of this entry must be a *relative* path to a `Conda environment YAML file + `_ + within the MLflow project's directory. In the following example: + + .. code-block:: yaml + + conda_env: files/config/conda_environment.yaml + + ``conda_env`` refers to an environment file located at + ``/files/config/conda_environment.yaml``, where + ```` is the path to the MLflow project's root directory. + +Docker container environment + Include a top-level ``docker_env`` entry in the ``MLproject`` file. The value of this entry must be the name + of a Docker image that is accessible on the system executing the project; this image name + may include a registry path and tags. Here are a couple of examples. + + .. rubric:: Example 1: Image without a registry path + + .. code-block:: yaml + + docker_env: + image: mlflow-docker-example-environment + + In this example, ``docker_env`` refers to the Docker image with name + ``mlflow-docker-example-environment`` and default tag ``latest``. Because no registry path is + specified, Docker searches for this image on the system that runs the MLflow project. If the + image is not found, Docker attempts to pull it from `DockerHub `_. + + .. rubric:: Example 2: Image in a remote registry + + .. code-block:: yaml + + docker_env: + image: 012345678910.dkr.ecr.us-west-2.amazonaws.com/mlflow-docker-example-environment:7.0 + + In this example, ``docker_env`` refers to the Docker image with name + ``mlflow-docker-example-environment`` and tag ``7.0`` in the Docker registry with path + ``012345678910.dkr.ecr.us-west-2.amazonaws.com``, which corresponds to an + `Amazon ECR registry `_. + When the MLflow project is run, Docker attempts to pull the image from the specified registry. + The system executing the MLflow project must have credentials to pull this image from the specified registry. + +.. _mlproject-command-syntax: Command Syntax -^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~ When specifying an entry point in an ``MLproject`` file, the command can be any string in Python `format string syntax `_. @@ -102,26 +247,26 @@ string for substitution. If you call the project with additional parameters *not ``parameters`` field, MLflow passes them using ``--key value`` syntax, so you can use the ``MLproject`` file to declare types and defaults for just a subset of your parameters. -Before substituting parameters in the command, MLflow escapes them using Python's -`shlex.quote `_ function, so you don't need -to worry about adding quotes inside your command field. +Before substituting parameters in the command, MLflow escapes them using the Python +`shlex.quote `_ function, so you don't +need to worry about adding quotes inside your command field. .. _project_parameters: Specifying Parameters -^^^^^^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~~~~~~ MLflow allows specifying a data type and default value for each parameter. You can specify just the data type by writing: -.. code:: yaml +.. 
code-block:: yaml
 
     parameter_name: data_type
 
 in your YAML file, or add a default value as well using one of the following syntaxes (which are
 equivalent in YAML):
 
-.. code:: yaml
+.. code-block:: yaml
 
     parameter_name: {type: data_type, default: value}  # Short syntax
 
@@ -133,59 +278,70 @@ MLflow supports four parameter types, some of which it treats specially (for exa
 data to local files). Any undeclared parameters are treated as ``string``. The parameter types are:
 
 string
-  Any text string.
+  A text string.
 
 float
   A real number. MLflow validates that the parameter is a number.
 
 path
-  A path on the local file system. MLflow will convert any relative paths passed for
-  parameters of this type to absolute paths, and will also download any paths passed
-  as distributed storage URIs (``s3://`` and ``dbfs://``) to local files. Use this type
-  for programs that can only read local files.
+  A path on the local file system. MLflow converts any relative ``path`` parameters to absolute
+  paths. MLflow also downloads any paths passed as distributed storage URIs
+  (``s3://`` and ``dbfs://``) to local files. Use this type for programs that can only read local
+  files.
 
 uri
-  A URI for data either in a local or distributed storage system. MLflow will convert
-  any relative paths to absolute paths, as in the ``path`` type. Use this type for programs
-  that know how to read from distributed storage (for example using Spark).
+  A URI for data either in a local or distributed storage system. MLflow converts
+  relative paths to absolute paths, as in the ``path`` type. Use this type for programs
+  that know how to read from distributed storage (e.g., programs that use Spark).
+
+.. _running-projects:
 
 Running Projects
 ----------------
 
-MLflow provides two simple ways to run projects: the ``mlflow run`` :ref:`command-line tool `, or
+MLflow provides two ways to run projects: the ``mlflow run`` :ref:`command-line tool `, or
 the :py:func:`mlflow.projects.run` Python API. Both tools take the following parameters:
 
 Project URI
-  Can be either a directory on the local file system or a Git repository path,
+  A directory on the local file system or a Git repository path,
   specified as a URI of the form ``https://<repo>`` (to use HTTPS) or ``user@host:path``
   (to use Git over SSH). To run against an MLproject file located in a subdirectory of the project,
   add a '#' to the end of the URI argument, followed by the relative path from the project's root directory
   to the subdirectory containing the desired project.
 
 Project Version
-  Either the commit hash or branch name in the Git repository to run, for Git-based projects.
+  For Git-based projects, the commit hash or branch name in the Git repository.
 
 Entry Point
-  The name of the entry point to use, which defaults to ``main``. You can use any
+  The name of the entry point, which defaults to ``main``. You can use any
   entry point named in the ``MLproject`` file, or any ``.py`` or ``.sh`` file in the project,
   given as a path from the project root (for example, ``src/test.py``).
 
 Parameters
   Key-value parameters. Any parameters with
-  :ref:`declared types ` will be validated and transformed if needed.
+  :ref:`declared types ` are validated and transformed if needed.
 
 Deployment Mode
-  Both the command-line and API let you :ref:`launch projects remotely ` on
-  a `Databricks `_ environment if you have a Databricks account. This
-  includes setting cluster parameters such as a VM type. Of course, you can also run projects on
-  any other computing infrastructure of your choice using the local version of the ``mlflow run``
-  command (for example, submit a script that does ``mlflow run`` to a standard job queueing system).
+
+  - Both the command-line and API let you :ref:`launch projects remotely `
+    in a `Databricks `_ environment. This includes setting cluster
+    parameters such as a VM type. Of course, you can also run projects on any other computing
+    infrastructure of your choice using the local version of the ``mlflow run`` command (for
+    example, submit a script that does ``mlflow run`` to a standard job queueing system).
 
-For example, the tutorial creates and publishes a MLproject that trains a linear model. The
-project is also published on GitHub at https://github.com/mlflow/mlflow-example. To execute
-this project, run:
+  - You can also launch projects remotely on `Kubernetes `_ clusters
+    using the ``mlflow run`` CLI (see :ref:`kubernetes_execution`).
 
-.. code:: bash
+Environment
+  By default, MLflow Projects are run in the environment specified by the project directory
+  or the ``MLproject`` file (see :ref:`Specifying Project Environments `).
+  You can ignore a project's specified environment and run the project in the current
+  system environment by supplying the ``--no-conda`` flag.
+
+For example, the tutorial creates and publishes an MLflow Project that trains a linear model. The
+project is also published on GitHub at https://github.com/mlflow/mlflow-example. To run
+this project:
+
+.. code-block:: bash
 
   mlflow run git@github.com:mlflow/mlflow-example.git -P alpha=0.5
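+The same run can be launched from Python with the :py:func:`mlflow.projects.run` API; the
+following sketch mirrors the CLI invocation above:
+
+.. code-block:: py
+
+    import mlflow.projects
+
+    # Run the example project's `main` entry point with alpha=0.5
+    submitted_run = mlflow.projects.run(
+        uri="git@github.com:mlflow/mlflow-example.git",
+        parameters={"alpha": 0.5})
+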
@@ -194,33 +350,176 @@ useful if you quickly want to test a project in your existing shell environment
 
 .. _databricks_execution:
 
-Remote Execution on Databricks
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Support for running projects on Databricks will be released soon -
-`sign up here `_ to receive updates.
+Run an MLflow Project on Databricks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+You can run MLflow Projects remotely on Databricks. To use this feature, you must have an enterprise
+Databricks account (Community Edition is not supported) and you must have set up the
+`Databricks CLI `_. Find more detailed instructions
+in the Databricks docs
+(`Azure Databricks `_,
+`Databricks on AWS `_). A brief overview
+of how to use the feature is as follows:
 
-Launching a Run
-~~~~~~~~~~~~~~~
-First, create a JSON file containing the
+.. important::
+
+  Databricks execution for MLflow projects with Docker environments is *not* currently supported.
+
+Create a JSON file containing the
 `cluster specification `_
-for your run. Then, run your project via
+for your run. Then, run your project using the command
+
+.. code-block:: bash
+
+  mlflow run <uri> -m databricks --cluster-spec <json-cluster-spec>
 
-.. code:: bash
+where ``<uri>`` is a Git repository URI or a folder.
 
-  mlflow run <uri> -m databricks --cluster-spec <json-cluster-spec>
+.. _kubernetes_execution:
 
-``<uri>`` must be a Git repository URI. You can also pass Git credentials via the
-``git-username`` and ``git-password`` arguments (or via the ``MLFLOW_GIT_USERNAME`` and
-``MLFLOW_GIT_PASSWORD`` environment variables).
+Run an MLflow Project on Kubernetes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can run MLflow Projects with :ref:`Docker environments `
+on Kubernetes. The following sections provide an overview of the feature, including a simple
+Project execution guide with examples.
+
+To see this feature in action, you can also refer to the
+`Docker example `_, which includes
+the required Kubernetes backend configuration (``kubernetes_backend.json``) and `Kubernetes Job Spec
+`_
+(``kubernetes_job_template.yaml``) files.
+
+How it works
+~~~~~~~~~~~~
+
+When you run an MLflow Project on Kubernetes, MLflow constructs a new Docker image
+containing the Project's contents; this image inherits from the Project's
+:ref:`Docker environment `. MLflow then pushes the new
+Project image to your specified Docker registry and starts a
+`Kubernetes Job `_
+on your specified Kubernetes cluster. This Kubernetes Job downloads the Project image and starts
+a corresponding Docker container. Finally, the container invokes your Project's
+:ref:`entry point `, logging parameters, tags, metrics, and artifacts to your
+:ref:`MLflow tracking server `.
+
+Execution guide
+~~~~~~~~~~~~~~~
+You can run your MLflow Project on Kubernetes by following these steps:
+
+1. Add a Docker environment to your MLflow Project, if one does not already exist. For
+   reference, see :ref:`mlproject-specify-environment`.
+
+2. Create a backend configuration JSON file with the following entries:
+
+   - ``kube-context``
+     The `Kubernetes context
+     `_
+     where MLflow will run the job.
+   - ``repository-uri``
+     The URI of the Docker repository where the Project execution Docker image will be uploaded
+     (pushed). Your Kubernetes cluster must have access to this repository in order to run your
+     MLflow Project.
+   - ``kube-job-template-path``
+     The path to a YAML configuration file for your Kubernetes Job - a `Kubernetes Job Spec
+     `_.
+     MLflow reads the Job Spec and replaces certain fields to facilitate job execution and
+     monitoring; MLflow does not modify the original template file. For more information about
+     writing Kubernetes Job Spec templates for use with MLflow, see the
+     :ref:`kubernetes_execution_job_templates` section.
+
+   .. rubric:: Example Kubernetes backend configuration
+
+   .. code-block:: json
+
+     {
+       "kube-context": "docker-for-desktop",
+       "repository-uri": "username/mlflow-kubernetes-example",
+       "kube-job-template-path": "/Users/username/path/to/kubernetes_job_template.yaml"
+     }
+
+3. If necessary, obtain credentials to access your Project's Docker and Kubernetes resources, including:
+
+   - The :ref:`Docker environment image ` specified in the MLproject
+     file.
+   - The Docker repository referenced by ``repository-uri`` in your backend configuration file.
+   - The `Kubernetes context
+     `_
+     referenced by ``kube-context`` in your backend configuration file.
+
+   MLflow expects these resources to be accessible via the
+   `docker `_ and
+   `kubectl `_ CLIs before running the
+   Project.
+
+4. Run the Project using the MLflow Projects CLI or :py:func:`Python API `,
+   specifying your Project URI and the path to your backend configuration file. For example:
+
+   .. code-block:: bash
+
+     mlflow run <uri> --backend kubernetes --backend-config examples/docker/kubernetes_config.json
+
+   where ``<uri>`` is a Git repository URI or a folder.
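+
+   As a sketch of this step using the Python API (the project URI and configuration path below
+   refer to the Docker example mentioned earlier and are illustrative):
+
+   .. code-block:: py
+
+       import mlflow.projects
+
+       # Launch the project on Kubernetes; `backend_config` is the JSON file from step 2
+       submitted_run = mlflow.projects.run(
+           uri="examples/docker",
+           backend="kubernetes",
+           backend_config="examples/docker/kubernetes_config.json")
+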
+.. _kubernetes_execution_job_templates:
+
+Job Templates
+~~~~~~~~~~~~~
+
+MLflow executes Projects on Kubernetes by creating `Kubernetes Job resources
+`_.
+MLflow creates a Kubernetes Job for an MLflow Project by reading a user-specified
+`Job Spec
+`_.
+When MLflow reads a Job Spec, it formats the following fields:
+
+- ``metadata.name`` Replaced with a string containing the name of the MLflow Project and the time
+  of Project execution.
+- ``spec.template.spec.container[0].name`` Replaced with the name of the MLflow Project.
+- ``spec.template.spec.container[0].image`` Replaced with the URI of the Docker image created during
+  Project execution. This URI includes the Docker image's digest hash.
+- ``spec.template.spec.container[0].command`` Replaced with the Project entry point command
+  specified when executing the MLflow Project.
+
+The following example shows a simple Kubernetes Job Spec that is compatible with MLflow Project
+execution. Replaced fields are indicated using bracketed text.
+
+.. rubric:: Example Kubernetes Job Spec
+
+.. code-block:: yaml
+
+  apiVersion: batch/v1
+  kind: Job
+  metadata:
+    name: "{replaced with MLflow Project name}"
+    namespace: mlflow
+  spec:
+    ttlSecondsAfterFinished: 100
+    backoffLimit: 0
+    template:
+      spec:
+        containers:
+        - name: "{replaced with MLflow Project name}"
+          image: "{replaced with URI of Docker image created during Project execution}"
+          command: ["{replaced with MLflow Project entry point command}"]
+          resources:
+            limits:
+              memory: 512Mi
+            requests:
+              memory: 256Mi
+        restartPolicy: Never
+
+The ``container.name``, ``container.image``, and ``container.command`` fields are only replaced for
+the *first* container defined in the Job Spec. All subsequent container definitions are applied
+without modification.
 
 Iterating Quickly
 -----------------
 
 If you want to rapidly develop a project, we recommend creating an ``MLproject`` file with your
 main program specified as the ``main`` entry point, and running it with ``mlflow run .``.
-To avoid repeatedly writing them you can add default parameters in your ``MLproject`` file.
+To avoid having to write parameters repeatedly, you can add default parameters in your ``MLproject`` file.
 
 Building Multistep Workflows
 -----------------------------
diff --git a/docs/source/python_api/index.rst b/docs/source/python_api/index.rst
index b2a9cd3c6942b..c63039f540abe 100644
--- a/docs/source/python_api/index.rst
+++ b/docs/source/python_api/index.rst
@@ -3,13 +3,13 @@
 Python API
 ==========
 
-The MLflow Python API is organized into the following modules. The most common functions are also
+The MLflow Python API is organized into the following modules. The most common functions are
 exposed in the :py:mod:`mlflow` module, so we recommend starting there.
 
 .. toctree::
-  :glob:
+    :glob:
 
-  *
+    *
 
-See also an :ref:`index of all functions and classes`.
+See also the :ref:`index of all functions and classes`.
diff --git a/docs/source/python_api/mlflow.onnx.rst b/docs/source/python_api/mlflow.onnx.rst
new file mode 100644
index 0000000000000..ee80fd153070e
--- /dev/null
+++ b/docs/source/python_api/mlflow.onnx.rst
@@ -0,0 +1,7 @@
+mlflow.onnx
+===========
+
+.. automodule:: mlflow.onnx
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/docs/source/python_api/mlflow.pyfunc.rst b/docs/source/python_api/mlflow.pyfunc.rst
index f056da14d162e..4715d0fbf439d 100644
--- a/docs/source/python_api/mlflow.pyfunc.rst
+++ b/docs/source/python_api/mlflow.pyfunc.rst
@@ -5,3 +5,20 @@ mlflow.pyfunc
     :members:
     :undoc-members:
     :show-inheritance:
+
+.. Include ``get_default_conda_env()``, which is imported from `mlflow.pyfunc.model`, in the
+   `mlflow.pyfunc` namespace
+.. autofunction:: mlflow.pyfunc.get_default_conda_env
+
+.. Include ``PythonModelContext`` as a renamed class to avoid documenting constructor parameters.
+   This class is meant to be constructed implicitly, and users should only be aware of its
+   documented member properties.
+.. autoclass:: mlflow.pyfunc.PythonModelContext()
+    :members:
+    :undoc-members:
+
+.. Include ``PythonModel``, which is imported from `mlflow.pyfunc.model`, in the
+   `mlflow.pyfunc` namespace
+.. autoclass:: mlflow.pyfunc.PythonModel
+    :members:
+    :undoc-members:
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst
index 2fa85636382de..2e036030355ce 100644
--- a/docs/source/quickstart.rst
+++ b/docs/source/quickstart.rst
@@ -9,14 +9,15 @@ Installing MLflow
 You install MLflow by running:
 
 .. code-section::
-    .. code-block:: bash
+
+    .. code-block:: python
 
         pip install mlflow
 
     .. code-block:: R
 
-        devtools::install_github("mlflow/mlflow", subdir = "mlflow/R/mlflow")
-        mlflow_install()
+        install.packages("mlflow")
+        mlflow::install_mlflow()
 
 .. note::
 
@@ -46,6 +47,7 @@ science code and see a history of your runs. You can try it out by writing a sim
 as follows (this example is also included in ``quickstart/mlflow_tracking.py``):
 
 .. code-section::
+
     .. code-block:: python
 
         import os
@@ -64,6 +66,7 @@ as follows (this example is also included in ``quickstart/mlflow_tracking.py``):
         with open("output.txt", "w") as f:
             f.write("Hello world!")
         log_artifact("output.txt")
+
     .. code-block:: R
 
         library(mlflow)
@@ -87,9 +90,11 @@ By default, wherever you run your program, the tracking API writes data into fil
 You can then run MLflow's Tracking UI:
 
 .. code-section::
-    .. code-block:: bash
+
+    .. code-block:: python
 
         mlflow ui
+
     .. code-block:: R
 
         mlflow_ui()
@@ -113,16 +118,16 @@ project and what arguments they take. You can easily run existing projects with
 the ``mlflow run`` command, which runs a project from either a local directory or a GitHub URI:
 
-.. code:: bash
+.. code-block:: bash
 
-  mlflow run tutorial -P alpha=0.5
+  mlflow run sklearn_elasticnet_wine -P alpha=0.5
 
-  mlflow run git@github.com:mlflow/mlflow-example.git -P alpha=5
+  mlflow run https://github.com/mlflow/mlflow-example.git -P alpha=5
 
 There's a sample project in ``tutorial``, including a ``MLproject`` file that
-specifies its dependencies. All projects that run also log their Tracking API data in the local
-``mlruns`` directory (or on your tracking server if you've configured one), so you should be able
-to see these runs using ``mlflow ui``.
+specifies its dependencies. If you haven't configured a :ref:`tracking server `,
+projects log their Tracking API data in the local ``mlruns`` directory so you can see these
+runs using ``mlflow ui``.
 
 .. note::
 
     By default ``mlflow run`` installs all dependencies using `conda `_.
@@ -145,7 +150,7 @@ To illustrate this functionality, the ``mlflow.sklearn`` package can log scikit-
 MLflow artifacts and then load them again for serving. There is an example training application in
 ``sklearn_logistic_regression/train.py`` that you can run as follows:
 
-.. code:: bash
+.. code-block:: bash
 
     python sklearn_logistic_regression/train.py
 
@@ -153,37 +158,30 @@ When you run the example, it outputs an MLflow run ID for that experiment. If yo
 ``mlflow ui``, you will also see that the run saved a ``model`` folder containing an ``MLmodel``
 description file and a pickled scikit-learn model. You can pass the run ID and the path of the
 model within the artifacts directory (here "model") to various tools. For example, MLflow includes a
For example, MLflow includes a -simple REST server for scikit-learn models: +simple REST server for python-based models: -.. code:: bash +.. code-block:: bash - mlflow sklearn serve -r model + mlflow models serve -m runs://model .. note:: By default the server runs on port 5000. If that port is already in use, use the `--port` option to - specify a different port. For example: ``mlflow sklearn serve --port 1234 -r model`` + specify a different port. For example: ``mlflow models serve -m runs://model --port 1234`` -Once you have started the server, you can pass it some sample data with ``curl`` and see the -predictions: +Once you have started the server, you can pass it some sample data and see the +predictions. -.. code:: bash +The following example uses ``curl`` to send a JSON-serialized pandas DataFrame with the ``split`` +orientation to the model server. For more information about the input data formats accepted by +the pyfunc model server, see the :ref:`MLflow deployment tools documentation `. - curl -d '[{"x": 1}, {"x": -1}]' -H 'Content-Type: application/json' -X POST localhost:5000/invocations +.. code-block:: bash + + curl -d '{"columns":["x"], "data":[[1], [-1]]}' -H 'Content-Type: application/json; format=pandas-split' -X POST localhost:5000/invocations which returns:: {"predictions": [1, 0]} -.. note:: - - The ``sklearn_logistic_regression/train.py`` script must be run with the same Python version as - the version of Python that runs ``mlflow sklearn serve``. If they are not the same version, - the stacktrace below may appear:: - - File "/usr/local/lib/python3.6/site-packages/mlflow/sklearn.py", line 54, in _load_model_from_local_file - return pickle.load(f) - UnicodeDecodeError: 'ascii' codec can't decode byte 0xc6 in position 0: ordinal not in range(128) - - For more information, see :doc:`models`. diff --git a/docs/source/rest-api.rst b/docs/source/rest-api.rst index 50b2fec60e279..b73af521dcc68 100755 --- a/docs/source/rest-api.rst +++ b/docs/source/rest-api.rst @@ -25,11 +25,11 @@ Create Experiment ================= -+-------------------------------------------+-------------+ -| Endpoint | HTTP Method | -+===========================================+=============+ -| ``2.0/preview/mlflow/experiments/create`` | ``POST`` | -+-------------------------------------------+-------------+ ++-----------------------------------+-------------+ +| Endpoint | HTTP Method | ++===================================+=============+ +| ``2.0/mlflow/experiments/create`` | ``POST`` | ++-----------------------------------+-------------+ Create an experiment with a name. Returns the ID of the newly created experiment. Validates that another experiment with the same name does not already exist and fails if @@ -72,11 +72,11 @@ Response Structure -+---------------+-----------+---------------------------------------+ -| Field Name | Type | Description | -+===============+===========+=======================================+ -| experiment_id | ``INT64`` | Unique identifier for the experiment. | -+---------------+-----------+---------------------------------------+ ++---------------+------------+---------------------------------------+ +| Field Name | Type | Description | ++===============+============+=======================================+ +| experiment_id | ``STRING`` | Unique identifier for the experiment. 
| ++---------------+------------+---------------------------------------+ =========================== @@ -88,11 +88,11 @@ List Experiments ================ -+-----------------------------------------+-------------+ -| Endpoint | HTTP Method | -+=========================================+=============+ -| ``2.0/preview/mlflow/experiments/list`` | ``GET`` | -+-----------------------------------------+-------------+ ++---------------------------------+-------------+ +| Endpoint | HTTP Method | ++=================================+=============+ +| ``2.0/mlflow/experiments/list`` | ``GET`` | ++---------------------------------+-------------+ Get a list of all experiments. @@ -126,11 +126,11 @@ Response Structure -+-------------+-------------------------------------+-----------------+ -| Field Name | Type | Description | -+=============+=====================================+=================+ -| experiments | An array of :ref:`mlflowexperiment` | All experiments | -+-------------+-------------------------------------+-----------------+ ++-------------+-------------------------------------+------------------+ +| Field Name | Type | Description | ++=============+=====================================+==================+ +| experiments | An array of :ref:`mlflowexperiment` | All experiments. | ++-------------+-------------------------------------+------------------+ =========================== @@ -142,13 +142,14 @@ Get Experiment ============== -+----------------------------------------+-------------+ -| Endpoint | HTTP Method | -+========================================+=============+ -| ``2.0/preview/mlflow/experiments/get`` | ``GET`` | -+----------------------------------------+-------------+ ++--------------------------------+-------------+ +| Endpoint | HTTP Method | ++================================+=============+ +| ``2.0/mlflow/experiments/get`` | ``GET`` | ++--------------------------------+-------------+ Get metadata for an experiment and a list of runs for the experiment. +This method works on deleted experiments. @@ -163,13 +164,13 @@ Request Structure -+---------------+-----------+----------------------------------+ -| Field Name | Type | Description | -+===============+===========+==================================+ -| experiment_id | ``INT64`` | Identifier to get an experiment. | -| | | This field is required. | -| | | | -+---------------+-----------+----------------------------------+ ++---------------+------------+----------------------------------+ +| Field Name | Type | Description | ++===============+============+==================================+ +| experiment_id | ``STRING`` | ID of the associated experiment. | +| | | This field is required. | +| | | | ++---------------+------------+----------------------------------+ .. _mlflowGetExperimentResponse: @@ -181,13 +182,13 @@ Response Structure -+------------+----------------------------------+---------------------------------------------------------------------+ -| Field Name | Type | Description | -+============+==================================+=====================================================================+ -| experiment | :ref:`mlflowexperiment` | Returns experiment details. | -+------------+----------------------------------+---------------------------------------------------------------------+ -| runs | An array of :ref:`mlflowruninfo` | All (max limit to be imposed) runs associated with this experiment. 
| -+------------+----------------------------------+---------------------------------------------------------------------+ ++------------+----------------------------------+----------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+==================================+============================================================================+ +| experiment | :ref:`mlflowexperiment` | Experiment details. | ++------------+----------------------------------+----------------------------------------------------------------------------+ +| runs | An array of :ref:`mlflowruninfo` | All (max limit to be imposed) active runs associated with this experiment. | ++------------+----------------------------------+----------------------------------------------------------------------------+ =========================== @@ -195,17 +196,17 @@ Response Structure .. _mlflowMlflowServicedeleteExperiment: -Experiments Delete -========================= +Delete Experiment +================= -+-------------------------------------------+-------------+ -| Endpoint | HTTP Method | -+===========================================+=============+ -| ``2.0/preview/mlflow/experiments/delete`` | ``POST`` | -+-------------------------------------------+-------------+ ++-----------------------------------+-------------+ +| Endpoint | HTTP Method | ++===================================+=============+ +| ``2.0/mlflow/experiments/delete`` | ``POST`` | ++-----------------------------------+-------------+ -Mark an experiment and associated runs, params, metrics, ... etc for deletion. +Mark an experiment and associated metadata, runs, metrics, params, and tags for deletion. If the experiment uses FileStore, artifacts associated with experiment are also deleted. @@ -221,13 +222,13 @@ Request Structure -+---------------+-----------+---------------------------------+ -| Field Name | Type | Description | -+===============+===========+=================================+ -| experiment_id | ``INT64`` | ID of the associated experiment | -| | | This field is required. | -| | | | -+---------------+-----------+---------------------------------+ ++---------------+------------+----------------------------------+ +| Field Name | Type | Description | ++===============+============+==================================+ +| experiment_id | ``STRING`` | ID of the associated experiment. | +| | | This field is required. | +| | | | ++---------------+------------+----------------------------------+ =========================== @@ -235,18 +236,18 @@ Request Structure .. _mlflowMlflowServicerestoreExperiment: -Experiments Restore -========================== +Restore Experiment +================== -+--------------------------------------------+-------------+ -| Endpoint | HTTP Method | -+============================================+=============+ -| ``2.0/preview/mlflow/experiments/restore`` | ``POST`` | -+--------------------------------------------+-------------+ ++------------------------------------+-------------+ +| Endpoint | HTTP Method | ++====================================+=============+ +| ``2.0/mlflow/experiments/restore`` | ``POST`` | ++------------------------------------+-------------+ Restore an experiment marked for deletion. This also restores -associated metadata, runs, metrics, and params. If experiment uses FileStore, underlying +associated metadata, runs, metrics, params, and tags. 
If experiment uses FileStore, underlying artifacts associated with experiment are also restored. Throws ``RESOURCE_DOES_NOT_EXIST`` if experiment was never created or was permanently deleted. @@ -264,13 +265,54 @@ Request Structure -+---------------+-----------+---------------------------------+ -| Field Name | Type | Description | -+===============+===========+=================================+ -| experiment_id | ``INT64`` | ID of the associated experiment | -| | | This field is required. | -| | | | -+---------------+-----------+---------------------------------+ ++---------------+------------+----------------------------------+ +| Field Name | Type | Description | ++===============+============+==================================+ +| experiment_id | ``STRING`` | ID of the associated experiment. | +| | | This field is required. | +| | | | ++---------------+------------+----------------------------------+ + +=========================== + + + +.. _mlflowMlflowServiceupdateExperiment: + +Update Experiment +================= + + ++-----------------------------------+-------------+ +| Endpoint | HTTP Method | ++===================================+=============+ +| ``2.0/mlflow/experiments/update`` | ``POST`` | ++-----------------------------------+-------------+ + +Update experiment metadata. + + + + +.. _mlflowUpdateExperiment: + +Request Structure +----------------- + + + + + + ++---------------+------------+---------------------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++===============+============+=============================================================================================+ +| experiment_id | ``STRING`` | ID of the associated experiment. | +| | | This field is required. | +| | | | ++---------------+------------+---------------------------------------------------------------------------------------------+ +| new_name | ``STRING`` | If provided, the experiment's name is changed to the new name. The new name must be unique. | ++---------------+------------+---------------------------------------------------------------------------------------------+ =========================== @@ -282,11 +324,11 @@ Create Run ========== -+------------------------------------+-------------+ -| Endpoint | HTTP Method | -+====================================+=============+ -| ``2.0/preview/mlflow/runs/create`` | ``POST`` | -+------------------------------------+-------------+ ++----------------------------+-------------+ +| Endpoint | HTTP Method | ++============================+=============+ +| ``2.0/mlflow/runs/create`` | ``POST`` | ++----------------------------+-------------+ Create a new run within an experiment. A run is usually a single execution of a machine learning or data ETL pipeline. MLflow uses runs to track :ref:`mlflowParam`, @@ -305,28 +347,19 @@ Request Structure -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| Field Name | Type | Description | -+==================+=================================+================================================================================================+ -| experiment_id | ``INT64`` | ID of the associated experiment. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| user_id | ``STRING`` | ID of the user executing the run. 
| -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| run_name | ``STRING`` | Human readable name for the run. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| source_type | :ref:`mlflowsourcetype` | Originating source for the run. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| source_name | ``STRING`` | String descriptor for the run's source. For example, name or description of a notebook, or the | -| | | URL or path to a project. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| entry_point_name | ``STRING`` | Name of the project entry point associated with the current run, if any. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| start_time | ``INT64`` | Unix timestamp of when the run started in milliseconds. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| source_version | ``STRING`` | Git commit hash of the source code used to create run. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ -| tags | An array of :ref:`mlflowruntag` | Additional metadata for run. | -+------------------+---------------------------------+------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------+----------------------------------------------------------------------------+ +| Field Name | Type | Description | ++===============+=================================+============================================================================+ +| experiment_id | ``STRING`` | ID of the associated experiment. | ++---------------+---------------------------------+----------------------------------------------------------------------------+ +| user_id | ``STRING`` | ID of the user executing the run. | +| | | This field is deprecated as of MLflow 1.0, and will be removed in a future | +| | | MLflow release. Use 'mlflow.user' tag instead. | ++---------------+---------------------------------+----------------------------------------------------------------------------+ +| start_time | ``INT64`` | Unix timestamp in milliseconds of when the run started. | ++---------------+---------------------------------+----------------------------------------------------------------------------+ +| tags | An array of :ref:`mlflowruntag` | Additional metadata for run. | ++---------------+---------------------------------+----------------------------------------------------------------------------+ .. _mlflowCreateRunResponse: @@ -348,25 +381,24 @@ Response Structure -.. _mlflowMlflowServicegetRun: +.. 
_mlflowMlflowServicedeleteRun: -Get Run -======= +Delete Run +========== -+---------------------------------+-------------+ -| Endpoint | HTTP Method | -+=================================+=============+ -| ``2.0/preview/mlflow/runs/get`` | ``GET`` | -+---------------------------------+-------------+ ++----------------------------+-------------+ +| Endpoint | HTTP Method | ++============================+=============+ +| ``2.0/mlflow/runs/delete`` | ``POST`` | ++----------------------------+-------------+ -Get metadata, params, tags, and metrics for a run. Only the last logged value for each metric -is returned. +Mark a run for deletion. -.. _mlflowGetRun: +.. _mlflowDeleteRun: Request Structure ----------------- @@ -376,155 +408,135 @@ Request Structure -+------------+------------+-------------------------+ -| Field Name | Type | Description | -+============+============+=========================+ -| run_uuid | ``STRING`` | ID of the run to fetch. | -| | | This field is required. | -| | | | -+------------+------------+-------------------------+ ++------------+------------+--------------------------+ +| Field Name | Type | Description | ++============+============+==========================+ +| run_id | ``STRING`` | ID of the run to delete. | +| | | This field is required. | +| | | | ++------------+------------+--------------------------+ -.. _mlflowGetRunResponse: +=========================== -Response Structure ------------------- +.. _mlflowMlflowServicerestoreRun: +Restore Run +=========== ++-----------------------------+-------------+ +| Endpoint | HTTP Method | ++=============================+=============+ +| ``2.0/mlflow/runs/restore`` | ``POST`` | ++-----------------------------+-------------+ -+------------+------------------+-----------------------------------------------------------------------+ -| Field Name | Type | Description | -+============+==================+=======================================================================+ -| run | :ref:`mlflowrun` | Run metadata (name, start time, etc) and data (metrics, params, etc). | -+------------+------------------+-----------------------------------------------------------------------+ +Restore a deleted run. -=========================== -.. _mlflowMlflowServicelogMetric: +.. _mlflowRestoreRun: -Log Metric -========== +Request Structure +----------------- -+----------------------------------------+-------------+ -| Endpoint | HTTP Method | -+========================================+=============+ -| ``2.0/preview/mlflow/runs/log-metric`` | ``POST`` | -+----------------------------------------+-------------+ -Log a metric for a run. A metric is a key-value pair (string key, float value) with an -associated timestamp. Examples include the various metrics that represent ML model accuracy. -A metric can be logged multiple times. ++------------+------------+---------------------------+ +| Field Name | Type | Description | ++============+============+===========================+ +| run_id | ``STRING`` | ID of the run to restore. | +| | | This field is required. | +| | | | ++------------+------------+---------------------------+ -.. _mlflowLogMetric: +=========================== -Request Structure ------------------ +.. 
_mlflowMlflowServicegetRun: +Get Run +======= ++-------------------------+-------------+ +| Endpoint | HTTP Method | ++=========================+=============+ +| ``2.0/mlflow/runs/get`` | ``GET`` | ++-------------------------+-------------+ -+------------+------------+---------------------------------------------------------------+ -| Field Name | Type | Description | -+============+============+===============================================================+ -| run_uuid | ``STRING`` | ID of the run under which to log the metric. | -| | | This field is required. | -| | | | -+------------+------------+---------------------------------------------------------------+ -| key | ``STRING`` | Name of the metric. | -| | | This field is required. | -| | | | -+------------+------------+---------------------------------------------------------------+ -| value | ``FLOAT`` | Float value of the metric being logged. | -| | | This field is required. | -| | | | -+------------+------------+---------------------------------------------------------------+ -| timestamp | ``INT64`` | Unix timestamp in milliseconds at the time metric was logged. | -| | | This field is required. | -| | | | -+------------+------------+---------------------------------------------------------------+ +Get metadata, metrics, params, and tags for a run. In the case where multiple metrics +with the same key are logged for a run, return only the value with the latest timestamp. +If there are multiple values with the latest timestamp, return the maximum of these values. -=========================== -.. _mlflowMlflowServicesetTag: +.. _mlflowGetRun: -Set Tag -======= +Request Structure +----------------- -+-------------------------------------+-------------+ -| Endpoint | HTTP Method | -+=====================================+=============+ -| ``2.0/preview/mlflow/runs/set-tag`` | ``POST`` | -+-------------------------------------+-------------+ -Set a tag on a run. Tags are run metadata that can be updated during a run and after -a run completes. ++------------+------------+--------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+============+==========================================================================+ +| run_id | ``STRING`` | ID of the run to fetch. Must be provided. | ++------------+------------+--------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] ID of the run to fetch. This field will | +| | | be removed in a future MLflow version. | ++------------+------------+--------------------------------------------------------------------------+ -.. _mlflowSetTag: +.. _mlflowGetRunResponse: -Request Structure ------------------ +Response Structure +------------------ -+------------+------------+------------------------------------------------------------------+ -| Field Name | Type | Description | -+============+============+==================================================================+ -| run_uuid | ``STRING`` | ID of the run under which to set the tag. | -| | | This field is required. | -| | | | -+------------+------------+------------------------------------------------------------------+ -| key | ``STRING`` | Name of the tag. Maximum size is 255 bytes. | -| | | This field is required. | -| | | | -+------------+------------+------------------------------------------------------------------+ -| value | ``STRING`` | String value of the tag being logged. 
Maximum size if 500 bytes. | -| | | This field is required. | -| | | | -+------------+------------+------------------------------------------------------------------+ ++------------+------------------+----------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+==================+============================================================================+ +| run | :ref:`mlflowrun` | Run metadata (name, start time, etc) and data (metrics, params, and tags). | ++------------+------------------+----------------------------------------------------------------------------+ =========================== -.. _mlflowMlflowServicelogParam: +.. _mlflowMlflowServicelogMetric: -Log Param -========= +Log Metric +========== -+-------------------------------------------+-------------+ -| Endpoint | HTTP Method | -+===========================================+=============+ -| ``2.0/preview/mlflow/runs/log-parameter`` | ``POST`` | -+-------------------------------------------+-------------+ ++--------------------------------+-------------+ +| Endpoint | HTTP Method | ++================================+=============+ +| ``2.0/mlflow/runs/log-metric`` | ``POST`` | ++--------------------------------+-------------+ -Log a param used for a run. A param is a key-value pair (string key, -string value). Examples include hyperparameters used for ML model training and -constant dates and values used in an ETL pipeline. A param can be logged only once for a run. +Log a metric for a run. A metric is a key-value pair (string key, float value) with an +associated timestamp. Examples include the various metrics that represent ML model accuracy. +A metric can be logged multiple times. -.. _mlflowLogParam: +.. _mlflowLogMetric: Request Structure ----------------- @@ -534,104 +546,145 @@ Request Structure -+------------+------------+--------------------------------------------------------------------+ -| Field Name | Type | Description | -+============+============+====================================================================+ -| run_uuid | ``STRING`` | ID of the run under which to log the param. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------------------------+ -| key | ``STRING`` | Name of the param. Maximum size is 255 bytes. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------------------------+ -| value | ``STRING`` | String value of the param being logged. Maximum size if 500 bytes. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------------------------+ ++------------+------------+-----------------------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+============+===============================================================================================+ +| run_id | ``STRING`` | ID of the run under which to log the metric. Must be provided. | ++------------+------------+-----------------------------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will | +| | | be removed in a future MLflow version. 
| ++------------+------------+-----------------------------------------------------------------------------------------------+ +| key | ``STRING`` | Name of the metric. | +| | | This field is required. | +| | | | ++------------+------------+-----------------------------------------------------------------------------------------------+ +| value | ``DOUBLE`` | Double value of the metric being logged. | +| | | This field is required. | +| | | | ++------------+------------+-----------------------------------------------------------------------------------------------+ +| timestamp | ``INT64`` | Unix timestamp in milliseconds at the time metric was logged. | +| | | This field is required. | +| | | | ++------------+------------+-----------------------------------------------------------------------------------------------+ +| step | ``INT64`` | Step at which to log the metric | ++------------+------------+-----------------------------------------------------------------------------------------------+ =========================== -.. _mlflowMlflowServicegetParam: +.. _mlflowMlflowServicelogBatch: -Get Param +Log Batch ========= -+-----------------------------------+-------------+ -| Endpoint | HTTP Method | -+===================================+=============+ -| ``2.0/preview/mlflow/params/get`` | ``GET`` | -+-----------------------------------+-------------+ ++-------------------------------+-------------+ +| Endpoint | HTTP Method | ++===============================+=============+ +| ``2.0/mlflow/runs/log-batch`` | ``POST`` | ++-------------------------------+-------------+ -Get a param value. +Log a batch of metrics, params, and tags for a run. +If any data failed to be persisted, the server will respond with an error (non-200 status code). +In case of error (due to internal server error or an invalid request), partial data may +be written. +You can write metrics, params, and tags in interleaving fashion, but within a given entity +type are guaranteed to follow the order specified in the request body. That is, for an API +request like +.. code-block:: json + { + "run_id": "2a14ed5c6a87499199e0106c3501eab8", + "metrics": [ + {"key": "mae", "value": 2.5, "timestamp": 1552550804}, + {"key": "rmse", "value": 2.7, "timestamp": 1552550804}, + ], + "params": [ + {"key": "model_class", "value": "LogisticRegression"}, + ] + } -.. _mlflowGetParam: +the server is guaranteed to write metric "rmse" after "mae", though it may write param +"model_class" before both metrics, after "mae", or after both metrics. -Request Structure ------------------ +The overwrite behavior for metrics, params, and tags is as follows: +- Metrics: metric values are never overwritten. Logging a metric (key, value, timestamp) appends to the set of values for the metric with the provided key. +- Tags: tag values can be overwritten by successive writes to the same tag key. That is, if multiple tag values with the same key are provided in the same API request, the last-provided tag value is written. Logging the same tag (key, value) is permitted - that is, logging a tag is idempotent. +- Params: once written, param values cannot be changed (attempting to overwrite a param value will result in an error). However, logging the same param (key, value) is permitted - that is, logging a param is idempotent. 
+Request Limits +-------------- +A single JSON-serialized API request may be up to 1 MB in size and contain: +- No more than 1000 metrics, params, and tags in total +- Up to 1000 metrics +- Up to 100 params +- Up to 100 tags -+------------+------------+-------------------------------------------------------+ -| Field Name | Type | Description | -+============+============+=======================================================+ -| run_uuid | ``STRING`` | ID of the run from which to retrieve the param value. | -| | | This field is required. | -| | | | -+------------+------------+-------------------------------------------------------+ -| param_name | ``STRING`` | Name of the param. | -| | | This field is required. | -| | | | -+------------+------------+-------------------------------------------------------+ +For example, a valid request might contain 900 metrics, 50 params, and 50 tags, but logging +900 metrics, 50 params, and 51 tags is invalid. The following limits also apply +to metric, param, and tag keys and values: + +- Metric, param, and tag keys can be up to 250 characters in length +- Param and tag values can be up to 250 characters in length -.. _mlflowGetParamResponse: -Response Structure ------------------- +.. _mlflowLogBatch: + +Request Structure +----------------- + -+------------+--------------------+-----------------------+ -| Field Name | Type | Description | -+============+====================+=======================+ -| parameter | :ref:`mlflowparam` | Param key-value pair. | -+------------+--------------------+-----------------------+ + ++------------+---------------------------------+---------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+=================================+=================================================================================+ +| run_id | ``STRING`` | ID of the run to log under | ++------------+---------------------------------+---------------------------------------------------------------------------------+ +| metrics | An array of :ref:`mlflowmetric` | Metrics to log. A single request can contain up to 1000 metrics, and up to 1000 | +| | | metrics, params, and tags in total. | ++------------+---------------------------------+---------------------------------------------------------------------------------+ +| params | An array of :ref:`mlflowparam` | Params to log. A single request can contain up to 100 params, and up to 1000 | +| | | metrics, params, and tags in total. | ++------------+---------------------------------+---------------------------------------------------------------------------------+ +| tags | An array of :ref:`mlflowruntag` | Tags to log. A single request can contain up to 100 tags, and up to 1000 | +| | | metrics, params, and tags in total. | ++------------+---------------------------------+---------------------------------------------------------------------------------+ =========================== -.. _mlflowMlflowServicegetMetric: +.. 
_mlflowMlflowServicesetTag: -Get Metric -========== +Set Tag +======= -+------------------------------------+-------------+ -| Endpoint | HTTP Method | -+====================================+=============+ -| ``2.0/preview/mlflow/metrics/get`` | ``GET`` | -+------------------------------------+-------------+ ++-----------------------------+-------------+ +| Endpoint | HTTP Method | ++=============================+=============+ +| ``2.0/mlflow/runs/set-tag`` | ``POST`` | ++-----------------------------+-------------+ -Get the value for a metric logged during a run. If the metric is logged more -than once, returns the last logged value. +Set a tag on a run. Tags are run metadata that can be updated during a run and after +a run completes. -.. _mlflowGetMetric: +.. _mlflowSetTag: Request Structure ----------------- @@ -641,33 +694,72 @@ Request Structure -+------------+------------+--------------------------------------------------------+ -| Field Name | Type | Description | -+============+============+========================================================+ -| run_uuid | ``STRING`` | ID of the run from which to retrieve the metric value. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------------+ -| metric_key | ``STRING`` | Name of the metric. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------------+ ++------------+------------+--------------------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+============+============================================================================================+ +| run_id | ``STRING`` | ID of the run under which to log the tag. Must be provided. | ++------------+------------+--------------------------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will | +| | | be removed in a future MLflow version. | ++------------+------------+--------------------------------------------------------------------------------------------+ +| key | ``STRING`` | Name of the tag. Maximum size is 255 bytes. | +| | | This field is required. | +| | | | ++------------+------------+--------------------------------------------------------------------------------------------+ +| value | ``STRING`` | String value of the tag being logged. Maximum size is 5000 bytes. | +| | | This field is required. | +| | | | ++------------+------------+--------------------------------------------------------------------------------------------+ -.. _mlflowGetMetricResponse: +=========================== -Response Structure ------------------- +.. _mlflowMlflowServicelogParam: +Log Param +========= ++-----------------------------------+-------------+ +| Endpoint | HTTP Method | ++===================================+=============+ +| ``2.0/mlflow/runs/log-parameter`` | ``POST`` | ++-----------------------------------+-------------+ -+------------+---------------------+------------------------------------------------+ -| Field Name | Type | Description | -+============+=====================+================================================+ -| metric | :ref:`mlflowmetric` | Latest reported value of the specified metric. | -+------------+---------------------+------------------------------------------------+ +Log a param used for a run. 
A param is a key-value pair (string key, +string value). Examples include hyperparameters used for ML model training and +constant dates and values used in an ETL pipeline. A param can be logged only once for a run. + + + + +.. _mlflowLogParam: + +Request Structure +----------------- + + + + + + ++------------+------------+----------------------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+============+==============================================================================================+ +| run_id | ``STRING`` | ID of the run under which to log the param. Must be provided. | ++------------+------------+----------------------------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] ID of the run under which to log the param. This field will | +| | | be removed in a future MLflow version. | ++------------+------------+----------------------------------------------------------------------------------------------+ +| key | ``STRING`` | Name of the param. Maximum size is 255 bytes. | +| | | This field is required. | +| | | | ++------------+------------+----------------------------------------------------------------------------------------------+ +| value | ``STRING`` | String value of the param being logged. Maximum size is 500 bytes. | +| | | This field is required. | +| | | | ++------------+------------+----------------------------------------------------------------------------------------------+ =========================== @@ -679,11 +771,11 @@ Get Metric History ================== -+--------------------------------------------+-------------+ -| Endpoint | HTTP Method | -+============================================+=============+ -| ``2.0/preview/mlflow/metrics/get-history`` | ``GET`` | -+--------------------------------------------+-------------+ ++------------------------------------+-------------+ +| Endpoint | HTTP Method | ++====================================+=============+ +| ``2.0/mlflow/metrics/get-history`` | ``GET`` | ++------------------------------------+-------------+ Get a list of all values for the specified metric for a given run. @@ -700,17 +792,18 @@ Request Structure -+------------+------------+--------------------------------------------------+ -| Field Name | Type | Description | -+============+============+==================================================+ -| run_uuid | ``STRING`` | ID of the run from which to fetch metric values. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------+ -| metric_key | ``STRING`` | Name of the metric. | -| | | This field is required. | -| | | | -+------------+------------+--------------------------------------------------+ ++------------+------------+----------------------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++============+============+==============================================================================================+ +| run_id | ``STRING`` | ID of the run from which to fetch metric values. Must be provided. | ++------------+------------+----------------------------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field | +| | | will be removed in a future MLflow version. 
| ++------------+------------+----------------------------------------------------------------------------------------------+ +| metric_key | ``STRING`` | Name of the metric. | +| | | This field is required. | +| | | | ++------------+------------+----------------------------------------------------------------------------------------------+ .. _mlflowGetMetricHistoryResponse: @@ -738,11 +831,11 @@ Search Runs =========== -+------------------------------------+-------------+ -| Endpoint | HTTP Method | -+====================================+=============+ -| ``2.0/preview/mlflow/runs/search`` | ``POST`` | -+------------------------------------+-------------+ ++----------------------------+-------------+ +| Endpoint | HTTP Method | ++============================+=============+ +| ``2.0/mlflow/runs/search`` | ``POST`` | ++----------------------------+-------------+ Search for runs that satisfy expressions. Search expressions can use :ref:`mlflowMetric` and :ref:`mlflowParam` keys. @@ -760,13 +853,34 @@ Request Structure -+-------------------+-------------------------------------------+--------------------------------------------------------------------+ -| Field Name | Type | Description | -+===================+===========================================+====================================================================+ -| experiment_ids | An array of ``INT64`` | List of experiment IDs to search over. | -+-------------------+-------------------------------------------+--------------------------------------------------------------------+ -| anded_expressions | An array of :ref:`mlflowsearchexpression` | Expressions describing runs (AND-ed together when filtering runs). | -+-------------------+-------------------------------------------+--------------------------------------------------------------------+ ++----------------+------------------------+------------------------------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++================+========================+======================================================================================================+ +| experiment_ids | An array of ``STRING`` | List of experiment IDs to search over. | ++----------------+------------------------+------------------------------------------------------------------------------------------------------+ +| filter | ``STRING`` | A filter expression over params, metrics, and tags, that allows returning a subset of | +| | | runs. The syntax is a subset of SQL that supports ANDing together binary operations | +| | | between a param, metric, or tag and a constant. | +| | | | +| | | Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'`` | +| | | | +| | | You can select columns with special characters (hyphen, space, period, etc.) by using double quotes: | +| | | ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'`` | +| | | | +| | | Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``. | +| | | | +| | | You cannot provide ``filter`` when ``anded_expressions`` is present; an ``INVALID_PARAMETER_VALUE`` | +| | | error will be returned if both are specified. | +| | | If both ``filter`` and ``anded_expressions`` are absent, all runs part of the given experiments | +| | | are returned. 
| ++----------------+------------------------+------------------------------------------------------------------------------------------------------+ +| run_view_type | :ref:`mlflowviewtype` | Whether to display only active, only deleted, or all runs. | +| | | Defaults to only active runs. | ++----------------+------------------------+------------------------------------------------------------------------------------------------------+ +| max_results | ``INT32`` | Maximum number of runs desired. Max threshold is 50000 | ++----------------+------------------------+------------------------------------------------------------------------------------------------------+ +| order_by | An array of ``STRING`` | Ordering expressions like "tags.`model class` DESC" | ++----------------+------------------------+------------------------------------------------------------------------------------------------------+ .. _mlflowSearchRunsResponse: @@ -794,11 +908,11 @@ List Artifacts ============== -+---------------------------------------+-------------+ -| Endpoint | HTTP Method | -+=======================================+=============+ -| ``2.0/preview/mlflow/artifacts/list`` | ``GET`` | -+---------------------------------------+-------------+ ++-------------------------------+-------------+ +| Endpoint | HTTP Method | ++===============================+=============+ +| ``2.0/mlflow/artifacts/list`` | ``GET`` | ++-------------------------------+-------------+ List artifacts for a run. Takes an optional ``artifact_path`` prefix which if specified, the response contains only artifacts with the specified prefix. @@ -819,7 +933,10 @@ Request Structure +------------+------------+-----------------------------------------------------------------------------------------+ | Field Name | Type | Description | +============+============+=========================================================================================+ -| run_uuid | ``STRING`` | ID of the run whose artifacts to list. | +| run_id | ``STRING`` | ID of the run whose artifacts to list. Must be provided. | ++------------+------------+-----------------------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will | +| | | be removed in a future MLflow version. | +------------+------------+-----------------------------------------------------------------------------------------+ | path | ``STRING`` | Filter artifacts matching this path (a relative path from the root artifact directory). | +------------+------------+-----------------------------------------------------------------------------------------+ @@ -852,11 +969,11 @@ Update Run ========== -+------------------------------------+-------------+ -| Endpoint | HTTP Method | -+====================================+=============+ -| ``2.0/preview/mlflow/runs/update`` | ``POST`` | -+------------------------------------+-------------+ ++----------------------------+-------------+ +| Endpoint | HTTP Method | ++============================+=============+ +| ``2.0/mlflow/runs/update`` | ``POST`` | ++----------------------------+-------------+ Update run metadata. 
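+
+As a sketch, the snippet below marks a run as finished through this endpoint, again using the
+Python ``requests`` package and assuming a tracking server at ``localhost:5000`` with the REST API
+mounted under ``/api`` (the run ID is hypothetical):
+
+.. code-block:: python
+
+    import time
+
+    import requests
+
+    payload = {
+        "run_id": "2a14ed5c6a87499199e0106c3501eab8",  # hypothetical run ID
+        "status": "FINISHED",
+        "end_time": int(time.time() * 1000),  # Unix timestamp in milliseconds
+    }
+    requests.post(
+        "http://localhost:5000/api/2.0/mlflow/runs/update", json=payload
+    ).raise_for_status()
+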
@@ -873,17 +990,18 @@ Request Structure

-+------------+------------------------+-------------------------------------------------------+
-| Field Name | Type                   | Description                                           |
-+============+========================+=======================================================+
-| run_uuid   | ``STRING``             | ID of the run to update.                              |
-|            |                        | This field is required.                               |
-|            |                        |                                                       |
-+------------+------------------------+-------------------------------------------------------+
-| status     | :ref:`mlflowrunstatus` | Updated status of the run.                            |
-+------------+------------------------+-------------------------------------------------------+
-| end_time   | ``INT64``              | Unix timestamp of when the run ended in milliseconds. |
-+------------+------------------------+-------------------------------------------------------+
++------------+------------------------+----------------------------------------------------------------------------+
+| Field Name | Type                   | Description                                                                |
++============+========================+============================================================================+
+| run_id     | ``STRING``             | ID of the run to update. Must be provided.                                 |
++------------+------------------------+----------------------------------------------------------------------------+
+| run_uuid   | ``STRING``             | [Deprecated, use run_id instead] ID of the run to update. This field will  |
+|            |                        | be removed in a future MLflow version.                                     |
++------------+------------------------+----------------------------------------------------------------------------+
+| status     | :ref:`mlflowrunstatus` | Updated status of the run.                                                 |
++------------+------------------------+----------------------------------------------------------------------------+
+| end_time   | ``INT64``              | Unix timestamp in milliseconds of when the run ended.                      |
++------------+------------------------+----------------------------------------------------------------------------+

 .. _mlflowUpdateRunResponse:

@@ -901,6 +1019,10 @@ Response Structure
 | run_info   | :ref:`mlflowruninfo` | Updated metadata of the run. |
 +------------+----------------------+------------------------------+

+===========================
+
+
+
 .. _RESTadd:

 Data Structures
@@ -921,7 +1043,7 @@ Experiment

 +-------------------+------------+--------------------------------------------------------------------+
 | Field Name        | Type       | Description                                                        |
 +===================+============+====================================================================+
-| experiment_id     | ``INT64``  | Unique identifier for the experiment.                              |
+| experiment_id     | ``STRING`` | Unique identifier for the experiment.                              |
 +-------------------+------------+--------------------------------------------------------------------+
 | name              | ``STRING`` | Human readable name that identifies the experiment.                |
 +-------------------+------------+--------------------------------------------------------------------+

@@ -955,24 +1077,6 @@ Metadata of a single artifact file or directory.
 | file_size  | ``INT64``  | Size in bytes. Unset for directories.             |
 +------------+------------+---------------------------------------------------+

-.. _mlflowFloatClause:
-
-FloatClause
------------
-
-
-
-
-
-
-+------------+------------+------------------------------------------+
-| Field Name | Type       | Description                              |
-+============+============+==========================================+
-| comparator | ``STRING`` | OneOf (">", ">=", "==", "!=", "<=", "<") |
-+------------+------------+------------------------------------------+
-| value      | ``FLOAT``  | Float value for comparison.
| -+------------+------------+------------------------------------------+ - .. _mlflowMetric: Metric @@ -988,30 +1092,12 @@ Metric associated with a run, represented as a key-value pair. +============+============+==================================================+ | key | ``STRING`` | Key identifying this metric. | +------------+------------+--------------------------------------------------+ -| value | ``FLOAT`` | Value associated with this metric. | +| value | ``DOUBLE`` | Value associated with this metric. | +------------+------------+--------------------------------------------------+ | timestamp | ``INT64`` | The timestamp at which this metric was recorded. | +------------+------------+--------------------------------------------------+ - -.. _mlflowMetricSearchExpression: - -MetricSearchExpression ----------------------- - - - - - - -+------------+--------------------------+--------------------------------------------+ -| Field Name | Type | Description | -+============+==========================+============================================+ -| ``float`` | :ref:`mlflowfloatclause` | | -| | | | -| | | If ``float``, float clause for comparison. | -+------------+--------------------------+--------------------------------------------+ -| key | ``STRING`` | :ref:`mlflowMetric` key for search. | -+------------+--------------------------+--------------------------------------------+ +| step | ``INT64`` | Step at which to log the metric. | ++------------+------------+--------------------------------------------------+ .. _mlflowParam: @@ -1031,26 +1117,6 @@ Param associated with a run. | value | ``STRING`` | Value associated with this param. | +------------+------------+-----------------------------------+ -.. _mlflowParameterSearchExpression: - -ParameterSearchExpression -------------------------- - - - - - - -+------------+---------------------------+----------------------------------------------+ -| Field Name | Type | Description | -+============+===========================+==============================================+ -| ``string`` | :ref:`mlflowstringclause` | | -| | | | -| | | If ``string``, string clause for comparison. | -+------------+---------------------------+----------------------------------------------+ -| key | ``STRING`` | :ref:`mlflowParam` key for search. | -+------------+---------------------------+----------------------------------------------+ - .. _mlflowRun: Run @@ -1076,7 +1142,7 @@ RunData -Run data (metrics, params, etc). +Run data (metrics, params, and tags). +------------+---------------------------------+--------------------------------------+ @@ -1099,36 +1165,33 @@ RunInfo Metadata of a single run. -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| Field Name | Type | Description | -+==================+=========================+==================================================================================+ -| run_uuid | ``STRING`` | Unique identifier for the run. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| experiment_id | ``INT64`` | The experiment ID. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| name | ``STRING`` | Human readable name that identifies this run. 
| -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| source_type | :ref:`mlflowsourcetype` | Source type. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| source_name | ``STRING`` | Source identifier: GitHub URL, name of notebook, name of job, etc. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| user_id | ``STRING`` | User who initiated the run. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| status | :ref:`mlflowrunstatus` | Current status of the run. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| start_time | ``INT64`` | Unix timestamp of when the run started in milliseconds. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| end_time | ``INT64`` | Unix timestamp of when the run ended in milliseconds. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| source_version | ``STRING`` | Git commit hash of the code used for the run. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| entry_point_name | ``STRING`` | Name of the entry point for the run. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ -| artifact_uri | ``STRING`` | URI of the directory where artifacts should be uploaded. | -| | | This can be a local path (starting with "/"), or a distributed file system (DFS) | -| | | path, like ``s3://bucket/directory`` or ``dbfs:/my/directory``. | -| | | If not set, the local ``./mlruns`` directory is chosen. | -+------------------+-------------------------+----------------------------------------------------------------------------------+ ++-----------------+------------------------+----------------------------------------------------------------------------------+ +| Field Name | Type | Description | ++=================+========================+==================================================================================+ +| run_id | ``STRING`` | Unique identifier for the run. | ++-----------------+------------------------+----------------------------------------------------------------------------------+ +| run_uuid | ``STRING`` | [Deprecated, use run_id instead] Unique identifier for the run. This field will | +| | | be removed in a future MLflow version. | ++-----------------+------------------------+----------------------------------------------------------------------------------+ +| experiment_id | ``STRING`` | The experiment ID. | ++-----------------+------------------------+----------------------------------------------------------------------------------+ +| user_id | ``STRING`` | User who initiated the run. | +| | | This field is deprecated as of MLflow 1.0, and will be removed in a future | +| | | MLflow release. Use 'mlflow.user' tag instead. 
|
++-----------------+------------------------+----------------------------------------------------------------------------------+
+| status | :ref:`mlflowrunstatus` | Current status of the run. |
++-----------------+------------------------+----------------------------------------------------------------------------------+
+| start_time | ``INT64`` | Unix timestamp of when the run started in milliseconds. |
++-----------------+------------------------+----------------------------------------------------------------------------------+
+| end_time | ``INT64`` | Unix timestamp of when the run ended in milliseconds. |
++-----------------+------------------------+----------------------------------------------------------------------------------+
+| artifact_uri | ``STRING`` | URI of the directory where artifacts should be uploaded. |
+| | | This can be a local path (starting with "/"), or a distributed file system (DFS) |
+| | | path, like ``s3://bucket/directory`` or ``dbfs:/my/directory``. |
+| | | If not set, the local ``./mlruns`` directory is chosen. |
++-----------------+------------------------+----------------------------------------------------------------------------------+
+| lifecycle_stage | ``STRING`` | Current life cycle stage of the run: OneOf("active", "deleted") |
++-----------------+------------------------+----------------------------------------------------------------------------------+

.. _mlflowRunTag:

@@ -1141,55 +1204,13 @@ Tag for a run.
+------------+------------+----------------+
-| Field Name | Type | Description |
+| Field Name | Type | Description |
+============+============+================+
| key | ``STRING`` | The tag key. |
+------------+------------+----------------+
| value | ``STRING`` | The tag value. |
+------------+------------+----------------+

-.. _mlflowSearchExpression:
-
-SearchExpression
-----------------
-
-
-
-
-
-+-----------------------------+-------------------------------------------------------------------------------+--------------------------------------------------+
-| Field Name | Type | Description |
-+=============================+===============================================================================+==================================================+
-| ``metric`` OR ``parameter`` | :ref:`mlflowmetricsearchexpression` OR :ref:`mlflowparametersearchexpression` | |
-| | | |
-| | | If ``metric``, a metric search expression. |
-| | | |
-| | | |
-| | | |
-| | | |
-| | | If ``parameter``, a parameter search expression. |
-+-----------------------------+-------------------------------------------------------------------------------+--------------------------------------------------+
-
-.. _mlflowStringClause:
-
-StringClause
-------------
-
-
-
-
-
-+------------+------------+------------------------------+
-| Field Name | Type | Description |
-+============+============+==============================+
-| comparator | ``STRING`` | OneOf ("==", "!=", "~") |
-+------------+------------+------------------------------+
-| value | ``STRING`` | String value for comparison. |
-+------------+------------+------------------------------+
-
.. _mlflowRunStatus:

RunStatus
@@ -1212,28 +1233,6 @@ Status of a run.
| KILLED | Run killed by user. |
+-----------+------------------------------------------+

-.. _mlflowSourceType:
-
-SourceType
-----------
-
-
-Source that generated a run.
-
-+----------+------------------------------------------------------------------------+
-| Name | Description |
-+==========+========================================================================+
-| NOTEBOOK | Databricks notebook environment. |
-+----------+------------------------------------------------------------------------+
-| JOB | Scheduled or Run Now job. |
-+----------+------------------------------------------------------------------------+
-| PROJECT | As a prepackaged project: either a Docker image or GitHub source, etc. |
-+----------+------------------------------------------------------------------------+
-| LOCAL | Local run: Using CLI, IDE, or local notebook. |
-+----------+------------------------------------------------------------------------+
-| UNKNOWN | Unknown source type. |
-+----------+------------------------------------------------------------------------+
-
.. _mlflowViewType:

ViewType
@@ -1250,4 +1249,4 @@ View type for ListExperiments query.
| DELETED_ONLY | Return only deleted experiments. |
+--------------+------------------------------------------+
| ALL | Get all experiments. |
-+--------------+------------------------------------------+
++--------------+------------------------------------------+
\ No newline at end of file
diff --git a/docs/source/search-syntax.rst b/docs/source/search-syntax.rst
new file mode 100644
index 0000000000000..0f43c24546381
--- /dev/null
+++ b/docs/source/search-syntax.rst
@@ -0,0 +1,145 @@
+.. _search-syntax:
+
+Search
+======
+
+The MLflow UI and API support searching runs within a single experiment or a group of experiments
+using a search filter API. This API is a simplified version of the SQL ``WHERE`` clause.
+
+.. contents:: Table of Contents
+  :local:
+  :depth: 3
+
+Syntax
+------
+
+A search filter can be one or more expressions joined by the ``AND`` keyword.
+The syntax does not support ``OR``. Each expression has three parts: an identifier on
+the left-hand side (LHS), a comparator, and a constant on the right-hand side (RHS).
+
+Example Expressions
+^^^^^^^^^^^^^^^^^^^^
+
+- Search for the subset of runs with logged accuracy metric greater than 0.92.
+
+  .. code-block:: sql
+
+    metrics.accuracy > 0.92
+
+- Search for runs created using a Logistic Regression model, a learning rate (lambda) of 0.001, and a recorded error metric under 0.05.
+
+  .. code-block:: sql
+
+    params.model = "LogisticRegression" and params.lambda = "0.001" and metrics.error <= 0.05
+
+- Search for all failed runs.
+
+  .. code-block:: sql
+
+    attributes.status = "FAILED"
+
+
+Identifier
+^^^^^^^^^^
+
+Required in the LHS of a search expression. Signifies an entity to compare against. An identifier has two
+parts separated by a period: the type of the entity and the name of the entity.
+The type of the entity is ``metrics``, ``params``, ``tags``, or ``attributes``. The entity name can
+contain alphanumeric characters and special characters.
+For example: ``metrics.accuracy``.
+
+Entity Name Contains Special Characters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a metric, parameter, or tag name contains a special character such as a hyphen, space, or period,
+enclose the entity name in double quotes.
+
+.. rubric:: Examples
+
+.. code-block:: sql
+
+  params."model-type"
+
+.. code-block:: sql
+
+  metrics."error rate"
+
+
+Entity Name Starts with a Number
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Unlike SQL syntax for column names, MLflow allows logging metrics, parameters, and tags with names
+that have a leading number. If an entity name contains a leading number, enclose the entity name in double quotes.
+For example:
+
+.. code-block:: sql
+
+  metrics."2019-04-02 error rate"
+
+
+Run Attributes
+~~~~~~~~~~~~~~
+
+The search syntax supports searching runs using two attributes: ``status`` and ``artifact_uri``. Both attributes have string values. Other fields in :py:class:`mlflow.entities.RunInfo` are :ref:`system_tags` that are searchable using the UI and the API. The search returns an error if you use other attribute names in the filter string.
+
+.. note::
+
+  - The experiment ID is implicitly selected by the search API.
+  - A run's ``lifecycle_stage`` attribute is not allowed because it is already encoded as a part of the API's ``run_view_type`` field. To search for runs using ``run_id``, it is more efficient to use the ``get_run`` APIs.
+  - The ``start_time`` and ``end_time`` attributes are not supported.
+
+Comparator
+^^^^^^^^^^
+
+There are two classes of comparators: numeric and string.
+
+- Numeric comparators (``metrics``): ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+- String comparators (``params``, ``tags``, and ``attributes``): ``=`` and ``!=``.
+
+Constant
+^^^^^^^^
+
+The search syntax requires the RHS of the expression to be a constant. The type of the constant
+depends on the LHS.
+
+- If the LHS is a metric, the RHS must be an integer or float number.
+- If the LHS is a parameter or tag, the RHS must be a string constant enclosed in single or double quotes.
+
+Programmatically Searching Runs
+--------------------------------
+
+The MLflow UI supports searching runs contained within the current experiment. To search runs across
+multiple experiments, use one of the client APIs.
+
+
+Python
+^^^^^^
+
+Get all active runs from experiments with IDs 3, 4, and 17 that used a CNN model with 10 layers and
+had a prediction accuracy of 94.5% or higher.
+
+.. code-block:: py
+
+  from mlflow.entities import ViewType
+  from mlflow.tracking.client import MlflowClient
+
+  query = 'params.model = "CNN" and params.layers = "10" and metrics."prediction accuracy" >= 0.945'
+  runs = MlflowClient().search_runs(["3", "4", "17"], query, ViewType.ACTIVE_ONLY)
+
+
+Search all known experiments for any MLflow runs created using the Inception model architecture.
+
+.. code-block:: py
+
+  from mlflow.entities import ViewType
+  from mlflow.tracking.client import MlflowClient
+
+  all_experiments = [exp.experiment_id for exp in MlflowClient().list_experiments()]
+  runs = MlflowClient().search_runs(all_experiments, "params.model = 'Inception'", ViewType.ALL)
+
+Java
+^^^^
+The Java API is similar to the Python API.
+
+.. code-block:: java
+
+  List<String> experimentIds = Arrays.asList("1", "2", "4", "8");
+  List<RunInfo> searchResult = client.searchRuns(experimentIds, "metrics.accuracy_score < 99.90");
diff --git a/docs/source/tracking.rst b/docs/source/tracking.rst
index 35bf2fdabf664..e1670a20a23f0 100644
--- a/docs/source/tracking.rst
+++ b/docs/source/tracking.rst
@@ -1,90 +1,97 @@
.. _tracking:

+===============
MLflow Tracking
===============

The MLflow Tracking component is an API and UI for logging parameters, code versions, metrics, and output files
when running your machine learning code and for later visualizing the results.
-MLflow Tracking lets you log and query experiments using both :ref:`Python <python-api>` and :ref:`REST <rest-api>` APIs.
+MLflow Tracking lets you log and query experiments using :ref:`Python <python-api>`, :ref:`REST <rest-api>`, :ref:`R-api`, and :ref:`java_api` APIs.

..
contents:: Table of Contents :local: - :depth: 1 + :depth: 2 Concepts --------- +======== MLflow Tracking is organized around the concept of *runs*, which are executions of some piece of data science code. Each run records the following information: Code Version - Git commit hash used to execute the run, if it was executed from an :ref:`MLflow Project `. + Git commit hash used for the run, if it was run from an :ref:`MLflow Project `. Start & End Time Start and end time of the run Source - Name of the file executed to launch the run, or the project name and entry point for the run - if the run was executed from an :ref:`MLflow Project `. + Name of the file to launch the run, or the project name and entry point for the run + if run from an :ref:`MLflow Project `. Parameters Key-value input parameters of your choice. Both keys and values are strings. Metrics - Key-value metrics where the value is numeric. Each metric can be updated throughout the + Key-value metrics, where the value is numeric. Each metric can be updated throughout the course of the run (for example, to track how your model's loss function is converging), and - MLflow will record and let you visualize the metric's full history. + MLflow records and lets you visualize the metric's full history. Artifacts Output files in any format. For example, you can record images (for example, PNGs), models - (for example, a pickled scikit-learn model), or even data files (for example, a + (for example, a pickled scikit-learn model), and data files (for example, a `Parquet `_ file) as artifacts. -Runs can be recorded from anywhere you run your code through MLflow's Python and REST APIs: for +You can record runs using MLflow Python, R, Java, and REST APIs from anywhere you run your code. For example, you can record them in a standalone program, on a remote cloud machine, or in an interactive notebook. If you record runs in an :ref:`MLflow Project `, MLflow remembers the project URI and source version. -Finally, runs can optionally be organized into *experiments*, which group together runs for a -specific task. You can create an experiment via the ``mlflow experiments`` CLI, with -:py:func:`mlflow.create_experiment`, or via the corresponding REST parameters. The MLflow API and +You can optionally organize runs into *experiments*, which group together runs for a +specific task. You can create an experiment using the ``mlflow experiments`` CLI, with +:py:func:`mlflow.create_experiment`, or using the corresponding REST parameters. The MLflow API and UI let you create and search for experiments. Once your runs have been recorded, you can query them using the :ref:`tracking_ui` or the MLflow API. -Where Runs Get Recorded ------------------------ +.. _where_runs_are_recorded: + +Where Runs Are Recorded +======================= -MLflow runs can be recorded either locally in files or remotely to a tracking server. -By default, the MLflow Python API logs runs to files in an ``mlruns`` directory wherever you -ran your program. You can then run ``mlflow ui`` to see the logged runs. Set the -``MLFLOW_TRACKING_URI`` environment variable to a server's URI or call -:py:func:`mlflow.set_tracking_uri` to log runs remotely. +MLflow runs can be recorded to local files, to a SQLAlchemy compatible database, or remotely +to a tracking server. By default, the MLflow Python API logs runs locally to files in an ``mlruns`` directory wherever you +ran your program. You can then run ``mlflow ui`` to see the logged runs. 
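+For example, a minimal sketch of this default behavior (the parameter and metric names here are
+illustrative, not an MLflow convention):
+
+.. code-block:: py
+
+  import mlflow
+
+  # No tracking URI is configured, so this run is written to ./mlruns
+  # in the current working directory.
+  with mlflow.start_run():
+      mlflow.log_param("alpha", 0.5)
+      mlflow.log_metric("rmse", 0.78)
+
+  # Run `mlflow ui` from the same directory to browse the logged run.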
-There are a different kinds of remote tracking URIs:
+To log runs remotely, set the ``MLFLOW_TRACKING_URI`` environment variable to a tracking server's URI or
+call :py:func:`mlflow.set_tracking_uri`.
+
+There are different kinds of remote tracking URIs:

- Local file path (specified as ``file:/my/local/dir``), where data is just directly stored locally.
-- HTTP server (specified as ``https://my-server:5000``), which is a server hosting :ref:`your own tracking server `.
-- Databricks workspace (specified as ``databricks``, or a specific Databricks CLI profile as ``databricks://``.
-  For more information on configuring a Databricks CLI, see the `GitHub README `_.
-  This works only in workspaces for which the Databricks MLflow Tracking Server is enabled; contact Databricks if interested.
+- Database encoded as ``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``. MLflow supports the dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``. For more details, see `SQLAlchemy database uri <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_.
+- HTTP server (specified as ``https://my-server:5000``), which is a server hosting an :ref:`MLflow tracking server <tracking_server>`.
+- Databricks workspace (specified as ``databricks`` or as ``databricks://<profileName>``, a `Databricks CLI profile <https://github.com/databricks/databricks-cli>`_).

Logging Data to Runs
---------------------
+====================

-You can log data to runs using either the MLflow Python or REST API. This section
+You can log data to runs using the MLflow Python, R, Java, or REST API. This section
shows the Python API.

+.. contents:: In this section:
+  :depth: 1
+  :local:
+
.. _basic_logging_functions:

-Basic Logging Functions
-^^^^^^^^^^^^^^^^^^^^^^^
+Logging Functions
+------------------

:py:func:`mlflow.set_tracking_uri` connects to a tracking URI. You can also set the
``MLFLOW_TRACKING_URI`` environment variable to have MLflow find a URI from there. In both cases,
-the URI can either be a HTTP/HTTPS URI for a remote server, or a local path to log data to a
-directory. The URI defaults to ``mlruns``.
+the URI can either be an HTTP/HTTPS URI for a remote server, a database connection string, or a
+local path to log data to a directory. The URI defaults to ``mlruns``.

:py:func:`mlflow.tracking.get_tracking_uri` returns the current tracking URI.

@@ -98,18 +105,22 @@ runs are launched under this experiment.

:py:func:`mlflow.start_run` returns the currently active run (if one exists), or starts a new run
and returns a :py:class:`mlflow.ActiveRun` object usable as a context manager for the
current run. You do not need to call ``start_run`` explicitly: calling one of the logging functions
-with no active run will automatically start a new one.
+with no active run automatically starts a new one.

:py:func:`mlflow.end_run` ends the currently active run, if any, taking an optional run status.

:py:func:`mlflow.active_run` returns a :py:class:`mlflow.entities.Run` object corresponding to the
currently active run, if any.

-:py:func:`mlflow.log_param` logs a key-value parameter in the currently active run. The keys and
-values are both strings.
+:py:func:`mlflow.log_param` logs a single key-value param in the currently active run. The key and
+value are both strings. Use :py:func:`mlflow.log_params` to log multiple params at once.
+
+:py:func:`mlflow.log_metric` logs a single key-value metric. The value must always be a number.
+MLflow remembers the history of values for each metric. Use :py:func:`mlflow.log_metrics` to log
+multiple metrics at once.

-:py:func:`mlflow.log_metric` logs a key-value metric. The value must always be a number.
MLflow will -remember the history of values for each metric. +:py:func:`mlflow.set_tag` sets a single key-value tag in the currently active run. The key and +value are both strings. Use :py:func:`mlflow.set_tags` to set multiple tags at once. :py:func:`mlflow.log_artifact` logs a local file as an artifact, optionally taking an ``artifact_path`` to place it in within the run's artifact URI. Run artifacts can be organized into @@ -122,16 +133,40 @@ an optional ``artifact_path``. logged to. +Enable Automatic Logging from TensorFlow (experimental) +------------------------------------------------------- +MLflow supports automatic logging from TensorFlow without the need for explicit log +statements. You can enable this feature by calling :py:func:`mlflow.tensorflow.autolog` +before your training code. **Note**: this feature is experimental - the API and format +of the logged data are subject to change. + + +:py:func:`mlflow.tensorflow.autolog` optionally accepts a ``metrics_every_n_steps`` +argument to specify the frequency with which metrics should be logged to MLflow. + +The following table details auto-logging capabilities for different TensorFlow workflows: + ++------------------+--------------------------------------------------------+----------------------------------------------------------+---------------+------------------------------------------------------------------------------------------------------------------+ +| Framework | Metrics | Parameters | Tags | Artifacts | ++------------------+--------------------------------------------------------+----------------------------------------------------------+---------------+------------------------------------------------------------------------------------------------------------------+ +| ``tf.keras`` | Training loss; validation loss; user-specified metrics | Number of layers; optimizer name; learning rate; epsilon | Model summary | `MLflow Model `_ (Keras model), TensorBoard logs; on training end | ++------------------+--------------------------------------------------------+----------------------------------------------------------+---------------+------------------------------------------------------------------------------------------------------------------+ +| ``tf.estimator`` | TensorBoard metrics | -- | -- | `MLflow Model `_ (TF saved model); on call to ``export_saved_model`` | ++------------------+--------------------------------------------------------+----------------------------------------------------------+---------------+------------------------------------------------------------------------------------------------------------------+ +| TensorFlow Core | All ``tf.summary.scalar`` calls | -- | -- | -- | ++------------------+--------------------------------------------------------+----------------------------------------------------------+---------------+------------------------------------------------------------------------------------------------------------------+ + + Launching Multiple Runs in One Program -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +-------------------------------------- -Sometimes you want to execute multiple MLflow runs in the same program: for example, maybe you are +Sometimes you want to launch multiple MLflow runs in the same program: for example, maybe you are performing a hyperparameter search locally or your experiments are just very fast to run. This is easy to do because the ``ActiveRun`` object returned by :py:func:`mlflow.start_run` is a Python `context manager `_. 
You can "scope" each run to
just one block of code as follows:

-.. code:: python
+.. code-block:: py

    with mlflow.start_run():
        mlflow.log_param("x", 1)
@@ -141,58 +176,126 @@ just one block of code as follows:

The run remains open throughout the ``with`` statement, and is automatically closed when the
statement exits, even if it exits due to an exception.
+
+Performance Tracking with Metrics
+---------------------------------
+
+You log MLflow metrics with ``log`` methods in the Tracking API. The ``log`` methods support two alternative methods for distinguishing metric values on the x-axis: ``timestamp`` and ``step``.
+
+``timestamp`` is an optional long value that represents the time that the metric was logged. ``timestamp`` defaults to the current time. ``step`` is an optional integer that represents any measurement of training progress (number of training iterations, number of epochs, and so on). ``step`` defaults to 0 and has the following requirements and properties:
+
+- Must be a valid 64-bit integer value.
+- Can be negative.
+- Can be out of order in successive write calls. For example, (1, 3, 2) is a valid sequence.
+- Can have "gaps" in the sequence of values specified in successive write calls. For example, (1, 5, 75, -20) is a valid sequence.
+
+If you specify both a timestamp and a step, metrics are recorded against both axes independently.
+
+Examples
+~~~~~~~~
+
+Python
+  .. code-block:: py
+
+    with mlflow.start_run():
+        for epoch in range(0, 3):
+            mlflow.log_metric(key="quality", value=2*epoch, step=epoch)
+
+Java and Scala
+  .. code-block:: java
+
+    MlflowClient client = new MlflowClient();
+    RunInfo run = client.createRun();
+    for (int epoch = 0; epoch < 3; epoch++) {
+        client.logMetric(run.getRunId(), "quality", 2 * epoch, System.currentTimeMillis(), epoch);
+    }
+
+
+Visualizing Metrics
+-------------------
+
+Here is an example plot of the :ref:`quick start tutorial <quickstart>` with the step x-axis and two timestamp axes:
+
+.. figure:: _static/images/metrics-step.png
+
+  X-axis step
+
+.. figure:: _static/images/metrics-time-wall.png
+
+  X-axis wall time - graphs the absolute time each metric was logged
+
+.. figure:: _static/images/metrics-time-relative.png
+
+  X-axis relative time - graphs the time relative to the first metric logged, for each run
+
+
+.. _organizing_runs_in_experiments:
+
Organizing Runs in Experiments
-------------------------------
+==============================

MLflow allows you to group runs under experiments, which can be useful for comparing runs intended
to tackle a particular task. You can create experiments using the :ref:`cli` (``mlflow experiments``) or
-the :py:func:`mlflow.create_experiment` Python API. You can pass the experiment ID for a individual run
-using the CLI (for example, ``mlflow run ... --experiment-id [ID]``) or the ``MLFLOW_EXPERIMENT_ID``
-environment variable.
+the :py:func:`mlflow.create_experiment` Python API. You can pass the experiment name for an individual run
+using the CLI (for example, ``mlflow run ... --experiment-name [name]``) or the ``MLFLOW_EXPERIMENT_NAME``
+environment variable. Alternatively, you can use the experiment ID instead, via the
+``--experiment-id`` CLI flag or the ``MLFLOW_EXPERIMENT_ID`` environment variable.

-.. code:: bash
+.. code-block:: bash

-   # Prints "created an experiment with ID
-   mlflow experiments create fraud-detection
-   # Set the ID via environment variables
-   export MLFLOW_EXPERIMENT_ID=
+   # Set the experiment via environment variables
+   export MLFLOW_EXPERIMENT_NAME=fraud-detection

-.. code:: python
+
+   mlflow experiments create --experiment-name fraud-detection

-   # Launch a run. The experiment ID is inferred from the MLFLOW_EXPERIMENT_ID environment
-   # variable, or from the --experiment-id parameter passed to the MLflow CLI (the latter
+.. code-block:: py
+
+   # Launch a run. The experiment is inferred from the MLFLOW_EXPERIMENT_NAME environment
+   # variable, or from the --experiment-name parameter passed to the MLflow CLI (the latter
   # taking precedence)
   with mlflow.start_run():
       mlflow.log_param("a", 1)
       mlflow.log_metric("b", 2)

Managing Experiments and Runs with the Tracking Service API
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------------------------------

MLflow provides a more detailed Tracking Service API for managing experiments and runs directly,
which is available through client SDK in the :py:mod:`mlflow.tracking` module.
-This makes it possible to query data about past runs, log additional information about them, create experiments and more.
+This makes it possible to query data about past runs, log additional information about them, create experiments,
+add tags to a run, and more.

-Example usage:
+.. rubric:: Example

-.. code:: python
+.. code-block:: py

   from mlflow.tracking import MlflowClient
   client = MlflowClient()
   experiments = client.list_experiments() # returns a list of mlflow.entities.Experiment
   run = client.create_run(experiments[0].experiment_id) # returns mlflow.entities.Run
-   client.log_param(run.info.run_uuid, "hello", "world")
-   client.set_terminated(run.info.run_uuid)
+   client.log_param(run.info.run_id, "hello", "world")
+   client.set_terminated(run.info.run_id)
+
+Adding Tags to Runs
+~~~~~~~~~~~~~~~~~~~
+
+The :py:func:`mlflow.tracking.MlflowClient.set_tag` function lets you add custom tags to runs. A tag can only have a single unique value mapped to it at a time. For example:
+
+.. code-block:: py
+
+   client.set_tag(run.info.run_id, "tag_key", "tag_value")
+
+.. important:: Do not use the prefix ``mlflow`` for a tag. This prefix is reserved for use by MLflow.

.. _tracking_ui:

Tracking UI
------------
+===========

The Tracking UI lets you visualize, search and compare runs, as well as download run artifacts or
-metadata for analysis in other tools. If you have been logging runs to a local ``mlruns`` directory,
-run ``mlflow ui`` in the directory above it, and it will load the corresponding runs.
-Alternatively, the :ref:`MLflow Server ` serves the same UI, and enables remote storage of run artifacts.
+metadata for analysis in other tools. If you log runs to a local ``mlruns`` directory,
+run ``mlflow ui`` in the directory above it, and it loads the corresponding runs.
+Alternatively, the :ref:`MLflow tracking server <tracking_server>` serves the same UI and enables remote storage of run artifacts.

The UI contains the following key features:

@@ -204,56 +307,111 @@ The UI contains the following key features:

.. _tracking_query_api:

Querying Runs Programmatically
------------------------------
+==============================

-All of the functions in the Tracking UI can be accessed programmatically through the
-:py:mod:`mlflow.tracking` module and the :ref:`rest-api`. This makes it easy to do several
-common tasks:
+You can access all of the functions in the Tracking UI programmatically. This makes it easy to do several common tasks:

* Query and compare runs using any data analysis tool of your choice, for example, **pandas**.
-* Determine the artifact URI for a run to feed some of its artifacts into a new run when executing
-  a workflow. For an example of querying runs and constructing a multistep workflow, see the MLflow `Multistep Workflow Example project `_.
-* Load artifacts from past runs as :ref:`models`. For an example of training, exporting, and loading a model, and predicting using
-the model, see the MLFlow `TensorFlow example `_.
-* Run automated parameter search algorithms, where you query the metrics from various runs to
-  submit new ones. For an example of running automated parameter search algorithms, see the MLflow `Hyperparameter Tuning Example project `_.
+* Determine the artifact URI for a run to feed some of its artifacts into a new run when executing a workflow. For an example of querying runs and constructing a multistep workflow, see the MLflow `Multistep Workflow Example project `_.
+* Load artifacts from past runs as :ref:`models`. For an example of training, exporting, and loading a model, and predicting using the model, see the MLFlow `TensorFlow example `_.
+* Run automated parameter search algorithms, where you query the metrics from various runs to submit new ones. For an example of running automated parameter search algorithms, see the MLflow `Hyperparameter Tuning Example project `_.
+
+.. _artifact-locations:
+
+Referencing Artifacts
+---------------------
+
+When you specify the location of an artifact in MLflow APIs, the syntax depends on whether you
+are invoking the Tracking, Models, or Projects API. For the Tracking API, you specify the artifact location using a (run ID, relative path) tuple. For the Models and Projects APIs, you specify the artifact location in the following ways:
+
+- ``/Users/me/path/to/local/model``
+- ``relative/path/to/local/model``
+- ``<scheme>/<scheme-dependent-path>``. For example:
+
+  - ``s3://my_bucket/path/to/model``
+  - ``hdfs://<host>:<port>/<path>``
+  - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
+
+For example:
+
+.. rubric:: Tracking API
+
+.. code-block:: py
+
+  mlflow.log_artifacts("<mlflow_run_id>", "/path/to/artifact")
+
+.. rubric:: Models API
+
+.. code-block:: py
+
+  mlflow.pytorch.load_model("runs:/<mlflow_run_id>/run-relative/path/to/model")
+
+
+
.. _tracking_server:

-Running a Tracking Server
--------------------------
+MLflow Tracking Servers
+=======================

-The MLflow tracking server launched using ``mlflow server`` also hosts REST APIs for tracking runs,
-writing data to the local filesystem. You can specify a tracking server URI
-with the ``MLFLOW_TRACKING_URI`` environment variable and MLflow tracking APIs automatically
-communicate with the tracking server at that URI to create/get run information, log metrics, and so on.
+.. contents:: In this section:
+  :local:
+  :depth: 2

-An example configuration for a server is as follows:
+You run an MLflow tracking server using ``mlflow server``. An example configuration for a server is:

-.. code:: bash
+.. code-block:: bash

  mlflow server \
-    --file-store /mnt/persistent-disk \
+    --backend-store-uri /mnt/persistent-disk \
    --default-artifact-root s3://my-mlflow-bucket/ \
    --host 0.0.0.0

Storage
-^^^^^^^
+-------

-The tracking server has two properties related to how data is stored: File Store and Artifact Store.
+An MLflow tracking server has two components for storage: a *backend store* and an *artifact store*.

-The **File Store** (exposed as ``--file-store``) is where the *server* stores run and experiment metadata.
-It defaults to the local ``./mlruns`` directory (same as when running ``mlflow run`` locally), but when
-running a server, make sure that this points to a persistent (that is, non-ephemeral) file system location.
+The backend store is where MLflow Tracking Server stores experiment and run metadata as well as
+params, metrics, and tags for runs. MLflow supports two types of backend stores: *file store* and
+*database-backed store*.
+
+Use ``--backend-store-uri`` to configure the type of backend store. You specify a *file store*
+backend as ``./path_to_store`` or ``file:/path_to_store`` and a *database-backed store* as a
+`SQLAlchemy database URI <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_. The database URI typically takes the format ``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``.
+MLflow supports the database dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``.
+Drivers are optional. If you do not specify a driver, SQLAlchemy uses a dialect's default driver. For example, ``--backend-store-uri sqlite:///mlflow.db`` would use a local SQLite database.
+
+.. important::

-The **Artifact Store** is a location suitable for large data (such as an S3 bucket or shared NFS file system)
-where *clients* log their artifact output (for example, models). The Artifact Store is a property
-of an experiment, but the ``--default-artifact-root`` flag sets the artifact root URI for
-newly-created experiments that do not specify one.
-Once you create an experiment, the ``--default-artifact-root`` is no longer relevant to it.
+  ``mlflow server`` will fail against a database-backed store with an out-of-date database schema.
+  To prevent this, upgrade your database schema to the latest supported version using
+  ``mlflow db upgrade [db_uri]``. Schema migrations can result in database downtime, may
+  take longer on larger databases, and are not guaranteed to be transactional. You should always
+  take a backup of your database prior to running ``mlflow db upgrade`` - consult your database's
+  documentation for instructions on taking a backup.
+
+By default ``--backend-store-uri`` is set to the local ``./mlruns`` directory (the same as when
+running ``mlflow run`` locally), but when running a server, make sure that this points to a
+persistent (that is, non-ephemeral) file system location.
+
+
+The artifact store is a location suitable for large data (such as an S3 bucket or shared NFS
+file system) and is where clients log their artifact output (for example, models).
+``artifact_location`` is a property recorded on :py:class:`mlflow.entities.Experiment` for the
+default location to store artifacts for all runs in this experiment. Additionally, ``artifact_uri``
+is a property on :py:class:`mlflow.entities.RunInfo` to indicate the location where all artifacts for
+this run are stored.
+
+Use ``--default-artifact-root`` (defaults to the local ``./mlruns`` directory) to configure the default
+location for the server's artifact store. This is used as the artifact location for newly-created
+experiments that do not specify one. Once you create an experiment, ``--default-artifact-root``
+is no longer relevant to that experiment.

-To allow the clients and server to access the artifact location, you should configure your cloud
+To allow the server and clients to access the artifact location, you should configure your cloud
provider credentials as normal. For example, for S3, you can set the ``AWS_ACCESS_KEY_ID``
and ``AWS_SECRET_ACCESS_KEY`` environment variables, use an IAM role, or configure a default
profile in ``~/.aws/credentials``.
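+As an illustrative sketch (the server address, experiment name, and bucket below are hypothetical),
+a client could pin a new experiment to its own artifact location instead of the server's
+``--default-artifact-root``:
+
+.. code-block:: py
+
+  import mlflow
+
+  # Talk to the tracking server configured above.
+  mlflow.set_tracking_uri("http://my-tracking-server:5000")
+
+  # Runs in this experiment store artifacts under the explicit S3 location,
+  # so --default-artifact-root no longer applies to it.
+  experiment_id = mlflow.create_experiment(
+      "fraud-detection", artifact_location="s3://team-bucket/fraud-detection")
+
+  with mlflow.start_run(experiment_id=experiment_id):
+      mlflow.log_artifact("model.pkl")  # assumes model.pkl exists locally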
@@ -262,17 +420,23 @@ See `Set up AWS Credentials and Region for Development
-  (for example, ``mlflow experiments create --artifact-location s3://<bucket>``), then the artifact root
-  will be a path inside the File Store. Typically this is not an appropriate location, as the client and
-  server will probably be referring to different physical locations (that is, the same path on different disks).
+  (for example, ``mlflow experiments create --artifact-location s3://<bucket>``), the artifact root
+  is a path inside the file store. Typically this is not an appropriate location, as the client and
+  server probably refer to different physical locations (that is, the same path on different disks).

-Supported Artifact Stores
-^^^^^^^^^^^^^^^^^^^^^^^^^
-In addition to local file paths, MLflow supports the following storage systems as artifact stores:
-Amazon S3, Azure Blob Storage, Google Cloud Storage, SFTP server, and NFS.
+Artifact Stores
+~~~~~~~~~~~~~~~~
+
+.. contents:: In this section:
+  :local:
+  :depth: 1
+
+In addition to local file paths, MLflow supports the following storage systems as artifact
+stores: Amazon S3, Azure Blob Storage, Google Cloud Storage, SFTP server, and NFS.

Amazon S3
-~~~~~~~~~
+^^^^^^^^^
+
To store artifacts in S3, specify a URI of the form ``s3://<bucket>/<path>``. MLflow obtains
credentials to access S3 from your machine's IAM role, a profile in ``~/.aws/credentials``, or
the environment variables ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` depending on which of
@@ -282,31 +446,40 @@ these are available. For more information on how to set credentials, see
To store artifacts in a custom endpoint, set the ``MLFLOW_S3_ENDPOINT_URL`` to your endpoint's URL. For example, if you have a Minio server at 1.2.3.4 on port 9000:

-.. code:: bash
+.. code-block:: bash

  export MLFLOW_S3_ENDPOINT_URL=http://1.2.3.4:9000

Azure Blob Storage
-~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^
+
To store artifacts in Azure Blob Storage, specify a URI of the form
``wasbs://<container>@<storage-account>.blob.core.windows.net/<path>``.
MLflow expects Azure Storage access credentials in the
``AZURE_STORAGE_CONNECTION_STRING`` or ``AZURE_STORAGE_ACCESS_KEY`` environment variables (preferring
-a connection string if one is set), so you will need to set one of these variables on both your client
-application and your MLflow tracking server. Finally, you will need to ``pip install azure-storage``
+a connection string if one is set), so you must set one of these variables on both your client
+application and your MLflow tracking server. Finally, you must run ``pip install azure-storage``
separately (on both your client and the server) to access Azure Blob Storage; MLflow does not declare
a dependency on this package by default.

Google Cloud Storage
-~~~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^
+
To store artifacts in Google Cloud Storage, specify a URI of the form ``gs://<bucket>/<path>``.
You should configure credentials for accessing the GCS container on the client and server as described
in the `GCS documentation `_.
-Finally, you will need to ``pip install google-cloud-storage`` (on both your client and the server)
+Finally, you must run ``pip install google-cloud-storage`` (on both your client and the server)
to access Google Cloud Storage; MLflow does not declare a dependency on this package by default.

+FTP server
+^^^^^^^^^^^
+
+To store artifacts in an FTP server, specify a URI of the form ``ftp://user@host/path/to/directory``.
+The URI may optionally include a password for logging into the server, e.g.
``ftp://user:pass@host/path/to/directory``
+
SFTP Server
-~~~~~~~~~~~
+^^^^^^^^^^^
+
To store artifacts in an SFTP server, specify a URI of the form ``sftp://user@host/path/to/directory``.
You should configure the client to be able to log in to the SFTP server without a password over SSH (e.g. public key, identity file in ssh_config, etc.).

The format ``sftp://user:pass@host/`` is supported for logging in. However, for safety reasons this is not recommended.

When using this store, ``pysftp`` must be installed on both the server and the client. Run ``pip install pysftp`` to install the required package.

NFS
-~~~
+^^^
+
To store artifacts in an NFS mount, specify a URI as a normal file system path, e.g., ``/mnt/nfs``.
-This path must the same on both the server and the client -- you may need to use symlinks or remount
+This path must be the same on both the server and the client -- you may need to use symlinks or remount
the client in order to enforce this property.

+HDFS
+^^^^
+
+To store artifacts in HDFS, specify an ``hdfs:`` URI. It can contain host and port: ``hdfs://<host>:<port>/<path>`` or just the path: ``hdfs://<path>``.
+
+There are two ways to authenticate to HDFS:
+
+- Use current UNIX account authorization
+- Kerberos credentials using the following environment variables:
+
+.. code-block:: bash
+
+  export MLFLOW_KERBEROS_TICKET_CACHE=/tmp/krb5cc_22222222
+  export MLFLOW_KERBEROS_USER=user_name_to_use
+
+Most of the cluster configuration settings are read from ``hdfs-site.xml`` accessed by the HDFS native
+driver using the ``CLASSPATH`` environment variable.
+
+Optionally you can select a different version of the HDFS driver library using:
+
+.. code-block:: bash
+
+  export MLFLOW_HDFS_DRIVER=libhdfs3
+
+The default driver is ``libhdfs``.
+
+
Networking
-^^^^^^^^^^
+----------

The ``--host`` option exposes the service on all interfaces. If running a server in production, we
would recommend not exposing the built-in server broadly (as it is unauthenticated and unencrypted),
and instead putting it behind a reverse proxy like NGINX or Apache httpd, or connecting over VPN.

-Additionally, you should ensure that the ``--file-store`` (which defaults to the ``./mlruns`` directory)
-points to a persistent (non-ephemeral) disk.
+You can then pass authentication headers to MLflow using these :ref:`environment variables <tracking_auth>`.

-Connecting to a Remote Server
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Once you have a server running, set ``MLFLOW_TRACKING_URI`` to the server's URI, along
-with its scheme and port (for example, ``http://10.0.0.1:5000``). Then you can use ``mlflow``:
+Additionally, you should ensure that the ``--backend-store-uri`` (which defaults to the
+``./mlruns`` directory) points to a persistent (non-ephemeral) disk or database connection.

-.. code:: python
+Logging to a Tracking Server
+----------------------------
+
+To log to a tracking server, set the ``MLFLOW_TRACKING_URI`` environment variable to the server's URI,
+along with its scheme and port (for example, ``http://10.0.0.1:5000``) or call :py:func:`mlflow.set_tracking_uri`.
+
+The :py:func:`mlflow.start_run`, :py:func:`mlflow.log_param`, and :py:func:`mlflow.log_metric` calls
+then make API requests to your remote tracking server.
+
+.. code-block:: py

  import mlflow
  with mlflow.start_run():
-    mlflow.log_metric("a", 1)
+    mlflow.log_param("a", 1)
+    mlflow.log_metric("b", 2)

-The :py:func:`mlflow.start_run` and :py:func:`mlflow.log_metric` calls make API requests to your remote
-tracking server.
+
+.. _tracking_auth:
+
+In addition to the ``MLFLOW_TRACKING_URI`` environment variable, the following environment variables
+allow passing HTTP authentication to the tracking server:
+
+- ``MLFLOW_TRACKING_USERNAME`` and ``MLFLOW_TRACKING_PASSWORD`` - username and password to use with HTTP
+  Basic authentication. To use Basic authentication, you must set `both` environment variables.
+- ``MLFLOW_TRACKING_TOKEN`` - token to use with HTTP Bearer authentication. Basic authentication takes precedence if set.
+- ``MLFLOW_TRACKING_INSECURE_TLS`` - if set to the literal ``true``, MLflow does not verify the TLS connection,
+  meaning it does not validate certificates or hostnames for ``https://`` tracking URIs. This flag is not recommended for
+  production environments.
+
+.. _system_tags:
+
+System Tags
+===========
+
+You can annotate runs with arbitrary tags. Tag keys that start with ``mlflow.`` are reserved for
+internal use. The following tags are set automatically by MLflow, when appropriate:
+
++-------------------------------+----------------------------------------------------------------------------------------+
+| Key | Description |
++===============================+========================================================================================+
+| ``mlflow.runName`` | Human readable name that identifies this run. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.parentRunId`` | The ID of the parent run, if this is a nested run. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.user`` | Identifier of the user who created the run. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.source.type`` | Source type (possible values are ``"NOTEBOOK"``, ``"JOB"``, ``"PROJECT"``, |
+| | ``"LOCAL"``, and ``"UNKNOWN"``) |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.source.name`` | Source identifier (e.g., GitHub URL, local Python filename, name of notebook) |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.source.git.commit`` | Commit hash of the executed code, if in a git repository. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.source.git.branch`` | Name of the branch of the executed code, if in a git repository. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.source.git.repoURL`` | URL that the executed code was cloned from. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.project.env`` | One of "docker" or "conda", indicating the runtime context used by the MLflow project. |
++-------------------------------+----------------------------------------------------------------------------------------+
+| ``mlflow.project.entryPoint`` | Name of the project entry point associated with the current run, if any.
| ++-------------------------------+----------------------------------------------------------------------------------------+ +| ``mlflow.docker.image.name`` | Name of the Docker image used to execute this run. | ++-------------------------------+----------------------------------------------------------------------------------------+ +| ``mlflow.docker.image.id`` | ID of the Docker image used to execute this run. | ++-------------------------------+----------------------------------------------------------------------------------------+ diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst index c73f1a0711f08..3f9156fa7556c 100644 --- a/docs/source/tutorial.rst +++ b/docs/source/tutorial.rst @@ -21,236 +21,152 @@ is from UCI's `machine learning repository `_ + - Install `conda `_ - Clone (download) the MLflow repository via ``git clone https://github.com/mlflow/mlflow`` - - `cd` into the ``examples`` directory within your clone of MLflow - we'll use this working + - ``cd`` into the ``examples`` directory within your clone of MLflow - we'll use this working directory for running the tutorial. We avoid running directly from our clone of MLflow as doing - so would cause the tutorial to use MLflow from source, rather than your PyPi installation of + so would cause the tutorial to use MLflow from source, rather than your PyPI installation of MLflow. .. container:: R - To run this tutorial, you'll need to: - - - Install `conda `_ - - Install the MLflow package (via ``devtools::install_github("mlflow/mlflow", subdir = "mlflow/R/mlflow")``) + - Install `conda `_ + - Install the MLflow package (via ``install.packages("mlflow")``) - Install MLflow (via ``mlflow::mlflow_install()``) - Clone (download) the MLflow repository via ``git clone https://github.com/mlflow/mlflow`` - - `setwd()` into the directory within your clone of MLflow - we'll use this working - directory for running the tutorial. We avoid running directly from our clone of MLflow as doing - so would cause the tutorial to use MLflow from source, rather than your PyPi installation of - MLflow. + - ``setwd()`` into the ``examples`` directory within your clone of MLflow - we'll use this + working directory for running the tutorial. We avoid running directly from our clone of + MLflow as doing so would cause the tutorial to use MLflow from source, rather than your + PyPI installation of MLflow. Training the Model ------------------ -.. plain-section:: - - .. container:: python - - First, train a linear regression model that takes two hyperparameters: ``alpha`` and ``l1_ratio``. The code is located at ``examples/sklearn_elasticnet_wine/train.py`` and is reproduced below. - - .. code:: python - - import os - import sys - - import pandas as pd - import numpy as np - from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score - from sklearn.model_selection import train_test_split - from sklearn.linear_model import ElasticNet - - import mlflow - import mlflow.sklearn - # Run from the root of MLflow - # Read the wine-quality csv file - wine_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "wine-quality.csv") - data = pd.read_csv(wine_path) - - # Split the data into training and test sets. (0.75, 0.25) split. 
-        train, test = train_test_split(data)

-        # The predicted column is "quality" which is a scalar from [3, 9]
-        train_x = train.drop(["quality"], axis=1)
-        test_x = test.drop(["quality"], axis=1)
-        train_y = train[["quality"]]
-        test_y = test[["quality"]]
+First, train a linear regression model that takes two hyperparameters: ``alpha`` and ``l1_ratio``.

-        alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5
-        l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5
-
-        with mlflow.start_run():
-            lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
-            lr.fit(train_x, train_y)
-
-            predicted_qualities = lr.predict(test_x)
-
-            (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
-
-            print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
-            print("  RMSE: %s" % rmse)
-            print("  MAE: %s" % mae)
-            print("  R2: %s" % r2)
-
-            mlflow.log_param("alpha", alpha)
-            mlflow.log_param("l1_ratio", l1_ratio)
-            mlflow.log_metric("rmse", rmse)
-            mlflow.log_metric("r2", r2)
-            mlflow.log_metric("mae", mae)
-
-            mlflow.sklearn.log_model(lr, "model")
+.. plain-section::

-        This example uses the familiar pandas, numpy, and sklearn APIs to create a simple machine learning
-        model. The :doc:`MLflow tracking APIs <tracking>` log information about each
-        training run, like the hyperparameters ``alpha`` and ``l1_ratio``, used to train the model and metrics, like
-        the root mean square error, used to evaluate the model. The example also serializes the
-        model in a format that MLflow knows how to deploy.
+    .. container:: python

-        You can run the example with default hyperparameters as follows:
+        The code is located at ``examples/sklearn_elasticnet_wine/train.py`` and is reproduced below.
+
+        .. literalinclude:: ../../examples/sklearn_elasticnet_wine/train.py

-        .. code:: bash
+        This example uses the familiar pandas, numpy, and sklearn APIs to create a simple machine learning
+        model. The :doc:`MLflow tracking APIs <tracking>` log information about each
+        training run, like the hyperparameters ``alpha`` and ``l1_ratio``, used to train the model and metrics, like
+        the root mean square error, used to evaluate the model. The example also serializes the
+        model in a format that MLflow knows how to deploy.

-            python examples/sklearn_elasticnet_wine/train.py
+        You can run the example with default hyperparameters as follows:

-        Try out some other values for ``alpha`` and ``l1_ratio`` by passing them as arguments to ``train.py``:
+        .. code-block:: bash

-        .. code:: bash
+            python examples/sklearn_elasticnet_wine/train.py

-            python examples/sklearn_elasticnet_wine/train.py
+        Try out some other values for ``alpha`` and ``l1_ratio`` by passing them as arguments to ``train.py``:

-        Each time you run the example, MLflow logs information about your experiment runs in the directory ``mlruns``.
+        .. code-block:: bash

-        .. note::
-            If you would like to use the Jupyter notebook version of ``train.py``, try out the tutorial notebook at ``examples/sklearn_elasticnet_wine/train.ipynb``.
+            python examples/sklearn_elasticnet_wine/train.py

-        .. container:: R
+        Each time you run the example, MLflow logs information about your experiment runs in the directory ``mlruns``.

-        First, train a linear regression model that takes two hyperparameters: ``alpha`` and ``lambda``. The code is located at ``examples/r_wine/train.R`` and is reproduced below.
+        .. note::
+            If you would like to use the Jupyter notebook version of ``train.py``, try out the tutorial notebook at ``examples/sklearn_elasticnet_wine/train.ipynb``.

-        .. code:: R
+    .. container:: R
-        library(mlflow)
-        library(glmnet)
+        The code is located at ``examples/r_wine/train.R`` and is reproduced below.

-        # Read the wine-quality csv file
-        data <- read.csv("../wine-quality.csv")
+        .. literalinclude:: ../../examples/r_wine/train.R

-        # Split the data into training and test sets. (0.75, 0.25) split.
-        sampled <- sample(1:nrow(data), 0.75 * nrow(data))
-        train <- data[sampled, ]
-        test <- data[-sampled, ]
+        This example uses the familiar ``glmnet`` package to create a simple machine learning
+        model. The :doc:`MLflow tracking APIs <tracking>` log information about each
+        training run, like the hyperparameters ``alpha`` and ``lambda``, used to train the model and metrics, like
+        the root mean square error, used to evaluate the model. The example also serializes the
+        model in a format that MLflow knows how to deploy.

-        # The predicted column is "quality" which is a scalar from [3, 9]
-        train_x <- as.matrix(train[, !(names(train) == "quality")])
-        test_x <- as.matrix(test[, !(names(train) == "quality")])
-        train_y <- train[, "quality"]
-        test_y <- test[, "quality"]
+        You can run the example with default hyperparameters as follows:

-        alpha <- mlflow_param("alpha", 0.5, "numeric")
-        lambda <- mlflow_param("lambda", 0.5, "numeric")
+        .. code-block:: R

-        with(mlflow_start_run(), {
-            model <- glmnet(train_x, train_y, alpha = alpha, lambda = lambda, family = "gaussian")
-            predictor <- crate(~ glmnet::predict.glmnet(model, as.matrix(.x)), model)
-            predicted <- predictor(test_x)
+            mlflow_run(uri = "examples/r_wine", entry_point = "train.R")

-            rmse <- sqrt(mean((predicted - test_y) ^ 2))
-            mae <- mean(abs(predicted - test_y))
-            r2 <- as.numeric(cor(predicted, test_y) ^ 2)
+        Try out some other values for ``alpha`` and ``lambda`` by passing them as arguments to ``train.R``:

-            message("Elasticnet model (alpha=", alpha, ", lambda=", lambda, "):")
-            message("  RMSE: ", rmse)
-            message("  MAE: ", mae)
-            message("  R2: ", r2)
+        .. code-block:: R

-            mlflow_log_param("alpha", alpha)
-            mlflow_log_param("lambda", lambda)
-            mlflow_log_metric("rmse", rmse)
-            mlflow_log_metric("r2", r2)
-            mlflow_log_metric("mae", mae)
+            mlflow_run(uri = "examples/r_wine", entry_point = "train.R", parameters = list(alpha = 0.1, lambda = 0.5))

-            mlflow_log_model(predictor, "model")
-        })
+        Each time you run the example, MLflow logs information about your experiment runs in the directory ``mlruns``.

-        This example uses the familiar `glmnet` package to create a simple machine learning
-        model. The :doc:`MLflow tracking APIs <tracking>` log information about each
-        training run, like the hyperparameters ``alpha`` and ``lambda``, used to train the model and metrics, like
-        the root mean square error, used to evaluate the model. The example also serializes the
-        model in a format that MLflow knows how to deploy.
+        .. note::
+            If you would like to use an R notebook version of ``train.R``, try the tutorial notebook at ``examples/r_wine/train.Rmd``.

-        You can run the example with default hyperparameters as follows:
+Comparing the Models
+--------------------

-        .. code:: R
-            mlflow_run(uri = "examples/r_wine", entry_point = "train.R")
+Next, use the MLflow UI to compare the models that you have produced. In the same current working directory
+as the one that contains the ``mlruns`` directory, run:

-        Try out some other values for ``alpha`` and ``lambda`` by passing them as arguments to ``train.R``:
+.. code-section::
+    .. code-block:: python

-        .. code:: R
+        mlflow ui
+
+    .. code-block:: R
-            mlflow_run(uri = "examples/r_wine", entry_point = "train.R", param_list = list(alpha = 0.1, lambda = 0.5))
+
+        mlflow_ui()

-        Each time you run the example, MLflow logs information about your experiment runs in the directory ``mlruns``.
+and view it at `<http://localhost:5000>`_.

-        .. note::
-            If you would like to use the R notebook version of ``train.R``, try out the tutorial notebook at ``examples/r_wine/train.Rmd``.
-
-Comparing the Models
---------------------
+On this page, you can see a list of experiment runs with metrics you can use to compare the models.

.. plain-section::

-    .. container:: python
-
-        Next, use the MLflow UI to compare the models that you have produced. Run ``mlflow ui``
-        in the same current working directory as the one that contains the ``mlruns`` directory and
-        open http://localhost:5000 in your browser.
-
-        On this page, you can see a list of experiment runs with metrics you can use to compare the models.
-
-        .. image:: _static/images/tutorial-compare.png
-
-        You can see that the lower ``alpha`` is, the better the model. You can also
-        use the search feature to quickly filter out many models. For example, the query ``metrics.rmse < 0.8``
-        returns all the models with root mean squared error less than 0.8. For more complex manipulations,
-        you can download this table as a CSV and use your favorite data munging software to analyze it.
+    .. container:: python

-    .. container:: R
-
-        Next, use the MLflow UI to compare the models that you have produced. Run ``mlflow_ui()``
-        in the same current working directory as the one that contains the ``mlruns``.

        .. image:: _static/images/tutorial-compare.png

-        On this page, you can see a list of experiment runs with metrics you can use to compare the models.
+    .. container:: R

        .. image:: _static/images/tutorial-compare-R.png

-        You can use the search feature to quickly filter out many models. For example, the query ``metrics.rmse < 0.8``
-        returns all the models with root mean squared error less than 0.8. For more complex manipulations,
-        you can download this table as a CSV and use your favorite data munging software to analyze it.
+You can use the search feature to quickly filter out many models. For example, the query ``metrics.rmse < 0.8``
+returns all the models with root mean squared error less than 0.8. For more complex manipulations,
+you can download this table as a CSV and use your favorite data munging software to analyze it.
+

Packaging the Training Code
---------------------------

+Now that you have your training code, you can package it so that other data scientists can easily reuse the model, or so that you can run the training remotely, for example on Databricks.
+
.. plain-section::

    .. container:: python

-        Now that you have your training code, you can package it so that other data scientists can easily reuse the model, or so that you can run the training remotely, for example on Databricks. You do this by using :doc:`projects` conventions to specify the
-        dependencies and entry points to your code. The ``tutorial/MLproject`` file specifies that the project has the dependencies located in a
-        `Conda environment file `_
+        You do this by using :doc:`projects` conventions to specify the dependencies and entry points to your code. The ``sklearn_elasticnet_wine/MLproject`` file specifies that the project has the dependencies located in a `Conda environment file `_
        called ``conda.yaml`` and has one entry point that takes two parameters: ``alpha`` and ``l1_ratio``.

- ..
code-block:: yaml - # tutorial/MLproject + # sklearn_elasticnet_wine/MLproject name: tutorial @@ -266,9 +182,9 @@ Packaging the Training Code The Conda file lists the dependencies: - .. code:: yaml + .. code-block:: yaml - # tutorial/conda.yaml + # sklearn_elasticnet_wine/conda.yaml name: tutorial channels: @@ -280,8 +196,8 @@ Packaging the Training Code - pip: - mlflow - To run this project, invoke ``mlflow run tutorial -P alpha=0.42``. After running - this command, MLflow will run your training code in a new Conda environment with the dependencies + To run this project, invoke ``mlflow run examples/sklearn_elasticnet_wine -P alpha=0.42``. After running + this command, MLflow runs your training code in a new Conda environment with the dependencies specified in ``conda.yaml``. If the repository has an ``MLproject`` file in the root, you can also run a project directly from GitHub. This tutorial is duplicated in the https://github.com/mlflow/mlflow-example repository @@ -289,11 +205,11 @@ Packaging the Training Code .. container:: R - Now that you have your training code, you can package it so that other data scientists can easily reuse the model, or so that you can run the training remotely, for example on Databricks. You do this by running ``mlflow_snapshot()`` to create an `R dependencies packrat file `_ called ``r-dependencies.txt``. + You do this by running ``mlflow_snapshot()`` to create an `R dependencies packrat file `_ called ``r-dependencies.txt``. The R dependencies file lists the dependencies: - .. code:: + .. code-block:: r # examples/r_wine/r-dependencies.txt @@ -317,45 +233,45 @@ Packaging the Training Code To run this project, invoke: - .. code:: R + .. code-block:: r - mlflow_run("examples/r_wine", entry_point = "train.R", param_list = list(alpha = 0.2)) + mlflow_run("examples/r_wine", entry_point = "train.R", parameters = list(alpha = 0.2)) - After running this command, MLflow will run your training code in a new R session. + After running this command, MLflow runs your training code in a new R session. To restore the dependencies specified in ``r-dependencies.txt``, you can run instead: - .. code:: R + .. code-block:: r mlflow_restore_snapshot() - mlflow_run("examples/r_wine", entry_point = "train.R", param_list = list(alpha = 0.2)) + mlflow_run("examples/r_wine", entry_point = "train.R", parameters = list(alpha = 0.2)) You can also run a project directly from GitHub. This tutorial is duplicated in the https://github.com/rstudio/mlflow-example repository which you can run with: - .. code:: R + .. code-block:: r mlflow_run( "train.R", "https://github.com/rstudio/mlflow-example", - param_list = list(alpha = 0.2) + parameters = list(alpha = 0.2) ) Serving the Model ----------------- -.. plain-section:: +Now that you have packaged your model using the MLproject convention and have identified the best model, +it is time to deploy the model using :doc:`models`. An MLflow Model is a standard format for +packaging machine learning models that can be used in a variety of downstream tools — for example, +real-time serving through a REST API or batch inference on Apache Spark. - .. container:: python +In the example training code, after training the linear regression model, a function +in MLflow saved the model as an artifact within the run. - Now that you have packaged your model using the MLproject convention and have identified the best model, - it is time to deploy the model using :doc:`models`. 
An MLflow Model is a standard format for - packaging machine learning models that can be used in a variety of downstream tools — for example, - real-time serving through a REST API or batch inference on Apache Spark. +.. plain-section:: - In the example training code, after training the linear regression model, a function - in MLflow saved the model as an artifact within the run. + .. container:: python - .. code:: + .. code-block:: py mlflow.sklearn.log_model(lr, "model") @@ -371,40 +287,36 @@ Serving the Model In this example, you can use this MLmodel format with MLflow to deploy a local REST server that can serve predictions. - To deploy the server, run: + To deploy the server, run (replace the path with your model's actual path): - .. code:: + .. code-block:: bash - mlflow sklearn serve /Users/mlflow/mlflow-prototype/mlruns/0/7c1a0d5c42844dcdb8f5191146925174/artifacts/model -p 1234 + mlflow models serve -m /Users/mlflow/mlflow-prototype/mlruns/0/7c1a0d5c42844dcdb8f5191146925174/artifacts/model -p 1234 .. note:: - The version of Python used to create the model must be the same as the one running ``mlflow sklearn``. + The version of Python used to create the model must be the same as the one running ``mlflow models serve``. If this is not the case, you may see the error ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x9f in position 1: ordinal not in range(128)`` or ``raise ValueError, "unsupported pickle protocol: %d"``. - To serve a prediction, run: + Once you have deployed the server, you can pass it some sample data and see the + predictions. The following example uses ``curl`` to send a JSON-serialized pandas DataFrame + with the ``split`` orientation to the model server. For more information about the input data + formats accepted by the model server, see the + :ref:`MLflow deployment tools documentation `. - .. code:: + .. code-block:: bash - curl -X POST -H "Content-Type:application/json" --data '[{"fixed acidity": 6.2, "volatile acidity": 0.66, "citric acid": 0.48, "residual sugar": 1.2, "chlorides": 0.029, "free sulfur dioxide": 29, "total sulfur dioxide": 75, "density": 0.98, "pH": 3.33, "sulphates": 0.39, "alcohol": 12.8}]' http://127.0.0.1:1234/invocations + curl -X POST -H "Content-Type:application/json; format=pandas-split" --data '{"columns":["alcohol", "chlorides", "citric acid", "density", "fixed acidity", "free sulfur dioxide", "pH", "residual sugar", "sulphates", "total sulfur dioxide", "volatile acidity"],"data":[[12.8, 0.029, 0.48, 0.98, 6.2, 29, 3.33, 1.2, 0.39, 75, 0.66]]}' http://127.0.0.1:1234/invocations - which should return something like:: + The server should respond with output similar to:: - {"predictions": [6.379428821398614]} + [6.379428821398614] .. container:: R - Now that you have packaged your model using the MLproject convention and have identified the best model, - it is time to deploy the model using :doc:`models`. An MLflow Model is a standard format for - packaging machine learning models that can be used in a variety of downstream tools — for example, - real-time serving through a REST API or batch inference on Apache Spark. - - In the example training code, after training the linear regression model, a function - in MLflow saved the model as an artifact within the run. - - .. code:: R + .. code-block:: r mlflow_log_model(predictor, "model") @@ -422,41 +334,60 @@ Serving the Model To deploy the server, run: - .. code:: R + .. 
code-block:: r - mlflow_rfunc_serve(model_path = "model", run_uuid = "1bf3cca7f3814d8fac7be7874de1046d") + mlflow_rfunc_serve(model_uri="mlruns/0/c2a7325210ef4242bd4631cec8f92351/artifacts/model", port=8090) - This will initialize a REST server and open a `swagger `_ interface to perform predicitons against + This initializes a REST server and opens a `Swagger `_ interface to perform predictions against the REST API: .. image:: _static/images/tutorial-serving-r.png - .. note:: R + .. note:: By default, a model is served using the R packages available. To ensure the environment serving the prediction function matches the model, set ``restore = TRUE`` when calling ``mlflow_rfunc_serve()``. - To serve a prediction, run: + To serve a prediction, enter this in the Swagger UI:: + + { + "fixed acidity": 6.2, + "volatile acidity": 0.66, + "citric acid": 0.48, + "residual sugar": 1.2, + "chlorides": 0.029, + "free sulfur dioxide": 29, + "total sulfur dioxide": 75, + "density": 0.98, + "pH": 3.33, + "sulphates": 0.39, + "alcohol": 12.8 + } - which should return something like:: + + [ + [ + 6.4287492410792 + ] + ] + + Or run: + + .. code-block:: bash curl -X POST "http://127.0.0.1:8090/predict/" -H "accept: application/json" -H "Content-Type: application/json" -d "{\"fixed acidity\": 6.2, \"volatile acidity\": 0.66, \"citric acid\": 0.48, \"residual sugar\": 1.2, \"chlorides\": 0.029, \"free sulfur dioxide\": 29, \"total sulfur dioxide\": 75, \"density\": 0.98, \"pH\": 3.33, \"sulphates\": 0.39, \"alcohol\": 12.8}" - which should return something like:: + The server should respond with output similar to:: + + [[6.4287492410792]] - { - "predicitons": [ - [ - 6.1312 - ] - ] - } More Resources -------------- + -Congratulations on finishing the tutorial! For more reading, see :doc:`tracking`, :doc:`projects`, :doc:`models`, -and more. + +Congratulations on finishing the tutorial! For more reading, see :doc:`tracking`, :doc:`projects`, :doc:`models`, and more. .. [1] P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009. diff --git a/docs/theme/mlflow/breadcrumbs.html b/docs/theme/mlflow/breadcrumbs.html index fde6b373b0f94..eea3d01ff6b5d 100644 --- a/docs/theme/mlflow/breadcrumbs.html +++ b/docs/theme/mlflow/breadcrumbs.html @@ -6,7 +6,7 @@ {% set suffix = source_suffix %} {% endif %} -{% set github_user = 'databricks' %} +{% set github_user = 'mlflow' %} {% set github_repo = 'mlflow' %} {% set github_version = 'master' %} {% set conf_py_path = 'docs/source' %} diff --git a/docs/theme/mlflow/static/css/custom.css b/docs/theme/mlflow/static/css/custom.css index 2ba66acbfd413..e6ada0a59d966 100644 --- a/docs/theme/mlflow/static/css/custom.css +++ b/docs/theme/mlflow/static/css/custom.css @@ -174,7 +174,7 @@ b, strong, mark, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) .optional, .rst-content p.rubric { - font-weight: 500; + font-weight: 600; } @@ -1099,4 +1099,3 @@ text-transform: capitalize; .plain-section ul.section-selector li.selected:hover { background-color: #333; } - diff --git a/examples/README.md b/examples/README.md index d4ae241158974..98d1150b60392 100644 --- a/examples/README.md +++ b/examples/README.md @@ -23,3 +23,5 @@ and stores (logs) them as MLflow artifacts. * `sklearn_logisic_regression` is a simple MLflow example with hooks to log training data to MLflow tracking server. 
* `tensorflow` is an end-to-end one run example from train to predict. +* `docker` demonstrates how to create and run an MLflow project using docker (rather than conda) + to manage project dependencies. diff --git a/mlflow/sagemaker/container/scoring_server/__init__.py b/examples/docker/.dockerignore similarity index 100% rename from mlflow/sagemaker/container/scoring_server/__init__.py rename to examples/docker/.dockerignore diff --git a/examples/docker/Dockerfile b/examples/docker/Dockerfile new file mode 100644 index 0000000000000..e436f49c7f9aa --- /dev/null +++ b/examples/docker/Dockerfile @@ -0,0 +1,9 @@ +FROM continuumio/miniconda:4.5.4 + +RUN pip install mlflow>=1.0 \ + && pip install azure-storage==0.36.0 \ + && pip install numpy==1.14.3 \ + && pip install scipy \ + && pip install pandas==0.22.0 \ + && pip install scikit-learn==0.19.1 \ + && pip install cloudpickle diff --git a/examples/docker/MLproject b/examples/docker/MLproject new file mode 100644 index 0000000000000..28289de45496d --- /dev/null +++ b/examples/docker/MLproject @@ -0,0 +1,11 @@ +name: docker-example + +docker_env: + image: mlflow-docker-example + +entry_points: + main: + parameters: + alpha: float + l1_ratio: {type: float, default: 0.1} + command: "python train.py --alpha {alpha} --l1-ratio {l1_ratio}" diff --git a/examples/docker/README.rst b/examples/docker/README.rst new file mode 100644 index 0000000000000..36896d4ae9b04 --- /dev/null +++ b/examples/docker/README.rst @@ -0,0 +1,64 @@ +Dockerized Model Training with MLflow +------------------------------------- +This directory contains an MLflow project that trains a linear regression model on the UC Irvine +Wine Quality Dataset. The project uses a Docker image to capture the dependencies needed to run +training code. Running a project in a Docker environment (as opposed to Conda) allows for capturing +non-Python dependencies, e.g., Java libraries. In the future, we also hope to add tools to MLflow +for running Dockerized projects, e.g., on a Kubernetes cluster for scale-out. + +Structure of this MLflow Project +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This MLflow project contains a ``train.py`` file that trains a scikit-learn model and uses +MLflow Tracking APIs to log the model and its metadata (e.g., hyperparameters and metrics) +for later use and reference. ``train.py`` operates on the Wine Quality Dataset, which is included +in ``wine-quality.csv``. + +Most importantly, the project also includes an ``MLproject`` file, which specifies the Docker +container environment in which to run the project using the ``docker_env`` field: + +.. code-block:: yaml + + docker_env: + image: mlflow-docker-example + +Here, ``image`` can be any valid argument to ``docker run``, such as the tag, ID or URL of a Docker +image (see `Docker docs `_). The above +example references a locally-stored image (``mlflow-docker-example``) by tag. + +Finally, the project includes a ``Dockerfile`` that is used to build the image referenced by the +``MLproject`` file. The ``Dockerfile`` specifies library dependencies required by the project, such +as ``mlflow`` and ``scikit-learn``. + +Running this Example +^^^^^^^^^^^^^^^^^^^^ + +First, install MLflow (via ``pip install mlflow``) and install +`Docker `_. + +Then, build the image for the project's Docker container environment. You must use the same image +name that is given by the ``docker_env.image`` field of the MLproject file. In this example, the +image name is ``mlflow-docker-example``. 
Issue the following command to build an image with this +name: + +.. code-block:: bash + + docker build -t mlflow-docker-example -f Dockerfile . + +Note that the name of the image used in the ``docker build`` command, ``mlflow-docker-example``, +matches the name of the image referenced in the ``MLproject`` file. + +Finally, run the example project using ``mlflow run examples/docker -P alpha=0.5``. + +What happens when the project is run? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Running ``mlflow run examples/docker`` builds a new Docker image based on ``mlflow-docker-example`` +that also contains our project code. The resulting image is tagged as +``mlflow-docker-example-<git-version>`` where ``<git-version>`` is the git commit ID. After the image is +built, MLflow executes the default (main) project entry point within the container using ``docker run``. + +Environment variables, such as ``MLFLOW_TRACKING_URI``, are propagated inside the container during +project execution. When running against a local tracking URI, MLflow mounts the host system's +tracking directory (e.g., a local ``mlruns`` directory) inside the container so that metrics and +params logged during project execution are accessible afterwards. diff --git a/examples/docker/kubernetes_config.json b/examples/docker/kubernetes_config.json new file mode 100644 index 0000000000000..a8b44709e41c2 --- /dev/null +++ b/examples/docker/kubernetes_config.json @@ -0,0 +1,5 @@ +{ + "kube-context": "docker-for-desktop", + "kube-job-template-path": "examples/docker/kubernetes_job_template.yaml", + "repository-uri": "username/mlflow-kubernetes-example" +} diff --git a/examples/docker/kubernetes_job_template.yaml b/examples/docker/kubernetes_job_template.yaml new file mode 100644 index 0000000000000..a3561202d9a88 --- /dev/null +++ b/examples/docker/kubernetes_job_template.yaml @@ -0,0 +1,20 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: "{replaced with MLflow Project name}" + namespace: mlflow +spec: + ttlSecondsAfterFinished: 100 + backoffLimit: 0 + template: + spec: + containers: + - name: "{replaced with MLflow Project name}" + image: "{replaced with URI of Docker image created during Project execution}" + command: ["{replaced with MLflow Project entry point command}"] + resources: + limits: + memory: 512Mi + requests: + memory: 256Mi + restartPolicy: Never diff --git a/examples/docker/train.py b/examples/docker/train.py new file mode 100644 index 0000000000000..4ac412eae2979 --- /dev/null +++ b/examples/docker/train.py @@ -0,0 +1,72 @@ +# The data set used in this example is from http://archive.ics.uci.edu/ml/datasets/Wine+Quality +# P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. +# Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009. 
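+# +# How MLflow invokes this script: the MLproject entry point above defines the +# command "python train.py --alpha {alpha} --l1-ratio {l1_ratio}", so a run such +# as "mlflow run examples/docker -P alpha=0.5" executes, inside the container, +# roughly: python train.py --alpha 0.5 --l1-ratio 0.1 +# (0.1 is the default for l1_ratio declared in the MLproject file).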
+ +import os +import warnings +import sys +import argparse + +import pandas as pd +import numpy as np +from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score +from sklearn.model_selection import train_test_split +from sklearn.linear_model import ElasticNet + +import mlflow +import mlflow.sklearn + + +def eval_metrics(actual, pred): + rmse = np.sqrt(mean_squared_error(actual, pred)) + mae = mean_absolute_error(actual, pred) + r2 = r2_score(actual, pred) + return rmse, mae, r2 + + + +if __name__ == "__main__": + warnings.filterwarnings("ignore") + np.random.seed(40) + + parser = argparse.ArgumentParser() + parser.add_argument('--alpha') + parser.add_argument('--l1-ratio') + args = parser.parse_args() + + # Read the wine-quality csv file from the directory containing this script + wine_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "wine-quality.csv") + data = pd.read_csv(wine_path) + + # Split the data into training and test sets. (0.75, 0.25) split. + train, test = train_test_split(data) + + # The predicted column is "quality" which is a scalar from [3, 9] + train_x = train.drop(["quality"], axis=1) + test_x = test.drop(["quality"], axis=1) + train_y = train[["quality"]] + test_y = test[["quality"]] + + alpha = float(args.alpha) + l1_ratio = float(args.l1_ratio) + + with mlflow.start_run(): + lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42) + lr.fit(train_x, train_y) + + predicted_qualities = lr.predict(test_x) + + (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities) + + print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio)) + print(" RMSE: %s" % rmse) + print(" MAE: %s" % mae) + print(" R2: %s" % r2) + + mlflow.log_param("alpha", alpha) + mlflow.log_param("l1_ratio", l1_ratio) + mlflow.log_metric("rmse", rmse) + mlflow.log_metric("r2", r2) + mlflow.log_metric("mae", mae) + + mlflow.sklearn.log_model(lr, "model") diff --git a/examples/docker/wine-quality.csv b/examples/docker/wine-quality.csv new file mode 100644 index 0000000000000..df5cbcb1af6f9 --- /dev/null +++ b/examples/docker/wine-quality.csv @@ -0,0 +1,4899 @@ +"fixed acidity","volatile acidity","citric acid","residual sugar","chlorides","free sulfur dioxide","total sulfur dioxide","density","pH","sulphates","alcohol","quality" +7,0.27,0.36,20.7,0.045,45,170,1.001,3,0.45,8.8,6 +6.3,0.3,0.34,1.6,0.049,14,132,0.994,3.3,0.49,9.5,6 +8.1,0.28,0.4,6.9,0.05,30,97,0.9951,3.26,0.44,10.1,6 +7.2,0.23,0.32,8.5,0.058,47,186,0.9956,3.19,0.4,9.9,6 +7.2,0.23,0.32,8.5,0.058,47,186,0.9956,3.19,0.4,9.9,6 +8.1,0.28,0.4,6.9,0.05,30,97,0.9951,3.26,0.44,10.1,6 +6.2,0.32,0.16,7,0.045,30,136,0.9949,3.18,0.47,9.6,6 +7,0.27,0.36,20.7,0.045,45,170,1.001,3,0.45,8.8,6 +6.3,0.3,0.34,1.6,0.049,14,132,0.994,3.3,0.49,9.5,6 +8.1,0.22,0.43,1.5,0.044,28,129,0.9938,3.22,0.45,11,6 +8.1,0.27,0.41,1.45,0.033,11,63,0.9908,2.99,0.56,12,5 +8.6,0.23,0.4,4.2,0.035,17,109,0.9947,3.14,0.53,9.7,5 +7.9,0.18,0.37,1.2,0.04,16,75,0.992,3.18,0.63,10.8,5 +6.6,0.16,0.4,1.5,0.044,48,143,0.9912,3.54,0.52,12.4,7 +8.3,0.42,0.62,19.25,0.04,41,172,1.0002,2.98,0.67,9.7,5 +6.6,0.17,0.38,1.5,0.032,28,112,0.9914,3.25,0.55,11.4,7 +6.3,0.48,0.04,1.1,0.046,30,99,0.9928,3.24,0.36,9.6,6 +6.2,0.66,0.48,1.2,0.029,29,75,0.9892,3.33,0.39,12.8,8 +7.4,0.34,0.42,1.1,0.033,17,171,0.9917,3.12,0.53,11.3,6 +6.5,0.31,0.14,7.5,0.044,34,133,0.9955,3.22,0.5,9.5,5 +6.2,0.66,0.48,1.2,0.029,29,75,0.9892,3.33,0.39,12.8,8 +6.4,0.31,0.38,2.9,0.038,19,102,0.9912,3.17,0.35,11,7 +6.8,0.26,0.42,1.7,0.049,41,122,0.993,3.47,0.48,10.5,8 
+7.6,0.67,0.14,1.5,0.074,25,168,0.9937,3.05,0.51,9.3,5 +6.6,0.27,0.41,1.3,0.052,16,142,0.9951,3.42,0.47,10,6 +7,0.25,0.32,9,0.046,56,245,0.9955,3.25,0.5,10.4,6 +6.9,0.24,0.35,1,0.052,35,146,0.993,3.45,0.44,10,6 +7,0.28,0.39,8.7,0.051,32,141,0.9961,3.38,0.53,10.5,6 +7.4,0.27,0.48,1.1,0.047,17,132,0.9914,3.19,0.49,11.6,6 +7.2,0.32,0.36,2,0.033,37,114,0.9906,3.1,0.71,12.3,7 +8.5,0.24,0.39,10.4,0.044,20,142,0.9974,3.2,0.53,10,6 +8.3,0.14,0.34,1.1,0.042,7,47,0.9934,3.47,0.4,10.2,6 +7.4,0.25,0.36,2.05,0.05,31,100,0.992,3.19,0.44,10.8,6 +6.2,0.12,0.34,1.5,0.045,43,117,0.9939,3.42,0.51,9,6 +5.8,0.27,0.2,14.95,0.044,22,179,0.9962,3.37,0.37,10.2,5 +7.3,0.28,0.43,1.7,0.08,21,123,0.9905,3.19,0.42,12.8,5 +6.5,0.39,0.23,5.4,0.051,25,149,0.9934,3.24,0.35,10,5 +7,0.33,0.32,1.2,0.053,38,138,0.9906,3.13,0.28,11.2,6 +7.3,0.24,0.39,17.95,0.057,45,149,0.9999,3.21,0.36,8.6,5 +7.3,0.24,0.39,17.95,0.057,45,149,0.9999,3.21,0.36,8.6,5 +6.7,0.23,0.39,2.5,0.172,63,158,0.9937,3.11,0.36,9.4,6 +6.7,0.24,0.39,2.9,0.173,63,157,0.9937,3.1,0.34,9.4,6 +7,0.31,0.26,7.4,0.069,28,160,0.9954,3.13,0.46,9.8,6 +6.6,0.24,0.27,1.4,0.057,33,152,0.9934,3.22,0.56,9.5,6 +6.7,0.23,0.26,1.4,0.06,33,154,0.9934,3.24,0.56,9.5,6 +7.4,0.18,0.31,1.4,0.058,38,167,0.9931,3.16,0.53,10,7 +6.2,0.45,0.26,4.4,0.063,63,206,0.994,3.27,0.52,9.8,4 +6.2,0.46,0.25,4.4,0.066,62,207,0.9939,3.25,0.52,9.8,5 +7,0.31,0.26,7.4,0.069,28,160,0.9954,3.13,0.46,9.8,6 +6.9,0.19,0.35,5,0.067,32,150,0.995,3.36,0.48,9.8,5 +7.2,0.19,0.31,1.6,0.062,31,173,0.9917,3.35,0.44,11.7,6 +6.6,0.25,0.29,1.1,0.068,39,124,0.9914,3.34,0.58,11,7 +6.2,0.16,0.33,1.1,0.057,21,82,0.991,3.32,0.46,10.9,7 +6.4,0.18,0.35,1,0.045,39,108,0.9911,3.31,0.35,10.9,6 +6.8,0.2,0.59,0.9,0.147,38,132,0.993,3.05,0.38,9.1,6 +6.9,0.25,0.35,1.3,0.039,29,191,0.9908,3.13,0.52,11,6 +7.2,0.21,0.34,11.9,0.043,37,213,0.9962,3.09,0.5,9.6,6 +6,0.19,0.26,12.4,0.048,50,147,0.9972,3.3,0.36,8.9,6 +6.6,0.38,0.15,4.6,0.044,25,78,0.9931,3.11,0.38,10.2,6 +7.4,0.2,0.36,1.2,0.038,44,111,0.9926,3.36,0.34,9.9,6 +6.8,0.22,0.24,4.9,0.092,30,123,0.9951,3.03,0.46,8.6,6 +6,0.19,0.26,12.4,0.048,50,147,0.9972,3.3,0.36,8.9,6 +7,0.47,0.07,1.1,0.035,17,151,0.991,3.02,0.34,10.5,5 +6.6,0.38,0.15,4.6,0.044,25,78,0.9931,3.11,0.38,10.2,6 +7.2,0.24,0.27,1.4,0.038,31,122,0.9927,3.15,0.46,10.3,6 +6.2,0.35,0.03,1.2,0.064,29,120,0.9934,3.22,0.54,9.1,5 +6.4,0.26,0.24,6.4,0.04,27,124,0.9903,3.22,0.49,12.6,7 +6.7,0.25,0.13,1.2,0.041,81,174,0.992,3.14,0.42,9.8,5 +6.7,0.23,0.31,2.1,0.046,30,96,0.9926,3.33,0.64,10.7,8 +7.4,0.24,0.29,10.1,0.05,21,105,0.9962,3.13,0.35,9.5,5 +6.2,0.27,0.43,7.8,0.056,48,244,0.9956,3.1,0.51,9,6 +6.8,0.3,0.23,4.6,0.061,50.5,238.5,0.9958,3.32,0.6,9.5,5 +6,0.27,0.28,4.8,0.063,31,201,0.9964,3.69,0.71,10,5 +8.6,0.23,0.46,1,0.054,9,72,0.9941,2.95,0.49,9.1,6 +6.7,0.23,0.31,2.1,0.046,30,96,0.9926,3.33,0.64,10.7,8 +7.4,0.24,0.29,10.1,0.05,21,105,0.9962,3.13,0.35,9.5,5 +7.1,0.18,0.36,1.4,0.043,31,87,0.9898,3.26,0.37,12.7,7 +7,0.32,0.34,1.3,0.042,20,69,0.9912,3.31,0.65,12,7 +7.4,0.18,0.3,8.8,0.064,26,103,0.9961,2.94,0.56,9.3,5 +6.7,0.54,0.28,5.4,0.06,21,105,0.9949,3.27,0.37,9,5 +6.8,0.22,0.31,1.4,0.053,34,114,0.9929,3.39,0.77,10.6,6 +7.1,0.2,0.34,16,0.05,51,166,0.9985,3.21,0.6,9.2,6 +7.1,0.34,0.2,6.1,0.063,47,164,0.9946,3.17,0.42,10,5 +7.3,0.22,0.3,8.2,0.047,42,207,0.9966,3.33,0.46,9.5,6 +7.1,0.43,0.61,11.8,0.045,54,155,0.9974,3.11,0.45,8.7,5 +7.1,0.44,0.62,11.8,0.044,52,152,0.9975,3.12,0.46,8.7,6 +7.2,0.39,0.63,11,0.044,55,156,0.9974,3.09,0.44,8.7,6 +6.8,0.25,0.31,13.3,0.05,69,202,0.9972,3.22,0.48,9.7,6 
+7.1,0.43,0.61,11.8,0.045,54,155,0.9974,3.11,0.45,8.7,5 +7.1,0.44,0.62,11.8,0.044,52,152,0.9975,3.12,0.46,8.7,6 +7.2,0.39,0.63,11,0.044,55,156,0.9974,3.09,0.44,8.7,6 +6.1,0.27,0.43,7.5,0.049,65,243,0.9957,3.12,0.47,9,5 +6.9,0.24,0.33,1.7,0.035,47,136,0.99,3.26,0.4,12.6,7 +6.9,0.21,0.33,1.8,0.034,48,136,0.9899,3.25,0.41,12.6,7 +7.5,0.17,0.32,1.7,0.04,51,148,0.9916,3.21,0.44,11.5,7 +7.1,0.26,0.29,12.4,0.044,62,240,0.9969,3.04,0.42,9.2,6 +6,0.34,0.66,15.9,0.046,26,164,0.9979,3.14,0.5,8.8,6 +8.6,0.265,0.36,1.2,0.034,15,80,0.9913,2.95,0.36,11.4,7 +9.8,0.36,0.46,10.5,0.038,4,83,0.9956,2.89,0.3,10.1,4 +6,0.34,0.66,15.9,0.046,26,164,0.9979,3.14,0.5,8.8,6 +7.4,0.25,0.37,13.5,0.06,52,192,0.9975,3,0.44,9.1,5 +7.1,0.12,0.32,9.6,0.054,64,162,0.9962,3.4,0.41,9.4,5 +6,0.21,0.24,12.1,0.05,55,164,0.997,3.34,0.39,9.4,5 +7.5,0.305,0.4,18.9,0.059,44,170,1,2.99,0.46,9,5 +7.4,0.25,0.37,13.5,0.06,52,192,0.9975,3,0.44,9.1,5 +7.3,0.13,0.32,14.4,0.051,34,109,0.9974,3.2,0.35,9.2,6 +7.1,0.12,0.32,9.6,0.054,64,162,0.9962,3.4,0.41,9.4,5 +7.1,0.23,0.35,16.5,0.04,60,171,0.999,3.16,0.59,9.1,6 +7.1,0.23,0.35,16.5,0.04,60,171,0.999,3.16,0.59,9.1,6 +6.9,0.33,0.28,1.3,0.051,37,187,0.9927,3.27,0.6,10.3,5 +6.5,0.17,0.54,8.5,0.082,64,163,0.9959,2.89,0.39,8.8,6 +7.2,0.27,0.46,18.75,0.052,45,255,1,3.04,0.52,8.9,5 +7.2,0.31,0.5,13.3,0.056,68,195,0.9982,3.01,0.47,9.2,5 +6.7,0.41,0.34,9.2,0.049,29,150,0.9968,3.22,0.51,9.1,5 +6.7,0.41,0.34,9.2,0.049,29,150,0.9968,3.22,0.51,9.1,5 +5.5,0.485,0,1.5,0.065,8,103,0.994,3.63,0.4,9.7,4 +6,0.31,0.24,3.3,0.041,25,143,0.9914,3.31,0.44,11.3,6 +7,0.14,0.4,1.7,0.035,16,85,0.9911,3.19,0.42,11.8,6 +7.2,0.31,0.5,13.3,0.056,68,195,0.9982,3.01,0.47,9.2,5 +7.3,0.32,0.48,13.3,0.06,57,196,0.9982,3.04,0.5,9.2,5 +5.9,0.36,0.04,5.7,0.046,21,87,0.9934,3.22,0.51,10.2,5 +7.8,0.24,0.32,12.2,0.054,42,138,0.9984,3.01,0.54,8.8,5 +7.4,0.16,0.31,6.85,0.059,31,131,0.9952,3.29,0.34,9.7,5 +6.9,0.19,0.28,5,0.058,14,146,0.9952,3.29,0.36,9.1,6 +6.4,0.13,0.47,1.6,0.092,40,158,0.9928,3.21,0.36,9.8,6 +6.7,0.19,0.36,1.1,0.026,63,143,0.9912,3.27,0.48,11,6 +7.4,0.39,0.23,7,0.033,29,126,0.994,3.14,0.42,10.5,5 +6.5,0.24,0.32,7.6,0.038,48,203,0.9958,3.45,0.54,9.7,7 +6.1,0.3,0.56,2.8,0.044,47,179,0.9924,3.3,0.57,10.9,7 +6.1,0.3,0.56,2.7,0.046,46,184,0.9924,3.31,0.57,10.9,6 +5.7,0.26,0.25,10.4,0.02,7,57,0.994,3.39,0.37,10.6,5 +6.5,0.24,0.32,7.6,0.038,48,203,0.9958,3.45,0.54,9.7,7 +6.5,0.425,0.4,13.1,0.038,59,241,0.9979,3.23,0.57,9,5 +6.6,0.24,0.27,15.8,0.035,46,188,0.9982,3.24,0.51,9.2,5 +6.8,0.27,0.22,8.1,0.034,55,203,0.9961,3.19,0.52,8.9,5 +6.7,0.27,0.31,15.7,0.036,44,179,0.9979,3.26,0.56,9.6,5 +8.2,0.23,0.4,1.2,0.027,36,121,0.992,3.12,0.38,10.7,6 +7.1,0.37,0.67,10.5,0.045,49,155,0.9975,3.16,0.44,8.7,5 +6.8,0.19,0.36,1.9,0.035,30,96,0.9917,3.15,0.54,10.8,7 +8.1,0.28,0.39,1.9,0.029,18,79,0.9923,3.23,0.52,11.8,6 +6.3,0.31,0.34,2.2,0.045,20,77,0.9927,3.3,0.43,10.2,5 +7.1,0.37,0.67,10.5,0.045,49,155,0.9975,3.16,0.44,8.7,5 +7.9,0.21,0.4,1.2,0.039,38,107,0.992,3.21,0.54,10.8,6 +8.5,0.21,0.41,4.3,0.036,24,99,0.9947,3.18,0.53,9.7,6 +8.1,0.2,0.4,2,0.037,19,87,0.9921,3.12,0.54,11.2,6 +6.3,0.255,0.37,1.1,0.04,37,114,0.9905,3,0.39,10.9,6 +5.6,0.16,0.27,1.4,0.044,53,168,0.9918,3.28,0.37,10.1,6 +6.4,0.595,0.14,5.2,0.058,15,97,0.9951,3.38,0.36,9,4 +6.3,0.34,0.33,4.6,0.034,19,80,0.9917,3.38,0.58,12,7 +6.9,0.25,0.3,4.1,0.054,23,116,0.994,2.99,0.38,9.4,6 +7.9,0.22,0.38,8,0.043,46,152,0.9934,3.12,0.32,11.5,7 +7.6,0.18,0.46,10.2,0.055,58,135,0.9968,3.14,0.43,9.9,6 +6.9,0.25,0.3,4.1,0.054,23,116,0.994,2.99,0.38,9.4,6 
+7.2,0.18,0.41,1.2,0.048,41,97,0.9919,3.14,0.45,10.4,5 +8.2,0.23,0.4,7.5,0.049,12,76,0.9966,3.06,0.84,9.7,6 +7.4,0.24,0.42,14,0.066,48,198,0.9979,2.89,0.42,8.9,6 +7.4,0.24,0.42,14,0.066,48,198,0.9979,2.89,0.42,8.9,6 +6.1,0.32,0.24,1.5,0.036,38,124,0.9898,3.29,0.42,12.4,7 +5.2,0.44,0.04,1.4,0.036,43,119,0.9894,3.36,0.33,12.1,8 +5.2,0.44,0.04,1.4,0.036,43,119,0.9894,3.36,0.33,12.1,8 +6.1,0.32,0.24,1.5,0.036,38,124,0.9898,3.29,0.42,12.4,7 +6.4,0.22,0.56,14.5,0.055,27,159,0.998,2.98,0.4,9.1,5 +6.3,0.36,0.3,4.8,0.049,14,85,0.9932,3.28,0.39,10.6,5 +7.4,0.24,0.42,14,0.066,48,198,0.9979,2.89,0.42,8.9,6 +6.7,0.24,0.35,13.1,0.05,64,205,0.997,3.15,0.5,9.5,5 +7,0.23,0.36,13,0.051,72,177,0.9972,3.16,0.49,9.8,5 +8.4,0.27,0.46,8.7,0.048,39,197,0.9974,3.14,0.59,9.6,6 +6.7,0.46,0.18,2.4,0.034,25,98,0.9896,3.08,0.44,12.6,7 +7.5,0.29,0.31,8.95,0.055,20,151,0.9968,3.08,0.54,9.3,5 +9.8,0.42,0.48,9.85,0.034,5,110,0.9958,2.87,0.29,10,5 +7.1,0.3,0.46,1.5,0.066,29,133,0.9906,3.12,0.54,12.7,6 +7.9,0.19,0.45,1.5,0.045,17,96,0.9917,3.13,0.39,11,6 +7.6,0.48,0.37,0.8,0.037,4,100,0.9902,3.03,0.39,11.4,4 +6.3,0.22,0.43,4.55,0.038,31,130,0.9918,3.35,0.33,11.5,7 +7.5,0.27,0.31,17.7,0.051,33,173,0.999,3.09,0.64,10.2,5 +6.9,0.23,0.4,7.5,0.04,50,151,0.9927,3.11,0.27,11.4,6 +7.2,0.32,0.47,5.1,0.044,19,65,0.991,3.03,0.41,12.6,4 +5.9,0.23,0.3,12.9,0.054,57,170,0.9972,3.28,0.39,9.4,5 +6,0.67,0.07,1.2,0.06,9,108,0.9931,3.11,0.35,8.7,4 +6.4,0.25,0.32,5.5,0.049,41,176,0.995,3.19,0.68,9.2,6 +6.4,0.33,0.31,5.5,0.048,42,173,0.9951,3.19,0.66,9.3,6 +7.1,0.34,0.15,1.2,0.053,61,183,0.9936,3.09,0.43,9.2,5 +6.8,0.28,0.4,22,0.048,48,167,1.001,2.93,0.5,8.7,5 +6.9,0.27,0.4,14,0.05,64,227,0.9979,3.18,0.58,9.6,6 +6.8,0.26,0.56,11.9,0.043,64,226,0.997,3.02,0.63,9.3,5 +6.8,0.29,0.56,11.9,0.043,66,230,0.9972,3.02,0.63,9.3,5 +6.7,0.24,0.41,9.4,0.04,49,166,0.9954,3.12,0.61,9.9,6 +5.9,0.3,0.23,4.2,0.038,42,119,0.9924,3.15,0.5,11,5 +6.8,0.53,0.35,3.8,0.034,26,109,0.9906,3.26,0.57,12.7,8 +6.5,0.28,0.28,8.5,0.047,54,210,0.9962,3.09,0.54,8.9,4 +6.6,0.28,0.28,8.5,0.052,55,211,0.9962,3.09,0.55,8.9,6 +6.8,0.28,0.4,22,0.048,48,167,1.001,2.93,0.5,8.7,5 +6.8,0.28,0.36,8,0.045,28,123,0.9928,3.02,0.37,11.4,6 +6.6,0.15,0.34,5.1,0.055,34,125,0.9942,3.36,0.42,9.6,5 +6.4,0.29,0.44,3.6,0.2,75,181,0.9942,3.02,0.41,9.1,5 +6.4,0.3,0.45,3.5,0.197,76,180,0.9942,3.02,0.39,9.1,6 +6.4,0.29,0.44,3.6,0.197,75,183,0.9942,3.01,0.38,9.1,5 +6.8,0.26,0.24,7.8,0.052,54,214,0.9961,3.13,0.47,8.9,5 +7.1,0.32,0.24,13.1,0.05,52,204,0.998,3.1,0.49,8.8,5 +6.8,0.26,0.24,7.8,0.052,54,214,0.9961,3.13,0.47,8.9,5 +6.8,0.27,0.26,16.1,0.049,55,196,0.9984,3.15,0.5,9.3,5 +7.1,0.32,0.24,13.1,0.05,52,204,0.998,3.1,0.49,8.8,5 +6.9,0.54,0.32,13.2,0.05,53,236,0.9973,3.2,0.5,9.6,5 +6.8,0.26,0.34,13.9,0.034,39,134,0.9949,3.33,0.53,12,6 +5.8,0.28,0.35,2.3,0.053,36,114,0.9924,3.28,0.5,10.2,4 +6.4,0.21,0.5,11.6,0.042,45,153,0.9972,3.15,0.43,8.8,5 +7,0.16,0.32,8.3,0.045,38,126,0.9958,3.21,0.34,9.2,5 +10.2,0.44,0.88,6.2,0.049,20,124,0.9968,2.99,0.51,9.9,4 +6.8,0.57,0.29,2.2,0.04,15,77,0.9938,3.32,0.74,10.2,5 +6.1,0.4,0.31,0.9,0.048,23,170,0.993,3.22,0.77,9.5,6 +5.6,0.245,0.25,9.7,0.032,12,68,0.994,3.31,0.34,10.5,5 +6.8,0.18,0.38,1.4,0.038,35,111,0.9918,3.32,0.59,11.2,7 +7,0.16,0.32,8.3,0.045,38,126,0.9958,3.21,0.34,9.2,5 +6.7,0.13,0.29,5.3,0.051,31,122,0.9944,3.44,0.37,9.7,6 +6.2,0.25,0.25,1.4,0.03,35,105,0.9912,3.3,0.44,11.1,7 +5.8,0.26,0.24,9.2,0.044,55,152,0.9961,3.31,0.38,9.4,5 +7.5,0.27,0.36,7,0.036,45,164,0.9939,3.03,0.33,11,5 +5.8,0.26,0.24,9.2,0.044,55,152,0.9961,3.31,0.38,9.4,5 
+5.7,0.28,0.24,17.5,0.044,60,167,0.9989,3.31,0.44,9.4,5 +7.5,0.23,0.36,7,0.036,43,161,0.9938,3.04,0.32,11,5 +7.5,0.27,0.36,7,0.036,45,164,0.9939,3.03,0.33,11,5 +7.2,0.685,0.21,9.5,0.07,33,172,0.9971,3,0.55,9.1,6 +6.2,0.25,0.25,1.4,0.03,35,105,0.9912,3.3,0.44,11.1,7 +6.5,0.19,0.3,0.8,0.043,33,144,0.9936,3.42,0.39,9.1,6 +6.3,0.495,0.22,1.8,0.046,31,140,0.9929,3.39,0.54,10.4,6 +7.1,0.24,0.41,17.8,0.046,39,145,0.9998,3.32,0.39,8.7,5 +6.4,0.17,0.32,2.4,0.048,41,200,0.9938,3.5,0.5,9.7,6 +7.1,0.25,0.32,10.3,0.041,66,272,0.9969,3.17,0.52,9.1,6 +6.4,0.17,0.32,2.4,0.048,41,200,0.9938,3.5,0.5,9.7,6 +7.1,0.24,0.41,17.8,0.046,39,145,0.9998,3.32,0.39,8.7,5 +6.8,0.64,0.08,9.7,0.062,26,142,0.9972,3.37,0.46,8.9,4 +8.3,0.28,0.4,7.8,0.041,38,194,0.9976,3.34,0.51,9.6,6 +8.2,0.27,0.39,7.8,0.039,49,208,0.9976,3.31,0.51,9.5,6 +7.2,0.23,0.38,14.3,0.058,55,194,0.9979,3.09,0.44,9,6 +7.2,0.23,0.38,14.3,0.058,55,194,0.9979,3.09,0.44,9,6 +7.2,0.23,0.38,14.3,0.058,55,194,0.9979,3.09,0.44,9,6 +7.2,0.23,0.38,14.3,0.058,55,194,0.9979,3.09,0.44,9,6 +6.8,0.52,0.32,13.2,0.044,54,221,0.9972,3.27,0.5,9.6,6 +7,0.26,0.59,1.4,0.037,40,120,0.9918,3.34,0.41,11.1,7 +6.2,0.25,0.21,15.55,0.039,28,159,0.9982,3.48,0.64,9.6,6 +7.3,0.32,0.23,13.7,0.05,49,197,0.9985,3.2,0.46,8.7,5 +7.7,0.31,0.26,7.8,0.031,23,90,0.9944,3.13,0.5,10.4,5 +7.1,0.21,0.37,2.4,0.026,23,100,0.9903,3.15,0.38,11.4,7 +6.8,0.24,0.34,2.7,0.047,64.5,218.5,0.9934,3.3,0.58,9.7,6 +6.9,0.4,0.56,11.2,0.043,40,142,0.9975,3.14,0.46,8.7,5 +6.1,0.18,0.36,2,0.038,20,249.5,0.9923,3.37,0.79,11.3,6 +6.8,0.21,0.27,2.1,0.03,26,139,0.99,3.16,0.61,12.6,7 +5.8,0.2,0.27,1.4,0.031,12,77,0.9905,3.25,0.36,10.9,7 +5.6,0.19,0.26,1.4,0.03,12,76,0.9905,3.25,0.37,10.9,7 +6.1,0.41,0.14,10.4,0.037,18,119,0.996,3.38,0.45,10,5 +5.9,0.21,0.28,4.6,0.053,40,199,0.9964,3.72,0.7,10,4 +8.5,0.26,0.21,16.2,0.074,41,197,0.998,3.02,0.5,9.8,3 +6.9,0.4,0.56,11.2,0.043,40,142,0.9975,3.14,0.46,8.7,5 +5.8,0.24,0.44,3.5,0.029,5,109,0.9913,3.53,0.43,11.7,3 +5.8,0.24,0.39,1.5,0.054,37,158,0.9932,3.21,0.52,9.3,6 +6.7,0.26,0.39,1.1,0.04,45,147,0.9935,3.32,0.58,9.6,8 +6.3,0.35,0.3,5.7,0.035,8,97,0.9927,3.27,0.41,11,7 +6.3,0.35,0.3,5.7,0.035,8,97,0.9927,3.27,0.41,11,7 +6.4,0.23,0.39,1.8,0.032,23,118,0.9912,3.32,0.5,11.8,6 +5.8,0.36,0.38,0.9,0.037,3,75,0.9904,3.28,0.34,11.4,4 +6.9,0.115,0.35,5.4,0.048,36,108,0.9939,3.32,0.42,10.2,6 +6.9,0.29,0.4,19.45,0.043,36,156,0.9996,2.93,0.47,8.9,5 +6.9,0.28,0.4,8.2,0.036,15,95,0.9944,3.17,0.33,10.2,5 +7.2,0.29,0.4,13.6,0.045,66,231,0.9977,3.08,0.59,9.6,6 +6.2,0.24,0.35,1.2,0.038,22,167,0.9912,3.1,0.48,10.6,6 +6.9,0.29,0.4,19.45,0.043,36,156,0.9996,2.93,0.47,8.9,5 +6.9,0.32,0.26,8.3,0.053,32,180,0.9965,3.25,0.51,9.2,6 +5.3,0.58,0.07,6.9,0.043,34,149,0.9944,3.34,0.57,9.7,5 +5.3,0.585,0.07,7.1,0.044,34,145,0.9945,3.34,0.57,9.7,6 +5.4,0.59,0.07,7,0.045,36,147,0.9944,3.34,0.57,9.7,6 +6.9,0.32,0.26,8.3,0.053,32,180,0.9965,3.25,0.51,9.2,6 +5.2,0.6,0.07,7,0.044,33,147,0.9944,3.33,0.58,9.7,5 +5.8,0.25,0.26,13.1,0.051,44,148,0.9972,3.29,0.38,9.3,5 +6.6,0.58,0.3,5.1,0.057,30,123,0.9949,3.24,0.38,9,5 +7,0.29,0.54,10.7,0.046,59,234,0.9966,3.05,0.61,9.5,5 +6.6,0.19,0.41,8.9,0.046,51,169,0.9954,3.14,0.57,9.8,6 +6.7,0.2,0.41,9.1,0.044,50,166,0.9954,3.14,0.58,9.8,6 +7.7,0.26,0.4,1.1,0.042,9,60,0.9915,2.89,0.5,10.6,5 +6.8,0.32,0.34,1.2,0.044,14,67,0.9919,3.05,0.47,10.6,4 +7,0.3,0.49,4.7,0.036,17,105,0.9916,3.26,0.68,12.4,7 +7,0.24,0.36,2.8,0.034,22,112,0.99,3.19,0.38,12.6,8 +6.1,0.31,0.58,5,0.039,36,114,0.9909,3.3,0.6,12.3,8 +6.8,0.44,0.37,5.1,0.047,46,201,0.9938,3.08,0.65,10.5,4 
+6.7,0.34,0.3,15.6,0.054,51,196,0.9982,3.19,0.49,9.3,5 +7.1,0.35,0.24,15.4,0.055,46,198,0.9988,3.12,0.49,8.8,5 +7.3,0.32,0.25,7.2,0.056,47,180,0.9961,3.08,0.47,8.8,5 +6.5,0.28,0.33,15.7,0.053,51,190,0.9978,3.22,0.51,9.7,6 +7.2,0.23,0.39,14.2,0.058,49,192,0.9979,2.98,0.48,9,7 +7.2,0.23,0.39,14.2,0.058,49,192,0.9979,2.98,0.48,9,7 +7.2,0.23,0.39,14.2,0.058,49,192,0.9979,2.98,0.48,9,7 +7.2,0.23,0.39,14.2,0.058,49,192,0.9979,2.98,0.48,9,7 +5.9,0.15,0.31,5.8,0.041,53,155,0.9945,3.52,0.46,10.5,6 +7.4,0.28,0.42,19.8,0.066,53,195,1,2.96,0.44,9.1,5 +6.2,0.28,0.22,7.3,0.041,26,157,0.9957,3.44,0.64,9.8,7 +9.1,0.59,0.38,1.6,0.066,34,182,0.9968,3.23,0.38,8.5,3 +6.3,0.33,0.27,1.2,0.046,34,175,0.9934,3.37,0.54,9.4,6 +8.3,0.39,0.7,10.6,0.045,33,169,0.9976,3.09,0.57,9.4,5 +7.2,0.19,0.46,3.8,0.041,82,187,0.9932,3.19,0.6,11.2,7 +7.5,0.17,0.44,11.3,0.046,65,146,0.997,3.17,0.45,10,6 +6.7,0.17,0.5,2.1,0.043,27,122,0.9923,3.15,0.45,10.3,6 +6.1,0.41,0,1.6,0.063,36,87,0.9914,3.27,0.67,10.8,6 +8.3,0.2,0.35,0.9,0.05,12,74,0.992,3.13,0.38,10.5,6 +6.1,0.41,0,1.6,0.063,36,87,0.9914,3.27,0.67,10.8,6 +6,0.29,0.21,1.3,0.055,42,168,0.9914,3.32,0.43,11.1,6 +7.3,0.41,0.24,6.8,0.057,41,163,0.9949,3.2,0.41,9.9,6 +7.3,0.41,0.24,6.8,0.057,41,163,0.9949,3.2,0.41,9.9,6 +7.2,0.43,0.24,6.7,0.058,40,163,0.995,3.2,0.41,9.9,5 +7.3,0.4,0.24,6.7,0.058,41,166,0.995,3.2,0.41,9.9,6 +6.2,0.33,0.27,4.9,0.036,30,134,0.9927,3.2,0.42,10.4,7 +6.2,0.31,0.26,4.8,0.037,36,148,0.9928,3.21,0.41,10.4,6 +6.1,0.36,0.27,2.1,0.035,16,100,0.9917,3.4,0.71,11.5,7 +5,0.55,0.14,8.3,0.032,35,164,0.9918,3.53,0.51,12.5,8 +7.8,0.25,0.41,3.7,0.042,37,149,0.9954,3.36,0.45,10,6 +5.7,0.36,0.21,6.7,0.038,51,166,0.9941,3.29,0.63,10,6 +5.8,0.34,0.21,6.6,0.04,50,167,0.9941,3.29,0.62,10,5 +6.8,0.28,0.6,1.1,0.132,42,127,0.9934,3.09,0.44,9.1,6 +6.8,0.25,0.34,4.7,0.031,34,134,0.9927,3.21,0.38,10.6,6 +6.6,0.24,0.35,7.7,0.031,36,135,0.9938,3.19,0.37,10.5,5 +5.9,0.3,0.47,7.85,0.03,19,133,0.9933,3.52,0.43,11.5,7 +6.1,0.125,0.25,3.3,0.04,10,69,0.9934,3.54,0.59,10.1,6 +6,0.1,0.24,1.1,0.041,15,65,0.9927,3.61,0.61,10.3,7 +6.6,0.24,0.35,7.7,0.031,36,135,0.9938,3.19,0.37,10.5,5 +6.8,0.25,0.34,4.7,0.031,34,134,0.9927,3.21,0.38,10.6,6 +6.8,0.28,0.44,9.3,0.031,35,137,0.9946,3.16,0.36,10.4,6 +8.3,0.41,0.51,2,0.046,11,207,0.993,3.02,0.55,11.4,5 +7.5,0.27,0.31,5.8,0.057,131,313,0.9946,3.18,0.59,10.5,5 +7.9,0.26,0.41,15.15,0.04,38,216,0.9976,2.96,0.6,10,6 +6.4,0.34,0.23,6.3,0.039,37,143,0.9944,3.19,0.65,10,6 +6.5,0.28,0.35,15.4,0.042,55,195,0.9978,3.23,0.5,9.6,6 +7.2,0.21,0.41,1.3,0.036,33,85,0.992,3.17,0.51,10.4,5 +6.4,0.32,0.35,4.8,0.03,34,101,0.9912,3.36,0.6,12.5,8 +6.8,0.24,0.34,4.6,0.032,37,135,0.9927,3.2,0.39,10.6,5 +6.3,0.23,0.3,1.8,0.033,16,91,0.9906,3.28,0.4,11.8,6 +6.5,0.28,0.34,9.9,0.038,30,133,0.9954,3.11,0.44,9.8,5 +5.6,0.26,0.26,5.7,0.031,12,80,0.9923,3.25,0.38,10.8,5 +6.3,0.23,0.3,1.8,0.033,16,91,0.9906,3.28,0.4,11.8,6 +6.3,0.23,0.33,1.5,0.036,15,105,0.991,3.32,0.42,11.2,6 +5.8,0.27,0.27,12.3,0.045,55,170,0.9972,3.28,0.42,9.3,6 +5.9,0.26,0.4,1.3,0.047,12,139,0.9945,3.45,0.53,10.4,5 +6.6,0.18,0.35,1.5,0.049,49,141,0.9934,3.43,0.85,10.2,7 +7.4,0.2,0.43,7.8,0.045,27,153,0.9964,3.19,0.55,9,7 +8,0.24,0.36,1.5,0.047,17,129,0.9948,3.2,0.54,10,6 +6.4,0.26,0.42,9.7,0.044,30,140,0.9962,3.18,0.47,9.1,6 +5.4,0.31,0.47,3,0.053,46,144,0.9931,3.29,0.76,10,5 +5.4,0.29,0.47,3,0.052,47,145,0.993,3.29,0.75,10,6 +7.1,0.145,0.33,4.6,0.05,33,131,0.9942,3.28,0.4,9.6,6 +5.6,0.34,0.1,1.3,0.031,20,68,0.9906,3.36,0.51,11.2,7 +6.7,0.19,0.41,15.6,0.056,75,155,0.9995,3.2,0.44,8.8,6 
+7.8,0.18,0.46,13.6,0.052,38,118,0.998,3.15,0.5,10,6 +7.6,0.17,0.45,11.2,0.054,56,137,0.997,3.15,0.47,10,5 +6.3,0.12,0.36,2.1,0.044,47,146,0.9914,3.27,0.74,11.4,7 +7.3,0.33,0.4,6.85,0.038,32,138,0.992,3.03,0.3,11.9,7 +5.5,0.335,0.3,2.5,0.071,27,128,0.9924,3.14,0.51,9.6,6 +7.3,0.33,0.4,6.85,0.038,32,138,0.992,3.03,0.3,11.9,7 +5.8,0.4,0.42,4.4,0.047,38.5,245,0.9937,3.25,0.57,9.6,6 +7.3,0.22,0.37,14.3,0.063,48,191,0.9978,2.89,0.38,9,6 +7.3,0.22,0.37,14.3,0.063,48,191,0.9978,2.89,0.38,9,6 +6.1,0.36,0.33,1.1,0.05,24,169,0.9927,3.15,0.78,9.5,6 +10,0.2,0.39,1.4,0.05,19,152,0.994,3,0.42,10.4,6 +6.9,0.24,0.34,4.7,0.04,43,161,0.9935,3.2,0.59,10.6,6 +6.4,0.24,0.32,14.9,0.047,54,162,0.9968,3.28,0.5,10.2,6 +7.1,0.365,0.14,1.2,0.055,24,84,0.9941,3.15,0.43,8.9,5 +6.8,0.15,0.3,5.3,0.05,40,127,0.9942,3.4,0.39,9.7,6 +7.3,0.22,0.37,14.3,0.063,48,191,0.9978,2.89,0.38,9,6 +6.8,0.16,0.4,2.3,0.037,18,102,0.9923,3.49,0.42,11.4,7 +6,0.26,0.32,3.5,0.028,29,113,0.9912,3.4,0.71,12.3,7 +6,0.18,0.27,1.5,0.089,40,143,0.9923,3.49,0.62,10.8,6 +6.9,0.33,0.21,1,0.053,39,148,0.9927,3.12,0.45,9.4,6 +7.7,0.29,0.48,2.3,0.049,36,178,0.9931,3.17,0.64,10.6,6 +7.1,0.39,0.35,12.5,0.044,26,72,0.9941,3.17,0.29,11.6,5 +6.9,0.33,0.21,1,0.053,39,148,0.9927,3.12,0.45,9.4,6 +7.7,0.29,0.48,2.3,0.049,36,178,0.9931,3.17,0.64,10.6,6 +6.6,0.905,0.19,0.8,0.048,17,204,0.9934,3.34,0.56,10,5 +7.2,0.27,0.27,2.4,0.048,30,149,0.9936,3.1,0.51,9.2,6 +5.1,0.33,0.22,1.6,0.027,18,89,0.9893,3.51,0.38,12.5,7 +5.1,0.33,0.22,1.6,0.027,18,89,0.9893,3.51,0.38,12.5,7 +6.4,0.31,0.28,1.5,0.037,12,119,0.9919,3.32,0.51,10.4,7 +7.3,0.2,0.44,1.4,0.045,21,98,0.9924,3.15,0.46,10,7 +5.7,0.32,0.5,2.6,0.049,17,155,0.9927,3.22,0.64,10,6 +6.4,0.31,0.28,1.5,0.037,12,119,0.9919,3.32,0.51,10.4,7 +7.3,0.2,0.44,1.4,0.045,21,98,0.9924,3.15,0.46,10,7 +7.2,0.28,0.26,12.5,0.046,48,179,0.9975,3.1,0.52,9,6 +7.5,0.35,0.28,9.6,0.051,26,157,0.9969,3.12,0.53,9.2,6 +7.2,0.27,0.27,2.4,0.048,30,149,0.9936,3.1,0.51,9.2,6 +6,0.36,0.39,3.2,0.027,20,125,0.991,3.38,0.39,11.3,7 +5.1,0.33,0.22,1.6,0.027,18,89,0.9893,3.51,0.38,12.5,7 +5,0.17,0.56,1.5,0.026,24,115,0.9906,3.48,0.39,10.8,7 +6.3,0.39,0.35,5.9,0.04,82.5,260,0.9941,3.12,0.66,10.1,5 +6.7,0.21,0.32,5.4,0.047,29,140,0.995,3.39,0.46,9.7,6 +7,0.3,0.38,14.9,0.032,60,181,0.9983,3.18,0.61,9.3,7 +7,0.3,0.38,14.9,0.032,60,181,0.9983,3.18,0.61,9.3,7 +6.5,0.36,0.32,1.1,0.031,13,66,0.9916,3.1,0.46,10.6,5 +6.1,0.55,0.15,9.8,0.031,19,125,0.9957,3.36,0.47,10.2,6 +7.3,0.24,0.43,2,0.021,20,69,0.99,3.08,0.56,12.2,6 +6.8,0.37,0.51,11.8,0.044,62,163,0.9976,3.19,0.44,8.8,5 +6.8,0.27,0.12,1.3,0.04,87,168,0.992,3.18,0.41,10,5 +8.2,0.28,0.42,1.8,0.031,30,93,0.9917,3.09,0.39,11.4,5 +6.3,0.2,0.4,1.5,0.037,35,107,0.9917,3.46,0.5,11.4,6 +5.9,0.26,0.27,18.2,0.048,52,168,0.9993,3.35,0.44,9.4,5 +6.4,0.19,0.42,2.9,0.032,32,83,0.9908,3.3,0.41,11.7,6 +6.3,0.2,0.4,1.5,0.037,35,107,0.9917,3.46,0.5,11.4,6 +6.8,0.37,0.51,11.8,0.044,62,163,0.9976,3.19,0.44,8.8,5 +6.1,0.35,0.07,1.4,0.069,22,108,0.9934,3.23,0.52,9.2,5 +7.1,0.27,0.31,18.2,0.046,55,252,1,3.07,0.56,8.7,5 +6.8,0.22,0.31,6.3,0.035,33,170,0.9918,3.24,0.66,12.6,6 +6.8,0.27,0.12,1.3,0.04,87,168,0.992,3.18,0.41,10,5 +5.8,0.28,0.34,4,0.031,40,99,0.9896,3.39,0.39,12.8,7 +6.9,0.49,0.24,1.2,0.049,13,125,0.9932,3.17,0.51,9.4,5 +6.3,0.14,0.39,1.2,0.044,26,116,0.992,3.26,0.53,10.3,6 +8.2,0.28,0.42,1.8,0.031,30,93,0.9917,3.09,0.39,11.4,5 +7.2,0.25,0.39,18.95,0.038,42,155,0.9999,2.97,0.47,9,6 +7.3,0.28,0.36,12.7,0.04,38,140,0.998,3.3,0.79,9.6,6 +7.2,0.19,0.39,1.2,0.036,32,85,0.9918,3.16,0.5,10.5,5 
+7.2,0.19,0.39,1.2,0.036,32,85,0.9918,3.16,0.5,10.5,5 +7.2,0.25,0.39,18.95,0.038,42,155,0.9999,2.97,0.47,9,6 +7.3,0.28,0.36,12.7,0.04,38,140,0.998,3.3,0.79,9.6,6 +7.4,0.21,0.27,1.2,0.041,27,99,0.9927,3.19,0.33,9.8,6 +6.8,0.26,0.22,7.7,0.047,57,210,0.9959,3.1,0.47,9,5 +7.4,0.21,0.27,1.2,0.041,27,99,0.9927,3.19,0.33,9.8,6 +7.4,0.31,0.28,1.6,0.05,33,137,0.9929,3.31,0.56,10.5,6 +7,0.22,0.31,2.7,0.03,41,136,0.9898,3.16,0.37,12.7,7 +7,0.21,0.28,8.7,0.045,37,222,0.9954,3.25,0.54,10.4,6 +7,0.21,0.28,8.6,0.045,37,221,0.9954,3.25,0.54,10.4,6 +7,0.21,0.28,8.6,0.045,37,221,0.9954,3.25,0.54,10.4,6 +6.9,0.23,0.38,8.3,0.047,47,162,0.9954,3.34,0.52,10.5,7 +7,0.21,0.28,8.7,0.045,37,222,0.9954,3.25,0.54,10.4,6 +7,0.21,0.28,8.6,0.045,37,221,0.9954,3.25,0.54,10.4,6 +6.8,0.29,0.5,13.3,0.053,48,194,0.9974,3.09,0.45,9.4,5 +7.8,0.21,0.27,1.2,0.051,20,89,0.9936,3.06,0.46,9.1,5 +7.1,0.31,0.47,13.6,0.056,54,197,0.9978,3.1,0.49,9.3,5 +6.8,0.29,0.5,13.3,0.053,48,194,0.9974,3.09,0.45,9.4,5 +6.4,0.34,0.1,1.1,0.048,19,84,0.9927,3.21,0.38,9.8,5 +7.4,0.155,0.34,2.3,0.045,73.5,214,0.9934,3.18,0.61,9.9,7 +7.2,0.55,0.09,1.5,0.108,16,151,0.9938,3.07,0.57,9.2,4 +7,0.23,0.36,7.1,0.028,31,104,0.9922,3.35,0.47,12.1,8 +6.9,0.2,0.37,6.2,0.027,24,97,0.992,3.38,0.49,12.2,7 +6.1,0.28,0.32,2.5,0.042,23,218.5,0.9935,3.27,0.6,9.8,5 +6.6,0.16,0.32,1.4,0.035,49,186,0.9906,3.35,0.64,12.4,8 +7.4,0.155,0.34,2.3,0.045,73.5,214,0.9934,3.18,0.61,9.9,7 +6.2,0.35,0.04,1.2,0.06,23,108,0.9934,3.26,0.54,9.2,5 +6.7,0.22,0.37,1.6,0.028,24,102,0.9913,3.29,0.59,11.6,7 +6.1,0.38,0.2,6.6,0.033,25,137,0.9938,3.3,0.69,10.4,6 +6,0.25,0.28,2.2,0.026,54,126,0.9898,3.43,0.65,12.9,8 +6.6,0.52,0.44,12.2,0.048,54,245,0.9975,3.26,0.54,9.3,6 +6.9,0.24,0.36,20.8,0.031,40,139,0.9975,3.2,0.33,11,6 +7.1,0.32,0.32,11,0.038,16,66,0.9937,3.24,0.4,11.5,3 +5.8,0.28,0.27,2.6,0.054,30,156,0.9914,3.53,0.42,12.4,5 +6.5,0.41,0.24,14,0.048,24,113,0.9982,3.44,0.53,9.8,6 +6.5,0.41,0.24,14,0.048,24,113,0.9982,3.44,0.53,9.8,6 +6.4,0.28,0.29,1.6,0.052,34,127,0.9929,3.48,0.56,10.5,7 +7.2,0.6,0.2,9.9,0.07,21,174,0.9971,3.03,0.54,9.1,5 +6.1,0.2,0.25,1.2,0.038,34,128,0.9921,3.24,0.44,10.1,5 +5.9,0.46,0.14,2.7,0.042,27,160,0.9931,3.46,0.51,10.6,7 +6,0.27,0.27,1.6,0.046,32,113,0.9924,3.41,0.51,10.5,7 +6.4,0.28,0.29,1.6,0.052,34,127,0.9929,3.48,0.56,10.5,7 +6.4,0.41,0.24,14,0.048,24,113,0.9982,3.44,0.53,9.8,6 +6.3,0.23,0.31,1.5,0.022,11,82,0.9892,3.3,0.4,12.9,7 +7.1,0.21,0.27,8.6,0.056,26,111,0.9956,2.95,0.52,9.5,5 +6,0.37,0.32,1,0.053,31,218.5,0.9924,3.29,0.72,9.8,6 +6.1,0.43,0.35,9.1,0.059,83,249,0.9971,3.37,0.5,8.5,5 +7.1,0.21,0.27,8.6,0.056,26,111,0.9956,2.95,0.52,9.5,5 +7,0.25,0.29,15.2,0.047,40,171,0.9982,3.22,0.45,9.3,5 +5.9,0.25,0.19,12.4,0.047,50,162,0.9973,3.35,0.38,9.5,5 +6.8,0.32,0.21,2.2,0.044,15,68,0.9932,3.17,0.39,9.4,6 +7.2,0.39,0.62,11,0.047,66,178,0.9976,3.16,0.5,8.7,5 +6.3,0.21,0.58,10,0.081,34,126,0.9962,2.95,0.46,8.9,5 +7,0.14,0.32,9,0.039,54,141,0.9956,3.22,0.43,9.4,6 +6.8,0.32,0.21,2.2,0.044,15,68,0.9932,3.17,0.39,9.4,6 +7.2,0.39,0.62,11,0.047,66,178,0.9976,3.16,0.5,8.7,5 +7.2,0.29,0.53,18.15,0.047,59,182,0.9992,3.09,0.52,9.6,5 +8.6,0.37,0.7,12.15,0.039,21,158,0.9983,3,0.73,9.3,6 +6.5,0.38,0.34,3.4,0.036,34,200,0.9937,3.14,0.76,10,5 +6.6,0.24,0.29,2,0.023,19,86,0.99,3.25,0.45,12.5,6 +7,0.17,0.31,4.8,0.034,34,132,0.9944,3.36,0.48,9.6,7 +5.5,0.16,0.22,4.5,0.03,30,102,0.9938,3.24,0.36,9.4,6 +7,0.24,0.51,11,0.029,55,227,0.9965,3.03,0.61,9.5,5 +7.4,0.28,0.36,1.1,0.028,42,105,0.9893,2.99,0.39,12.4,7 +7,0.22,0.28,1.5,0.037,29,115,0.9927,3.11,0.55,10.5,6 
+7.1,0.55,0.13,1.7,0.073,21,165,0.994,2.97,0.58,9.2,6 +6.3,0.22,0.33,1.7,0.041,67,164,0.9928,3.32,0.56,10.4,6 +6.7,0.47,0.34,8.9,0.043,31,172,0.9964,3.22,0.6,9.2,5 +5.9,0.36,0.41,1.3,0.047,45,104,0.9917,3.33,0.51,10.6,6 +5.8,0.25,0.24,13.3,0.044,41,137,0.9972,3.34,0.42,9.5,5 +6.7,0.47,0.34,8.9,0.043,31,172,0.9964,3.22,0.6,9.2,5 +6.2,0.37,0.3,6.6,0.346,79,200,0.9954,3.29,0.58,9.6,5 +6.2,0.18,0.38,1.5,0.028,36,117,0.993,3.47,0.54,9.7,6 +6,0.16,0.37,1.5,0.025,43,117,0.9928,3.46,0.51,9.7,6 +6.6,0.34,0.28,1.3,0.035,32,90,0.9916,3.1,0.42,10.7,6 +7.4,0.29,0.29,1.6,0.045,53,180,0.9936,3.34,0.68,10.5,6 +7.4,0.26,0.31,7.6,0.047,52,177,0.9962,3.13,0.45,8.9,6 +7,0.28,0.36,1,0.035,8,70,0.9899,3.09,0.46,12.1,6 +7.1,0.23,0.39,1.6,0.032,12,65,0.9898,3.25,0.4,12.7,7 +7.8,0.19,0.26,8.9,0.039,42,182,0.996,3.18,0.46,9.9,6 +6.3,0.19,0.28,1.8,0.022,28,158,0.9907,3.2,0.64,11.4,6 +6.8,0.2,0.38,4.7,0.04,27,103,0.994,3.37,0.58,10.7,6 +5.7,0.44,0.13,7,0.025,28,173,0.9913,3.33,0.48,12.5,6 +7.2,0.4,0.62,10.8,0.041,70,189,0.9976,3.08,0.49,8.6,4 +6.8,0.23,0.32,1.6,0.026,43,147,0.9904,3.29,0.54,12.5,6 +5.7,0.335,0.34,1,0.04,13,174,0.992,3.27,0.66,10,5 +7.2,0.4,0.62,10.8,0.041,70,189,0.9976,3.08,0.49,8.6,4 +7.2,0.28,0.54,16.7,0.045,54,200,0.999,3.08,0.49,9.5,6 +6.8,0.19,0.58,14.2,0.038,51,164,0.9975,3.12,0.48,9.6,6 +6.4,0.3,0.3,2.25,0.038,8,210,0.9937,3.2,0.62,9.9,6 +6.5,0.3,0.29,2.25,0.037,8,210,0.9937,3.19,0.62,9.9,5 +7.8,0.18,0.31,12.2,0.053,46,140,0.998,3.06,0.53,8.9,6 +7.8,0.18,0.31,12.2,0.053,46,140,0.998,3.06,0.53,8.9,6 +7.3,0.51,0.26,3.3,0.09,7,135,0.9944,3.01,0.52,8.8,5 +6,0.24,0.27,1.9,0.048,40,170,0.9938,3.64,0.54,10,7 +5.9,0.62,0.28,3.5,0.039,55,152,0.9907,3.44,0.44,12,6 +6,0.24,0.27,1.9,0.048,40,170,0.9938,3.64,0.54,10,7 +6.7,0.27,0.12,1.3,0.041,62,138,0.9921,3.21,0.42,10,6 +7.8,0.34,0.35,1.8,0.042,8,167,0.9908,3.11,0.41,12.1,6 +7.3,0.26,0.36,5.2,0.04,31,141,0.9931,3.16,0.59,11,6 +7.4,0.36,0.33,1.4,0.025,27,55,0.9915,3.21,0.33,11.2,6 +7.8,0.28,0.32,9,0.036,34,115,0.9952,3.17,0.39,10.3,7 +6.1,0.31,0.26,2.2,0.051,28,167,0.9926,3.37,0.47,10.4,6 +6.8,0.18,0.37,1.6,0.055,47,154,0.9934,3.08,0.45,9.1,5 +7.4,0.15,0.42,1.7,0.045,49,154,0.992,3,0.6,10.4,6 +5.9,0.13,0.28,1.9,0.05,20,78,0.9918,3.43,0.64,10.8,6 +7.2,0.34,0.34,12.6,0.048,7,41,0.9942,3.19,0.4,11.7,5 +7.9,0.19,0.26,2.1,0.039,8,143,0.9942,3.05,0.74,9.8,5 +7.9,0.19,0.26,2.1,0.039,8,143,0.9942,3.05,0.74,9.8,5 +6.9,0.25,0.4,1.3,0.038,22,101,0.9901,3.03,0.39,11.4,6 +5.8,0.36,0.32,1.7,0.033,22,96,0.9898,3.03,0.38,11.2,6 +5.6,0.35,0.37,1,0.038,6,72,0.9902,3.37,0.34,11.4,5 +5.9,0.32,0.39,3.3,0.114,24,140,0.9934,3.09,0.45,9.2,6 +7.2,0.31,0.46,5,0.04,3,29,0.9906,3.04,0.53,12.5,4 +6.1,0.28,0.22,1.8,0.034,32,116,0.9898,3.36,0.44,12.6,6 +5.2,0.36,0.02,1.6,0.031,24,104,0.9896,3.44,0.35,12.2,6 +5.6,0.19,0.47,4.5,0.03,19,112,0.9922,3.56,0.45,11.2,6 +6.4,0.1,0.35,4.9,0.048,31,103,0.9947,3.43,0.79,9.7,6 +6.4,0.18,0.48,4,0.186,64,150,0.9945,3.06,0.4,9.3,5 +7.4,0.25,0.36,13.2,0.067,53,178,0.9976,3.01,0.48,9,6 +7.4,0.25,0.36,13.2,0.067,53,178,0.9976,3.01,0.48,9,6 +7.4,0.25,0.36,13.2,0.067,53,178,0.9976,3.01,0.48,9,6 +7.9,0.345,0.51,15.3,0.047,54,171,0.9987,3.09,0.51,9.1,5 +7.9,0.345,0.51,15.3,0.047,54,171,0.9987,3.09,0.51,9.1,5 +7.4,0.25,0.36,13.2,0.067,53,178,0.9976,3.01,0.48,9,6 +6.1,0.24,0.3,1.5,0.045,22,61,0.992,3.31,0.54,10.4,5 +6.8,0.25,0.24,4.55,0.053,41,211,0.9955,3.37,0.67,9.5,6 +6.7,0.31,0.31,9.9,0.04,10,175,0.9953,3.46,0.55,11.4,4 +7.2,0.46,0.65,10.4,0.05,76,192,0.9976,3.16,0.42,8.7,5 +5.5,0.35,0.35,1.1,0.045,14,167,0.992,3.34,0.68,9.9,6 
+6.7,0.24,0.41,8.7,0.036,29,148,0.9952,3.22,0.62,9.9,6 +6.8,0.28,0.17,13.9,0.047,49,162,0.9983,3.21,0.51,9,6 +6.4,0.16,0.22,1.4,0.04,41,149,0.9933,3.49,0.58,10,6 +6.3,0.26,0.24,7.2,0.039,38,172,0.9958,3.49,0.64,9.7,6 +7.7,0.22,0.42,1.9,0.052,10,87,0.9922,3.3,0.49,11.8,6 +6.5,0.18,0.31,1.7,0.044,30,127,0.9928,3.49,0.5,10.2,7 +7.2,0.46,0.65,10.4,0.05,76,192,0.9976,3.16,0.42,8.7,5 +7,0.3,0.51,13.6,0.05,40,168,0.9976,3.07,0.52,9.6,7 +9.2,0.25,0.34,1.2,0.026,31,93,0.9916,2.93,0.37,11.3,7 +7.8,0.28,0.34,1.6,0.028,32,118,0.9901,3,0.38,12.1,7 +7,0.3,0.51,13.6,0.05,40,168,0.9976,3.07,0.52,9.6,7 +7.8,0.28,0.34,1.6,0.028,32,118,0.9901,3,0.38,12.1,7 +9.2,0.25,0.34,1.2,0.026,31,93,0.9916,2.93,0.37,11.3,7 +8.4,0.35,0.71,12.2,0.046,22,160,0.9982,2.98,0.65,9.4,5 +6.1,0.41,0.24,1.6,0.049,16,137,0.993,3.32,0.5,10.4,6 +5.9,0.21,0.24,12.1,0.044,53,165,0.9969,3.25,0.39,9.5,5 +7.2,0.34,0.44,4.2,0.047,51,144,0.991,3.01,0.76,12.3,6 +6.7,0.21,0.42,9.1,0.049,31,150,0.9953,3.12,0.74,9.9,7 +5.9,0.37,0.1,1.6,0.057,39,128,0.9924,3.24,0.48,10.1,5 +7.7,0.34,0.27,8.8,0.063,39,184,0.9969,3.09,0.63,9.2,6 +7.4,0.3,0.22,1.4,0.046,16,135,0.9928,3.08,0.77,10.4,7 +6.8,0.51,0.3,4.2,0.066,38,165,0.9945,3.2,0.42,9.1,5 +7.8,0.22,0.38,10.3,0.059,28,99,0.9967,3.12,0.47,10,6 +7.2,0.35,0.34,12.4,0.051,6,37,0.9944,3.13,0.39,11.5,6 +6,0.26,0.5,2.2,0.048,59,153,0.9928,3.08,0.61,9.8,5 +6.1,0.26,0.51,2.2,0.05,61,154,0.9929,3.08,0.6,9.8,6 +6.5,0.28,0.27,5.2,0.04,44,179,0.9948,3.19,0.69,9.4,6 +7.4,0.41,0.66,10.8,0.051,77,194,0.9976,3.05,0.46,8.7,5 +6.5,0.28,0.29,2.7,0.038,26,107,0.9912,3.32,0.41,11.6,7 +6.7,0.34,0.54,16.3,0.047,44,181,0.9987,3.04,0.56,8.8,5 +7.2,0.2,0.34,2.7,0.032,49,151,0.99,3.16,0.39,12.7,7 +7.4,0.2,0.33,1.9,0.035,39,138,0.991,3.17,0.44,11.7,7 +8.2,0.22,0.3,1.8,0.047,47,185,0.9933,3.13,0.5,10.2,6 +8.2,0.23,0.29,1.8,0.047,47,187,0.9933,3.13,0.5,10.2,6 +7.1,0.22,0.33,2.8,0.033,48,153,0.9899,3.15,0.38,12.7,7 +6.5,0.28,0.29,2.7,0.038,26,107,0.9912,3.32,0.41,11.6,7 +6,0.38,0.26,6,0.034,42,134,0.9912,3.38,0.38,12.3,7 +7.4,0.41,0.66,10.8,0.051,77,194,0.9976,3.05,0.46,8.7,5 +5.7,0.18,0.22,4.2,0.042,25,111,0.994,3.35,0.39,9.4,5 +7.3,0.3,0.22,6.4,0.056,44,168,0.9947,3.13,0.35,10.1,6 +7.4,0.24,0.22,10.7,0.042,26,81,0.9954,2.86,0.36,9.7,6 +6.6,0.25,0.3,1.6,0.046,32,134,0.993,3.42,0.51,10.1,7 +7.4,0.24,0.22,10.7,0.042,26,81,0.9954,2.86,0.36,9.7,6 +7.4,0.26,0.3,7.9,0.049,38,157,0.9963,3.13,0.48,8.9,6 +6.1,0.32,0.25,1.7,0.034,37,136,0.992,3.47,0.5,10.8,7 +6.9,0.28,0.27,2.1,0.036,42,121,0.9926,3.42,0.49,10.8,7 +7,0.23,0.33,5.8,0.04,25,136,0.995,3.19,0.58,9.5,6 +7.1,0.31,0.5,14.5,0.059,6,148,0.9983,2.94,0.44,9.1,5 +7.3,0.2,0.37,1.2,0.037,48,119,0.992,3.32,0.49,10.9,6 +6.9,0.41,0.33,10.1,0.043,28,152,0.9968,3.2,0.52,9.4,5 +6.4,0.45,0.07,1.1,0.03,10,131,0.9905,2.97,0.28,10.8,5 +6.4,0.475,0.06,1,0.03,9,131,0.9904,2.97,0.29,10.8,5 +6.3,0.27,0.38,0.9,0.051,7,140,0.9926,3.45,0.5,10.5,7 +6.9,0.41,0.33,10.1,0.043,28,152,0.9968,3.2,0.52,9.4,5 +7,0.29,0.37,4.9,0.034,26,127,0.9928,3.17,0.44,10.8,6 +5.9,0.27,0.29,11.4,0.036,31,115,0.9949,3.35,0.48,10.5,8 +6.9,0.19,0.4,1.4,0.036,14,55,0.9909,3.08,0.68,11.5,7 +6.7,0.3,0.35,1.4,0.18,36,160,0.9937,3.11,0.54,9.4,6 +7.2,0.24,0.4,1.4,0.045,31,106,0.9914,2.88,0.38,10.8,6 +6.4,0.45,0.07,1.1,0.03,10,131,0.9905,2.97,0.28,10.8,5 +6.4,0.475,0.06,1,0.03,9,131,0.9904,2.97,0.29,10.8,5 +6.3,0.26,0.49,1.5,0.052,34,134,0.9924,2.99,0.61,9.8,6 +6.3,0.26,0.49,1.5,0.052,34,134,0.9924,2.99,0.61,9.8,6 +7.3,0.25,0.29,7.5,0.049,38,158,0.9965,3.43,0.38,9.6,5 +7.3,0.25,0.29,7.5,0.049,38,158,0.9965,3.43,0.38,9.6,5 
+6.1,0.28,0.25,17.75,0.044,48,161,0.9993,3.34,0.48,9.5,5 +7.4,0.37,0.35,5.7,0.061,12,94,0.9965,3.48,0.69,10.7,6 +6.5,0.36,0.28,3.2,0.037,29,119,0.9908,3.25,0.65,12.4,8 +7.4,0.24,0.4,4.3,0.032,9,95,0.992,3.09,0.39,11.1,6 +7.5,0.23,0.68,11,0.047,37,133,0.9978,2.99,0.38,8.8,5 +7.5,0.21,0.68,10.9,0.045,38,133,0.9978,3,0.36,8.7,5 +7.5,0.21,0.68,10.9,0.045,38,133,0.9978,3,0.36,8.7,5 +7.5,0.23,0.68,11,0.047,37,133,0.9978,2.99,0.38,8.8,5 +7.8,0.32,0.33,2.4,0.037,18,101,0.9912,3.21,0.65,11.7,7 +7.8,0.26,0.27,1.9,0.051,52,195,0.9928,3.23,0.5,10.9,6 +7.7,0.24,0.27,1.8,0.051,52,190,0.9928,3.23,0.5,10.8,6 +7.4,0.19,0.3,1.4,0.057,33,135,0.993,3.12,0.5,9.6,6 +6.5,0.46,0.41,16.8,0.084,59,222,0.9993,3.18,0.58,9,5 +6.5,0.26,0.43,8.9,0.083,50,171,0.9965,2.85,0.5,9,5 +5.3,0.32,0.12,6.6,0.043,22,141,0.9937,3.36,0.6,10.4,6 +7.2,0.24,0.34,1.1,0.045,3,64,0.9913,3.23,0.51,11.4,5 +6,0.36,0.06,1.4,0.066,27,128,0.9934,3.26,0.55,9.3,5 +6.2,0.24,0.29,13.3,0.039,49,130,0.9952,3.33,0.46,11,8 +7.6,0.56,0.12,10.4,0.096,22,177,0.9983,3.32,0.45,9.1,4 +7,0.32,0.24,6.2,0.048,31,228,0.9957,3.23,0.62,9.4,6 +7,0.32,0.24,6.2,0.048,31,228,0.9957,3.23,0.62,9.4,6 +5.8,0.31,0.33,1.2,0.036,23,99,0.9916,3.18,0.6,10.5,6 +7,0.23,0.42,18.05,0.05,35,144,0.9999,3.22,0.42,8.8,5 +7,0.23,0.42,18.05,0.05,35,144,0.9999,3.22,0.42,8.8,5 +6.9,0.24,0.33,4.8,0.04,16,131,0.9936,3.26,0.64,10.7,6 +6,0.29,0.2,12.6,0.045,45,187,0.9972,3.33,0.42,9.5,5 +6.1,0.17,0.28,4.5,0.033,46,150,0.9933,3.43,0.49,10.9,6 +5.9,0.14,0.25,4.5,0.027,34,140,0.9934,3.49,0.51,10.8,6 +6.2,0.17,0.28,4.7,0.037,39,133,0.9931,3.41,0.46,10.8,7 +7.4,0.28,0.25,11.9,0.053,25,148,0.9976,3.1,0.62,9.2,5 +5.6,0.35,0.14,5,0.046,48,198,0.9937,3.3,0.71,10.3,5 +5.8,0.335,0.14,5.8,0.046,49,197,0.9937,3.3,0.71,10.3,5 +5.6,0.235,0.29,1.2,0.047,33,127,0.991,3.34,0.5,11,7 +6.1,0.28,0.25,12.9,0.054,34,189,0.9979,3.25,0.43,9,4 +6.3,0.21,0.33,13.9,0.046,68,179,0.9971,3.36,0.5,10.4,6 +6.4,0.24,0.28,11.5,0.05,34,163,0.9969,3.31,0.45,9.5,5 +6.4,0.24,0.29,11.4,0.051,32,166,0.9968,3.31,0.45,9.5,5 +6.3,0.26,0.25,7.8,0.058,44,166,0.9961,3.24,0.41,9,5 +6.5,0.33,0.72,1.1,0.061,7,151,0.993,3.09,0.57,9.5,4 +7.4,0.105,0.34,12.2,0.05,57,146,0.9973,3.16,0.37,9,6 +6,0.32,0.12,5.9,0.041,34,190,0.9944,3.16,0.72,10,5 +7.1,0.26,0.34,14.4,0.067,35,189,0.9986,3.07,0.53,9.1,7 +7.1,0.26,0.34,14.4,0.067,35,189,0.9986,3.07,0.53,9.1,7 +7.1,0.26,0.34,14.4,0.067,35,189,0.9986,3.07,0.53,9.1,7 +7.1,0.26,0.34,14.4,0.067,35,189,0.9986,3.07,0.53,9.1,7 +5.9,0.24,0.26,12.3,0.053,34,134,0.9972,3.34,0.45,9.5,6 +6.5,0.21,0.37,2.5,0.048,70,138,0.9917,3.33,0.75,11.4,7 +7.7,0.27,0.35,5.3,0.03,30,117,0.992,3.11,0.42,12.2,6 +9,0.27,0.35,4.9,0.028,27,95,0.9932,3.04,0.4,11.3,6 +7.3,0.34,0.21,3.2,0.05,14,136,0.9936,3.25,0.44,10.2,5 +6.6,0.27,0.25,3.1,0.052,41,188,0.9915,3.24,0.4,11.3,5 +6.8,0.29,0.16,1.4,0.038,122.5,234.5,0.9922,3.15,0.47,10,4 +7.1,0.28,0.26,1.9,0.049,12,86,0.9934,3.15,0.38,9.4,5 +6.8,0.25,0.34,14,0.032,47,133,0.9952,3.37,0.5,12.2,7 +7,0.57,0.1,8.3,0.094,23,188,0.9972,3.4,0.47,9.2,4 +7.1,0.28,0.26,1.9,0.049,12,86,0.9934,3.15,0.38,9.4,5 +7.1,0.17,0.38,7.4,0.052,49,182,0.9958,3.35,0.52,9.6,6 +7.8,0.28,0.22,1.4,0.056,24,130,0.9944,3.28,0.48,9.5,5 +6.8,0.22,0.37,1.7,0.036,38,195,0.9908,3.35,0.72,12.5,6 +7.1,0.17,0.38,7.4,0.052,49,182,0.9958,3.35,0.52,9.6,6 +6.1,0.14,0.25,1.3,0.047,37,173,0.9925,3.35,0.46,10,6 +6.4,0.24,0.5,11.6,0.047,60,211,0.9966,3.18,0.57,9.3,5 +7.8,0.42,0.26,9.2,0.058,34,199,0.9972,3.14,0.55,9.3,6 +6.6,0.28,0.36,1.7,0.038,22,101,0.9912,3.29,0.57,11.6,6 +7.1,0.32,0.34,14.5,0.039,46,150,0.995,3.38,0.5,12.5,8 
+6.7,0.31,0.3,2.1,0.038,18,130,0.9928,3.36,0.63,10.6,6 +6.4,0.32,0.5,10.7,0.047,57,206,0.9968,3.08,0.6,9.4,5 +6.1,0.28,0.25,6.9,0.056,44,201,0.9955,3.19,0.4,9.1,6 +5.9,0.29,0.25,12,0.057,48,224,0.9981,3.23,0.41,9,6 +5.8,0.32,0.38,4.75,0.033,23,94,0.991,3.42,0.42,11.8,7 +5.8,0.32,0.38,4.75,0.033,23,94,0.991,3.42,0.42,11.8,7 +5.7,0.32,0.38,4.75,0.033,23,94,0.991,3.42,0.42,11.8,7 +6.7,0.28,0.14,1.4,0.043,64,159,0.992,3.17,0.39,10,5 +6.8,0.34,0.69,1.3,0.058,12,171,0.9931,3.06,0.47,9.7,5 +5.9,0.25,0.25,11.3,0.052,30,165,0.997,3.24,0.44,9.5,6 +6.4,0.27,0.32,4.5,0.24,61,174,0.9948,3.12,0.48,9.4,5 +8.1,0.46,0.31,1.7,0.052,50,183,0.9923,3.03,0.42,11.2,5 +6.2,0.36,0.26,13.2,0.051,54,201,0.9976,3.25,0.46,9,5 +6.8,0.22,0.35,5.5,0.043,21,114,0.9938,3.3,0.53,10.7,7 +6.8,0.67,0.3,13,0.29,22,193,0.9984,3.08,0.67,9,4 +7.2,0.28,0.3,10.7,0.044,61,222,0.9972,3.14,0.5,9.1,6 +6.7,0.17,0.37,2,0.039,34,125,0.9922,3.26,0.6,10.8,7 +6.9,0.2,0.34,1.9,0.043,25,136,0.9935,3.31,0.6,10.1,4 +6.1,0.36,0.16,6.4,0.037,36,198,0.9944,3.17,0.62,9.9,6 +6,0.36,0.16,6.3,0.036,36,191,0.9942,3.17,0.62,9.8,5 +5.9,0.37,0.14,6.3,0.036,34,185,0.9944,3.17,0.63,9.8,5 +7.6,0.29,0.58,17.5,0.041,51,225,0.9997,3.16,0.66,9.5,6 +6.3,0.34,0.28,14.7,0.047,49,198,0.9977,3.23,0.46,9.5,5 +6.7,0.19,0.34,1,0.022,22,94,0.9912,3.23,0.57,11.1,6 +7.5,0.31,0.51,14.8,0.039,62,204,0.9982,3.06,0.6,9.5,5 +7.5,0.31,0.51,14.8,0.039,62,204,0.9982,3.06,0.6,9.5,5 +7.4,0.31,0.48,14.2,0.042,62,204,0.9983,3.06,0.59,9.4,5 +8.4,0.4,0.7,13.1,0.042,29,197,0.998,3.06,0.64,9.7,5 +5.9,0.34,0.22,2.4,0.03,19,135,0.9894,3.41,0.78,13.9,7 +6.6,0.38,0.18,1.2,0.042,20,84,0.9927,3.22,0.45,10.1,4 +6.4,0.33,0.28,1.1,0.038,30,110,0.9917,3.12,0.42,10.5,6 +5.6,0.25,0.26,3.6,0.037,18,115,0.9904,3.42,0.5,12.6,6 +8.6,0.27,0.46,6.1,0.032,13,41,0.993,2.89,0.34,10.9,5 +6.2,0.31,0.21,6.3,0.041,50,218,0.9941,3.15,0.6,10,5 +7.2,0.18,0.45,4.4,0.046,57,166,0.9943,3.13,0.62,11.2,6 +7.7,0.2,0.44,13.9,0.05,44,130,0.99855,3.11,0.48,10,6 +6.2,0.47,0.21,1,0.044,13,98,0.99345,3.14,0.46,9.2,5 +6.1,0.25,0.24,12.1,0.046,51,172,0.998,3.35,0.45,9.5,5 +8.2,0.27,0.43,1.6,0.035,31,128,0.9916,3.1,0.5,12.3,6 +8.2,0.27,0.43,1.6,0.035,31,128,0.9916,3.1,0.5,12.3,6 +6.4,0.31,0.39,7.5,0.04,57,213,0.99475,3.32,0.43,10,5 +6,0.39,0.26,2.7,0.038,39,187,0.99325,3.41,0.5,10.8,6 +6.2,0.21,0.27,1.7,0.038,41,150,0.9933,3.49,0.71,10.5,7 +7.7,0.42,0.31,9.2,0.048,22,221,0.9969,3.06,0.61,9.2,6 +7,0.27,0.41,18.75,0.042,34,157,1.0002,2.96,0.5,9.1,5 +6.2,0.21,0.27,1.7,0.038,41,150,0.9933,3.49,0.71,10.5,7 +7.4,0.29,0.5,1.8,0.042,35,127,0.9937,3.45,0.5,10.2,7 +6.6,0.29,0.44,9,0.053,62,178,0.99685,3.02,0.45,8.9,5 +6,0.3,0.44,1.5,0.046,15,182,0.99455,3.5,0.52,10.4,5 +6.9,0.31,0.34,1.6,0.032,23,128,0.9917,3.37,0.47,11.7,6 +6.6,0.33,0.31,1.3,0.02,29,89,0.99035,3.26,0.44,12.4,8 +7.8,0.3,0.4,1.8,0.028,23,122,0.9914,3.14,0.39,10.9,7 +6.4,0.39,0.21,1.2,0.041,35,136,0.99225,3.15,0.46,10.2,5 +6.4,0.24,0.31,2.8,0.038,41,114,0.99155,3.37,0.66,11.7,7 +7,0.21,0.34,8,0.057,19,101,0.9954,2.99,0.59,9.4,5 +6.4,0.16,0.31,5.3,0.043,42,157,0.99455,3.35,0.47,10.5,5 +6,0.33,0.27,0.8,0.185,12,188,0.9924,3.12,0.62,9.4,5 +6.5,0.23,0.33,13.8,0.042,25,139,0.99695,3.35,0.56,10.4,6 +6.2,0.25,0.48,10,0.044,78,240,0.99655,3.25,0.47,9.5,6 +8.8,0.28,0.45,6,0.022,14,49,0.9934,3.01,0.33,11.1,7 +6.6,0.25,0.3,14.4,0.052,40,183,0.998,3.02,0.5,9.1,6 +6.9,0.38,0.25,9.8,0.04,28,191,0.9971,3.28,0.61,9.2,5 +6.4,0.25,0.3,5.5,0.038,15,129,0.9948,3.14,0.49,9.6,6 +6.6,0.25,0.3,14.4,0.052,40,183,0.998,3.02,0.5,9.1,6 +6.9,0.38,0.25,9.8,0.04,28,191,0.9971,3.28,0.61,9.2,5 
+7.1,0.21,0.31,3.8,0.021,40,142,0.99215,3.17,0.39,10.8,7 +6.4,0.25,0.3,5.5,0.038,15,129,0.9948,3.14,0.49,9.6,6 +6.9,0.39,0.4,4.6,0.022,5,19,0.9915,3.31,0.37,12.6,3 +5.8,0.2,0.3,1.5,0.031,21,57,0.99115,3.44,0.55,11,6 +7,0.2,0.37,2,0.03,26,136,0.9932,3.28,0.61,10.2,6 +5.9,0.26,0.25,12.5,0.034,38,152,0.9977,3.33,0.43,9.4,5 +7.4,0.38,0.27,7.5,0.041,24,160,0.99535,3.17,0.43,10,5 +7.4,0.2,1.66,2.1,0.022,34,113,0.99165,3.26,0.55,12.2,6 +7,0.21,0.34,8.5,0.033,31,253,0.9953,3.22,0.56,10.5,6 +7.2,0.29,0.4,7.6,0.024,56,177,0.9928,3.04,0.32,11.5,6 +6.9,0.18,0.38,8.1,0.049,44,176,0.9958,3.3,0.54,9.8,6 +7.3,0.3,0.42,7.35,0.025,51,175,0.9928,3.04,0.32,11.4,6 +7.2,0.29,0.4,7.6,0.024,56,177,0.9928,3.04,0.32,11.5,6 +6.9,0.2,0.5,10,0.036,78,167,0.9964,3.15,0.55,10.2,6 +6.7,0.2,0.42,14,0.038,83,160,0.9987,3.16,0.5,9.4,6 +7,0.21,0.34,8.5,0.033,31,253,0.9953,3.22,0.56,10.5,6 +5.9,0.35,0.47,2.2,0.11,14,138,0.9932,3.09,0.5,9.1,5 +7.1,0.28,0.44,1.8,0.032,32,107,0.9907,3.25,0.48,12.2,7 +5.8,0.25,0.28,11.1,0.056,45,175,0.99755,3.42,0.43,9.5,5 +6.8,0.22,0.37,15.2,0.051,68,178,0.99935,3.4,0.85,9.3,6 +7.1,0.14,0.4,1.2,0.051,55,136,0.9932,3.3,0.96,9.8,7 +7.1,0.13,0.4,1.2,0.047,54,134,0.9932,3.3,0.97,9.8,7 +6.9,0.18,0.38,8.1,0.049,44,176,0.9958,3.3,0.54,9.8,6 +7,0.2,0.38,8.1,0.05,42,173,0.99585,3.3,0.54,9.8,6 +6.8,0.24,0.49,19.3,0.057,55,247,1.00055,3,0.56,8.7,5 +5,0.44,0.04,18.6,0.039,38,128,0.9985,3.37,0.57,10.2,6 +6.3,0.3,0.28,5,0.042,36,168,0.99505,3.22,0.69,9.5,6 +7.2,0.27,0.42,1.6,0.05,35,135,0.992,2.94,0.46,11,6 +6.7,0.5,0.63,13.4,0.078,81,238,0.9988,3.08,0.44,9.2,5 +6.8,0.2,0.36,1.6,0.028,7,46,0.99175,3.21,0.6,10.9,6 +6.7,0.11,0.34,8.8,0.043,41,113,0.9962,3.42,0.4,9.3,7 +6.7,0.11,0.34,8.8,0.043,41,113,0.9962,3.42,0.4,9.3,7 +6.8,0.12,0.31,5.2,0.045,29,120,0.9942,3.41,0.46,9.8,7 +6.6,0.16,0.57,1.1,0.13,58,140,0.9927,3.12,0.39,9.3,7 +6.6,0.21,0.6,1.1,0.135,61,144,0.9927,3.12,0.39,9.3,7 +6.1,0.27,0.3,16.7,0.039,49,172,0.99985,3.4,0.45,9.4,5 +9.1,0.27,0.45,10.6,0.035,28,124,0.997,3.2,0.46,10.4,9 +6.4,0.225,0.48,2.2,0.115,29,104,0.9918,3.24,0.58,12.1,6 +8.3,0.14,0.45,1.5,0.039,18,98,0.99215,3.02,0.56,11,6 +7.2,0.23,0.19,13.7,0.052,47,197,0.99865,3.12,0.53,9,5 +6.9,0.22,0.37,15,0.053,59,178,0.9992,3.37,0.82,9.5,7 +8.1,0.17,0.44,14.1,0.053,43,145,1.0006,3.28,0.75,8.8,8 +6,0.395,0,1.4,0.042,7,55,0.99135,3.37,0.38,11.2,4 +7.8,0.29,0.22,9.5,0.056,44,213,0.99715,3.08,0.61,9.3,6 +6.9,0.22,0.37,15,0.053,59,178,0.9992,3.37,0.82,9.5,7 +8.1,0.17,0.44,14.1,0.053,43,145,1.0006,3.28,0.75,8.8,8 +7.2,0.23,0.19,13.7,0.052,47,197,0.99865,3.12,0.53,9,5 +7.6,0.3,0.27,10.6,0.039,31,119,0.99815,3.27,0.3,9.3,6 +7.7,0.34,0.28,11,0.04,31,117,0.99815,3.27,0.29,9.2,6 +7.7,0.34,0.28,11,0.04,31,117,0.99815,3.27,0.29,9.2,6 +5.8,0.34,0.16,7,0.037,26,116,0.9949,3.46,0.45,10,7 +7.6,0.3,0.27,10.6,0.039,31,119,0.99815,3.27,0.3,9.3,6 +7.7,0.34,0.28,11,0.04,31,117,0.99815,3.27,0.29,9.2,6 +5.9,0.24,0.3,2,0.033,28,92,0.99225,3.39,0.69,10.9,7 +6.4,0.46,0.08,4.9,0.046,34,144,0.99445,3.1,0.56,10,5 +5.9,0.24,0.3,2,0.033,28,92,0.99225,3.39,0.69,10.9,7 +7.4,0.32,0.27,1.4,0.049,38,173,0.99335,3.03,0.52,9.3,5 +7.2,0.31,0.26,7.3,0.05,37,157,0.99625,3.09,0.43,9,5 +7.8,0.42,0.23,8.8,0.054,42,215,0.9971,3.02,0.58,9.2,6 +6.9,0.24,0.33,12.5,0.046,47,153,0.9983,3.28,0.77,9.6,6 +5.4,0.18,0.24,4.8,0.041,30,113,0.99445,3.42,0.4,9.4,6 +6,0.18,0.31,1.4,0.036,14,75,0.99085,3.34,0.58,11.1,8 +7.8,0.27,0.58,11.2,0.036,44,161,0.9977,3.06,0.41,8.9,6 +6,0.28,0.49,6.8,0.048,61,222,0.9953,3.19,0.47,9.3,5 +6.8,0.39,0.35,11.6,0.044,57,220,0.99775,3.07,0.53,9.3,5 
+6.6,0.21,0.31,11.4,0.039,46,165,0.99795,3.41,0.44,9.8,7 +7.3,0.32,0.34,6.6,0.032,24,112,0.99505,3.22,0.46,9.8,6 +7.8,0.27,0.58,11.2,0.036,44,161,0.9977,3.06,0.41,8.9,6 +6.4,0.31,0.26,13.2,0.046,57,205,0.9975,3.17,0.41,9.6,5 +6.2,0.29,0.26,13.1,0.046,55,204,0.99745,3.16,0.41,9.6,6 +6,0.39,0.17,12,0.046,65,246,0.9976,3.15,0.38,9,6 +6.2,0.3,0.26,13.4,0.046,57,206,0.99775,3.17,0.43,9.5,6 +6,0.28,0.49,6.8,0.048,61,222,0.9953,3.19,0.47,9.3,5 +6,0.41,0.05,1.5,0.063,17,120,0.9932,3.21,0.56,9.2,6 +6.4,0.35,0.28,1.1,0.055,9,160,0.99405,3.42,0.5,9.1,7 +6.5,0.26,0.32,16.5,0.045,44,166,1,3.38,0.46,9.5,6 +7.9,0.35,0.24,15.6,0.072,44,229,0.99785,3.03,0.59,10.5,6 +6.2,0.3,0.17,2.8,0.04,24,125,0.9939,3.01,0.46,9,5 +8.4,0.18,0.42,5.1,0.036,7,77,0.9939,3.16,0.52,11.7,5 +6.6,0.56,0.22,8.9,0.034,27,133,0.99675,3.2,0.51,9.1,5 +6.2,0.3,0.17,2.8,0.04,24,125,0.9939,3.01,0.46,9,5 +6.6,0.56,0.22,8.9,0.034,27,133,0.99675,3.2,0.51,9.1,5 +6.6,0.36,0.29,1.6,0.021,24,85,0.98965,3.41,0.61,12.4,9 +7.3,0.655,0.2,10.2,0.071,28,212,0.9971,2.96,0.58,9.2,6 +6.8,0.18,0.21,5.4,0.053,34,104,0.99445,3.3,0.43,9.4,5 +6.7,0.19,0.23,6.2,0.047,36,117,0.9945,3.34,0.43,9.6,6 +8.4,0.18,0.42,5.1,0.036,7,77,0.9939,3.16,0.52,11.7,5 +7,0.21,0.37,7.2,0.042,36,167,0.9958,3.26,0.56,9.8,6 +6.8,0.25,0.38,8.1,0.046,24,155,0.9956,3.33,0.59,10.2,6 +7.4,0.24,0.36,2,0.031,27,139,0.99055,3.28,0.48,12.5,9 +7.1,0.16,0.36,10.7,0.044,20,90,0.9959,3.16,0.44,10.9,7 +7.1,0.16,0.36,1.2,0.043,21,90,0.9925,3.16,0.42,11,7 +7.3,0.205,0.31,1.7,0.06,34,110,0.9963,3.72,0.69,10.5,6 +7.4,0.17,0.4,5.5,0.037,34,161,0.9935,3.05,0.62,11.5,4 +7.3,0.3,0.34,2.7,0.044,34,108,0.99105,3.36,0.53,12.8,8 +6.9,0.25,0.34,1.3,0.035,27,82,0.99045,3.18,0.44,12.2,6 +7.3,0.205,0.31,1.7,0.06,34,110,0.9963,3.72,0.69,10.5,6 +7.5,0.42,0.34,4.3,0.04,34,108,0.99155,3.14,0.45,12.8,8 +7.3,0.25,0.36,2.1,0.034,30,177,0.99085,3.25,0.4,11.9,8 +7.3,0.25,0.36,2.1,0.034,30,177,0.99085,3.25,0.4,11.9,8 +7.3,0.25,0.36,2.1,0.034,30,177,0.99085,3.25,0.4,11.9,8 +7.5,0.34,0.35,6,0.034,12,126,0.9924,3.16,0.39,12,7 +7.6,0.33,0.35,6.3,0.036,12,126,0.9924,3.16,0.39,12,7 +8.7,0.23,0.32,13.4,0.044,35,169,0.99975,3.12,0.47,8.8,7 +8.7,0.23,0.32,13.4,0.044,35,169,0.99975,3.12,0.47,8.8,7 +6.9,0.19,0.35,1.7,0.036,33,101,0.99315,3.21,0.54,10.8,7 +7.3,0.21,0.29,1.6,0.034,29,118,0.9917,3.3,0.5,11,8 +7.3,0.21,0.29,1.6,0.034,29,118,0.9917,3.3,0.5,11,8 +6.6,0.22,0.37,15.4,0.035,62,153,0.99845,3.02,0.4,9.3,5 +9.2,0.34,0.27,1.2,0.026,17,73,0.9921,3.08,0.39,10.8,5 +8.7,0.23,0.32,13.4,0.044,35,169,0.99975,3.12,0.47,8.8,7 +6,0.2,0.24,1.8,0.03,30,105,0.9909,3.31,0.47,11.5,6 +6.9,0.19,0.35,1.7,0.036,33,101,0.99315,3.21,0.54,10.8,7 +8.2,0.38,0.49,13.6,0.042,58,166,0.99855,3.1,0.54,9.4,5 +6.9,0.18,0.36,1.3,0.036,40,117,0.9934,3.27,0.95,9.5,7 +7.7,0.34,0.58,11.1,0.039,41,151,0.9978,3.06,0.49,8.6,5 +6.9,0.18,0.36,1.3,0.036,40,117,0.9934,3.27,0.95,9.5,7 +7.4,0.2,0.35,2.1,0.038,30,116,0.9949,3.49,0.77,10.3,7 +8.2,0.38,0.49,13.6,0.042,58,166,0.99855,3.1,0.54,9.4,5 +8.2,0.4,0.48,13.7,0.042,59,169,0.9986,3.1,0.52,9.4,5 +6.7,0.22,0.39,10.2,0.038,60,149,0.99725,3.17,0.54,10,7 +6.6,0.3,0.3,4.8,0.17,60,166,0.9946,3.18,0.47,9.4,5 +8.1,0.27,0.35,1.7,0.03,38,103,0.99255,3.22,0.63,10.4,8 +7.3,0.25,0.42,14.2,0.041,57,182,0.9996,3.29,0.75,9.1,7 +4.8,0.34,0,6.5,0.028,33,163,0.9939,3.36,0.61,9.9,6 +6.2,0.28,0.33,1.7,0.029,24,111,0.99,3.24,0.5,12.1,6 +4.8,0.33,0,6.5,0.028,34,163,0.9937,3.35,0.61,9.9,5 +6.1,0.27,0.33,2.2,0.021,26,117,0.9886,3.12,0.3,12.5,6 +6.9,0.18,0.36,1.3,0.036,40,117,0.9934,3.27,0.95,9.5,7 
+7.8,0.18,0.46,12.6,0.042,41,143,1,3.24,0.76,8.5,8 +7.3,0.28,0.42,14.4,0.04,49,173,0.9994,3.28,0.82,9,7 +7.3,0.24,0.29,1.2,0.037,37,97,0.9926,3.19,0.7,10.1,6 +6,0.45,0.65,9.7,0.08,11,159,0.9956,3.04,0.48,9.4,5 +7.7,0.34,0.58,11.1,0.039,41,151,0.9978,3.06,0.49,8.6,5 +6.3,0.26,0.21,4,0.03,24,125,0.9915,3.06,0.34,10.7,6 +10.3,0.17,0.47,1.4,0.037,5,33,0.9939,2.89,0.28,9.6,3 +7.7,0.15,0.29,1.3,0.029,10,64,0.9932,3.35,0.39,10.1,5 +7.1,0.21,0.32,2.2,0.037,28,141,0.993,3.2,0.57,10,7 +6.9,0.36,0.34,4.2,0.018,57,119,0.9898,3.28,0.36,12.7,9 +6,0.28,0.34,1.6,0.119,33,104,0.9921,3.19,0.38,10.2,6 +6.2,0.16,0.54,1.4,0.126,37,110,0.9932,3.23,0.37,8.9,6 +6.9,0.12,0.36,2.2,0.037,18,111,0.9919,3.41,0.82,11.9,8 +7.1,0.21,0.32,2.2,0.037,28,141,0.993,3.2,0.57,10,7 +8.8,0.36,0.44,1.9,0.04,9,121,0.9953,3.19,0.48,9.9,6 +7.4,0.26,0.43,6,0.022,22,125,0.9928,3.13,0.55,11.5,6 +7.4,0.26,0.43,6,0.022,22,125,0.9928,3.13,0.55,11.5,6 +6.8,0.23,0.29,12.2,0.035,38,236,0.9976,3.35,0.52,9.8,6 +6.1,0.34,0.27,2.6,0.024,20,105,0.9906,3.4,0.67,12.2,7 +7.3,0.26,0.31,1.6,0.04,39,173,0.9918,3.19,0.51,11.4,6 +6.5,0.3,0.32,2,0.044,34,90,0.99185,3.37,0.68,11,7 +7.3,0.26,0.31,1.6,0.04,39,173,0.9918,3.19,0.51,11.4,6 +6.5,0.3,0.32,2,0.044,34,90,0.99185,3.37,0.68,11,7 +5,0.31,0,6.4,0.046,43,166,0.994,3.3,0.63,9.9,6 +5.8,0.26,0.18,1.2,0.031,40,114,0.9908,3.42,0.4,11,7 +5.9,0.26,0.3,1,0.036,38,114,0.9928,3.58,0.48,9.4,5 +7,0.31,0.29,1.4,0.037,33,128,0.9896,3.12,0.36,12.2,7 +5.8,0.26,0.18,1.2,0.031,40,114,0.9908,3.42,0.4,11,7 +5.6,0.19,0.39,1.1,0.043,17,67,0.9918,3.23,0.53,10.3,6 +6.8,0.18,0.28,8.7,0.047,52,242,0.9952,3.22,0.53,10.5,6 +7,0.29,0.26,1.6,0.044,12,87,0.9923,3.08,0.46,10.5,6 +6.6,0.26,0.29,1.4,0.039,13,67,0.9915,3.05,0.49,10.9,6 +6.8,0.18,0.28,8.5,0.047,52,242,0.9952,3.22,0.53,10.5,6 +6.6,0.2,0.38,7.9,0.052,30,145,0.9947,3.32,0.56,11,7 +8,0.29,0.29,13.2,0.046,26,113,0.9983,3.25,0.37,9.7,6 +6.1,0.28,0.35,12.8,0.048,63,229,0.9975,3.08,0.4,8.9,5 +5.9,0.31,0.3,7.7,0.047,60,206,0.995,3.2,0.39,9.6,6 +6.9,0.21,0.28,2.4,0.056,49,159,0.9944,3.02,0.47,8.8,8 +8.4,0.19,0.42,1.6,0.047,9,101,0.994,3.06,0.65,11.1,4 +8.3,0.27,0.45,1.3,0.048,8,72,0.9944,3.08,0.61,10.3,4 +7.1,0.25,0.39,2.1,0.036,30,124,0.9908,3.28,0.43,12.2,8 +8,0.23,0.37,9.6,0.054,23,159,0.99795,3.32,0.47,9.8,4 +7.5,0.24,0.31,13,0.049,46,217,0.9985,3.08,0.53,8.8,5 +6.3,0.33,0.2,5.8,0.04,24,144,0.99425,3.15,0.63,9.9,5 +6.2,0.33,0.19,5.6,0.042,22,143,0.99425,3.15,0.63,9.9,5 +6.3,0.34,0.19,5.8,0.041,22,145,0.9943,3.15,0.63,9.9,5 +5.8,0.29,0.05,0.8,0.038,11,30,0.9924,3.36,0.35,9.2,5 +8,0.32,0.26,1.2,0.05,11.5,88,0.9946,3.24,0.37,9.5,4 +5.6,0.29,0.05,0.8,0.038,11,30,0.9924,3.36,0.35,9.2,5 +7.4,0.13,0.39,4.7,0.042,36,137,0.995,3.36,0.56,10.3,7 +7.7,0.3,0.32,1.6,0.037,23,124,0.9919,2.93,0.33,11,6 +7,0.24,0.34,1.4,0.031,27,107,0.99,3.06,0.39,11.9,6 +8.6,0.18,0.4,1.1,0.04,20,107,0.9923,2.94,0.32,10.2,7 +7,0.11,0.32,4.6,0.057,59,144,0.9956,3.55,0.44,9.4,7 +7.7,0.32,0.62,10.6,0.036,56,153,0.9978,3.13,0.44,8.9,6 +7.7,0.32,0.62,10.6,0.036,56,153,0.9978,3.13,0.44,8.9,6 +6.5,0.26,0.27,12.9,0.044,69,215,0.9967,3.17,0.43,10,6 +7.9,0.28,0.41,2,0.044,50,152,0.9934,3.45,0.49,10.7,8 +6.3,0.27,0.23,2.9,0.047,13,100,0.9936,3.28,0.43,9.8,5 +5.4,0.595,0.1,2.8,0.042,26,80,0.9932,3.36,0.38,9.3,5 +6.7,0.25,0.33,2.9,0.057,52,173,0.9934,3.02,0.48,9.5,7 +6.5,0.25,0.35,12,0.055,47,179,0.998,3.58,0.47,10,5 +6.1,0.36,0.58,15,0.044,42,115,0.9978,3.15,0.51,9,5 +7.7,0.17,0.52,5.9,0.017,21,84,0.9929,3.14,0.4,11.9,7 +6.4,0.26,0.43,12.6,0.033,64,230,0.9974,3.08,0.38,8.9,5 
+6.5,0.26,0.28,12.5,0.046,80,225,0.99685,3.18,0.41,10,6 +5.9,0.29,0.33,7.4,0.037,58,205,0.99495,3.26,0.41,9.6,5 +6.2,0.28,0.43,13,0.039,64,233,0.99745,3.08,0.38,8.9,5 +6.1,0.27,0.44,6.7,0.041,61,230,0.99505,3.12,0.4,8.9,5 +6.4,0.43,0.32,1.4,0.048,10,67,0.992,3.08,0.41,11.4,5 +6.1,0.36,0.58,15,0.044,42,115,0.9978,3.15,0.51,9,5 +6.2,0.35,0.29,7.3,0.044,56,244,0.9956,3.36,0.55,10,6 +7.7,0.24,0.29,15.3,0.044,39,194,0.9982,3.06,0.47,9.6,7 +6.2,0.34,0.28,7.5,0.034,40,197,0.99485,3.14,0.6,9.7,5 +6.3,0.27,0.46,11.75,0.037,61,212,0.9971,3.25,0.53,9.5,6 +5.4,0.415,0.19,1.6,0.039,27,88,0.99265,3.54,0.41,10,7 +6.9,0.48,0.36,3.5,0.03,31,135,0.9904,3.14,0.38,12.2,7 +6.5,0.18,0.33,8,0.051,16,131,0.9965,3.28,0.44,8.7,7 +6.7,0.15,0.29,5,0.058,28,105,0.9946,3.52,0.44,10.2,7 +8.2,0.345,1,18.2,0.047,55,205,0.99965,2.96,0.43,9.6,5 +8.5,0.16,0.35,1.6,0.039,24,147,0.9935,2.96,0.36,10,5 +6.8,0.705,0.25,3.2,0.048,10,57,0.996,3.36,0.52,9.5,4 +7.3,0.25,0.39,6.4,0.034,8,84,0.9942,3.18,0.46,11.5,5 +7.6,0.345,0.26,1.9,0.043,15,134,0.9936,3.08,0.38,9.5,5 +7.6,0.22,0.34,9.7,0.035,26,143,0.9965,3.08,0.49,9.8,6 +6.5,0.17,0.33,1.4,0.028,14,99,0.9928,3.23,0.55,10.1,6 +8.2,0.23,0.37,1.3,0.042,39,117,0.9928,2.99,0.36,10,5 +7.6,0.22,0.34,9.7,0.035,26,143,0.9965,3.08,0.49,9.8,6 +7.6,0.345,0.26,1.9,0.043,15,134,0.9936,3.08,0.38,9.5,5 +7.5,0.32,0.26,1.8,0.042,13,133,0.9938,3.07,0.38,9.5,5 +6.6,0.23,0.32,0.9,0.041,25,79,0.9926,3.39,0.54,10.2,7 +6.6,0.2,0.32,1.1,0.039,25,78,0.9926,3.39,0.54,10.2,7 +7.3,0.24,0.34,15.4,0.05,38,174,0.9983,3.03,0.42,9,6 +7.3,0.24,0.34,15.4,0.05,38,174,0.9983,3.03,0.42,9,6 +8,0.42,0.36,5,0.037,34,101,0.992,3.13,0.57,12.3,7 +7.3,0.24,0.34,15.4,0.05,38,174,0.9983,3.03,0.42,9,6 +6.1,0.19,0.25,4,0.023,23,112,0.9923,3.37,0.51,11.6,6 +5.9,0.26,0.21,12.5,0.034,36,152,0.9972,3.28,0.43,9.5,6 +8.3,0.23,0.43,3.2,0.035,14,101,0.9928,3.15,0.36,11.5,5 +6.5,0.34,0.28,1.8,0.041,43,188,0.9928,3.13,0.37,9.6,6 +6.8,0.22,0.35,17.5,0.039,38,153,0.9994,3.24,0.42,9,6 +6.5,0.08,0.33,1.9,0.028,23,93,0.991,3.34,0.7,12,7 +5.5,0.42,0.09,1.6,0.019,18,68,0.9906,3.33,0.51,11.4,7 +5.1,0.42,0.01,1.5,0.017,25,102,0.9894,3.38,0.36,12.3,7 +6,0.27,0.19,1.7,0.02,24,110,0.9898,3.32,0.47,12.6,7 +6.8,0.22,0.35,17.5,0.039,38,153,0.9994,3.24,0.42,9,6 +6.5,0.08,0.33,1.9,0.028,23,93,0.991,3.34,0.7,12,7 +7.1,0.13,0.38,1.8,0.046,14,114,0.9925,3.32,0.9,11.7,6 +7.6,0.3,0.25,4.3,0.054,22,111,0.9956,3.12,0.49,9.2,5 +6.6,0.13,0.3,4.9,0.058,47,131,0.9946,3.51,0.45,10.3,6 +6.5,0.14,0.33,7.6,0.05,53,189,0.9966,3.25,0.49,8.6,5 +7.7,0.28,0.33,6.7,0.037,32,155,0.9951,3.39,0.62,10.7,7 +6,0.2,0.71,1.6,0.15,10,54,0.9927,3.12,0.47,9.8,5 +6,0.19,0.71,1.5,0.152,9,55,0.9927,3.12,0.46,9.8,6 +7.7,0.28,0.33,6.7,0.037,32,155,0.9951,3.39,0.62,10.7,7 +5.1,0.39,0.21,1.7,0.027,15,72,0.9894,3.5,0.45,12.5,6 +5.7,0.36,0.34,4.2,0.026,21,77,0.9907,3.41,0.45,11.9,6 +6.9,0.19,0.33,1.6,0.043,63,149,0.9925,3.44,0.52,10.8,5 +6,0.41,0.21,1.9,0.05,29,122,0.9928,3.42,0.52,10.5,6 +7.4,0.28,0.3,5.3,0.054,44,161,0.9941,3.12,0.48,10.3,6 +7.4,0.3,0.3,5.2,0.053,45,163,0.9941,3.12,0.45,10.3,6 +6.9,0.19,0.33,1.6,0.043,63,149,0.9925,3.44,0.52,10.8,5 +7.7,0.28,0.39,8.9,0.036,8,117,0.9935,3.06,0.38,12,7 +8.6,0.16,0.38,3.4,0.04,41,143,0.9932,2.95,0.39,10.2,6 +8.2,0.26,0.44,1.3,0.046,7,69,0.9944,3.14,0.62,10.2,4 +6.5,0.25,0.27,15.2,0.049,75,217,0.9972,3.19,0.39,9.9,5 +7,0.24,0.18,1.3,0.046,9,62,0.994,3.38,0.47,10.1,4 +8.6,0.18,0.36,1.8,0.04,24,187,0.9956,3.25,0.55,9.5,6 +7.8,0.27,0.34,1.6,0.046,27,154,0.9927,3.05,0.45,10.5,6 +6,0.26,0.34,1.3,0.046,6,29,0.9924,3.29,0.63,10.4,5 
+6.1,0.24,0.27,9.8,0.062,33,152,0.9966,3.31,0.47,9.5,6 +8,0.24,0.3,17.45,0.056,43,184,0.9997,3.05,0.5,9.2,6 +7.6,0.21,0.6,2.1,0.046,47,165,0.9936,3.05,0.54,10.1,7 +8,0.19,0.36,1.8,0.05,16,84,0.9936,3.15,0.45,9.8,7 +6.4,0.28,0.41,6.8,0.045,61,216,0.9952,3.09,0.46,9.4,5 +6.4,0.28,0.43,7.1,0.045,60,221,0.9952,3.09,0.45,9.4,6 +6.9,0.24,0.39,1.3,0.063,18,136,0.9928,3.31,0.48,10.4,7 +5.8,0.36,0.26,3.3,0.038,40,153,0.9911,3.34,0.55,11.3,6 +6.6,0.18,0.28,3.3,0.044,18,91,0.993,3.42,0.64,10.8,6 +5.8,0.36,0.26,3.3,0.038,40,153,0.9911,3.34,0.55,11.3,6 +5.1,0.52,0.06,2.7,0.052,30,79,0.9932,3.32,0.43,9.3,5 +6.6,0.22,0.37,1.2,0.059,45,199,0.993,3.37,0.55,10.3,7 +8.3,0.15,0.39,1.3,0.055,32,146,0.993,3.08,0.39,10.5,6 +7.6,0.16,0.44,1.4,0.043,25,109,0.9932,3.11,0.75,10.3,6 +7.7,0.16,0.41,1.7,0.048,60,173,0.9932,3.24,0.66,11.2,7 +8.3,0.16,0.48,1.7,0.057,31,98,0.9943,3.15,0.41,10.3,6 +6.2,0.25,0.47,11.6,0.048,62,210,0.9968,3.19,0.5,9.5,5 +6.1,0.16,0.27,12.6,0.064,63,162,0.9994,3.66,0.43,8.9,5 +7.6,0.39,0.22,2.8,0.036,19,113,0.9926,3.03,0.29,10.2,5 +6.8,0.37,0.47,11.2,0.071,44,136,0.9968,2.98,0.88,9.2,5 +7.6,0.16,0.44,1.4,0.043,25,109,0.9932,3.11,0.75,10.3,6 +7.1,0.18,0.42,1.4,0.045,47,157,0.9916,2.95,0.31,10.5,6 +8.3,0.14,0.26,1.5,0.049,56,189,0.9946,3.21,0.62,9.5,6 +8.6,0.2,0.42,1.5,0.041,35,125,0.9925,3.11,0.49,11.4,7 +8.6,0.2,0.42,1.5,0.041,35,125,0.9925,3.11,0.49,11.4,7 +6.8,0.19,0.32,7.05,0.019,54,188,0.9935,3.25,0.37,11.1,8 +7.6,0.19,0.38,10.6,0.06,48,174,0.9962,3.13,0.38,10.5,6 +6.8,0.34,0.74,2.8,0.088,23,185,0.9928,3.51,0.7,12,6 +6.2,0.15,0.46,1.6,0.039,38,123,0.993,3.38,0.51,9.7,6 +6.6,0.14,0.44,1.6,0.042,47,140,0.993,3.32,0.51,10.2,6 +8,0.55,0.17,8.2,0.04,13,60,0.9956,3.09,0.3,9.5,4 +7,0.24,0.35,1.5,0.052,51,128,0.9941,3.41,0.59,10.4,7 +6.3,0.6,0.44,11,0.05,50,245,0.9972,3.19,0.57,9.3,4 +7.1,0.2,0.41,2.1,0.054,24,166,0.9948,3.48,0.62,10.5,6 +6.2,0.34,0.29,7.6,0.047,45,232,0.9955,3.35,0.62,10,6 +7.1,0.3,0.36,6.8,0.055,44.5,234,0.9972,3.49,0.64,10.2,6 +7.1,0.3,0.36,6.8,0.055,44.5,234,0.9972,3.49,0.64,10.2,6 +7.9,0.64,0.46,10.6,0.244,33,227,0.9983,2.87,0.74,9.1,3 +8.8,0.17,0.38,1.8,0.04,39,148,0.9942,3.16,0.67,10.2,6 +7.5,0.17,0.37,1.5,0.06,18,75,0.9936,3.54,0.88,10.7,5 +7.1,0.47,0.24,6,0.044,11,77,0.9956,3.21,0.56,9.7,5 +7.1,0.15,0.34,5.3,0.034,33,104,0.9953,3.37,0.52,9.3,7 +7.5,0.17,0.34,1.4,0.035,13,102,0.9918,3.05,0.74,11,5 +8.2,0.68,0.3,2.1,0.047,17,138,0.995,3.22,0.71,10.8,4 +7.7,0.275,0.3,1,0.039,19,75,0.992,3.01,0.56,10.7,5 +7.3,0.49,0.32,5.2,0.043,18,104,0.9952,3.24,0.45,10.7,4 +7.5,0.33,0.48,19.45,0.048,55,243,1.001,2.95,0.4,8.8,5 +7.2,0.21,0.37,1.6,0.049,23,94,0.9924,3.16,0.48,10.9,7 +7.3,0.15,0.4,2,0.05,24,92,0.9932,3.14,0.45,10.5,5 +6.5,0.19,0.1,1.3,0.046,23,107,0.9937,3.29,0.45,10,5 +7,0.31,0.52,1.7,0.029,5,61,0.9918,3.07,0.43,10.4,5 +8.3,0.4,0.38,1.1,0.038,15,75,0.9934,3.03,0.43,9.2,5 +6.1,0.37,0.36,4.7,0.035,36,116,0.991,3.31,0.62,12.6,6 +7.3,0.24,0.34,7.5,0.048,29,152,0.9962,3.1,0.54,9,5 +6.9,0.21,0.81,1.1,0.137,52,123,0.9932,3.03,0.39,9.2,6 +7.6,0.29,0.42,1.3,0.035,18,86,0.9908,2.99,0.39,11.3,5 +9.4,0.29,0.55,2.2,0.05,17,119,0.9962,3.12,0.69,10.3,4 +7,0.31,0.52,1.7,0.029,5,61,0.9918,3.07,0.43,10.4,5 +8.6,0.26,0.41,2.2,0.049,29,111,0.9941,2.96,0.44,10,5 +7.5,0.21,0.34,1.2,0.06,26,111,0.9931,3.51,0.47,10.7,6 +7.2,0.51,0.24,10,0.093,35,197,0.9981,3.41,0.47,9,5 +7.5,0.21,0.34,1.2,0.06,26,111,0.9931,3.51,0.47,10.7,6 +5.3,0.3,0.2,1.1,0.077,48,166,0.9944,3.3,0.54,8.7,4 +8,0.26,0.36,2,0.054,30,121,0.992,3.09,0.72,11.6,7 +7,0.21,0.28,7.5,0.07,45,185,0.9966,3.34,0.55,9.4,5 
+6.7,0.26,0.26,4,0.079,35.5,216,0.9956,3.31,0.68,9.5,5 +6.7,0.26,0.26,4.1,0.073,36,202,0.9956,3.3,0.67,9.5,5 +8.1,0.26,0.37,1.9,0.072,48,159,0.9949,3.37,0.7,10.9,6 +8.3,0.22,0.38,14.8,0.054,32,126,1.0002,3.22,0.5,9.7,5 +6.4,0.3,0.51,5.5,0.048,62,172,0.9942,3.08,0.45,9.1,6 +7.5,0.19,0.34,2.6,0.037,33,125,0.9923,3.1,0.49,11.1,7 +8.8,0.33,0.44,6.35,0.024,9,87,0.9917,2.96,0.4,12.6,7 +6.9,0.2,0.36,1.5,0.031,38,147,0.9931,3.35,0.56,11,6 +8,0.37,0.32,1.6,0.04,32,166,0.992,3,0.55,11.3,7 +8.3,0.22,0.38,14.8,0.054,32,126,1.0002,3.22,0.5,9.7,5 +8.2,0.29,0.33,9.1,0.036,28,118,0.9953,2.96,0.4,10.9,7 +7.7,0.34,0.3,8,0.048,25,192,0.9951,2.97,0.47,10.9,5 +6.2,0.55,0.45,12,0.049,27,186,0.9974,3.17,0.5,9.3,6 +6.4,0.4,0.19,3.2,0.033,28,124,0.9904,3.22,0.54,12.7,7 +7.5,0.28,0.33,7.7,0.048,42,180,0.9974,3.37,0.59,10.1,6 +7.8,0.26,0.44,1.3,0.037,43,132,0.9944,3.18,0.65,10,5 +6.5,0.26,0.34,16.3,0.051,56,197,1.0004,3.49,0.42,9.8,5 +6.3,0.34,0.29,6.2,0.046,29,227,0.9952,3.29,0.53,10.1,6 +6.8,0.15,0.33,4.7,0.059,31,118,0.9956,3.43,0.39,9,7 +6.3,0.27,0.25,5.8,0.038,52,155,0.995,3.28,0.38,9.4,6 +6.3,0.27,0.25,5.8,0.038,52,155,0.995,3.28,0.38,9.4,6 +7.4,0.2,0.37,16.95,0.048,43,190,0.9995,3.03,0.42,9.2,6 +6.3,0.23,0.21,5.1,0.035,29,142,0.9942,3.36,0.33,10.1,7 +7.3,0.31,0.69,10.2,0.041,58,160,0.9977,3.06,0.45,8.6,5 +5.2,0.24,0.45,3.8,0.027,21,128,0.992,3.55,0.49,11.2,8 +7,0.24,0.32,1.3,0.037,39,123,0.992,3.17,0.42,11.2,8 +7.4,0.2,0.37,16.95,0.048,43,190,0.9995,3.03,0.42,9.2,6 +7,0.17,0.33,4,0.034,17,127,0.9934,3.19,0.39,10.6,7 +8.3,0.21,0.58,17.1,0.049,62,213,1.0006,3.01,0.51,9.3,6 +7.2,0.21,0.35,14.5,0.048,35,178,0.9982,3.05,0.47,8.9,6 +7.1,0.21,0.4,1.2,0.069,24,156,0.9928,3.42,0.43,10.6,6 +8.4,0.17,0.31,6.7,0.038,29,132,0.9945,3.1,0.32,10.6,7 +7.4,0.24,0.31,8.4,0.045,52,183,0.9963,3.09,0.32,8.8,5 +5.3,0.24,0.33,1.3,0.033,25,97,0.9906,3.59,0.38,11,8 +6.5,0.28,0.26,8.8,0.04,44,139,0.9956,3.32,0.37,10.2,6 +6.3,0.23,0.21,5.1,0.035,29,142,0.9942,3.36,0.33,10.1,7 +6.5,0.29,0.25,10.6,0.039,32,120,0.9962,3.31,0.34,10.1,6 +5.8,0.29,0.21,2.6,0.025,12,120,0.9894,3.39,0.79,14,7 +6.3,0.27,0.25,5.8,0.038,52,155,0.995,3.28,0.38,9.4,6 +6.3,0.17,0.42,2.8,0.028,45,107,0.9908,3.27,0.43,11.8,6 +6.3,0.16,0.4,1.6,0.033,59,148,0.9914,3.44,0.53,11.4,5 +7.9,0.29,0.39,6.7,0.036,6,117,0.9938,3.12,0.42,10.7,5 +7.3,0.31,0.69,10.2,0.041,58,160,0.9977,3.06,0.45,8.6,5 +5.5,0.32,0.45,4.9,0.028,25,191,0.9922,3.51,0.49,11.5,7 +5.2,0.24,0.45,3.8,0.027,21,128,0.992,3.55,0.49,11.2,8 +7.2,0.37,0.15,2,0.029,27,87,0.9903,3.3,0.59,12.6,7 +6.1,0.29,0.27,1.7,0.024,13,76,0.9893,3.21,0.51,12.6,7 +9.2,0.22,0.4,2.4,0.054,18,151,0.9952,3.04,0.46,9.3,4 +7.2,0.37,0.15,2,0.029,27,87,0.9903,3.3,0.59,12.6,7 +8,0.18,0.37,1.3,0.04,15,96,0.9912,3.06,0.61,12.1,6 +6.5,0.22,0.34,12,0.053,55,177,0.9983,3.52,0.44,9.9,6 +7.4,0.18,0.4,1.6,0.047,22,102,0.9937,3.28,0.44,10.7,5 +6.5,0.52,0.17,1.4,0.047,5,26,0.9932,3.26,0.32,10,4 +7,0.15,0.38,2.2,0.047,33,96,0.9928,3.13,0.39,10.4,8 +5.9,0.415,0.13,1.4,0.04,11,64,0.9922,3.29,0.52,10.5,5 +8.1,0.45,0.34,8.3,0.037,33,216,0.9976,3.31,0.64,9.7,5 +5.8,0.415,0.13,1.4,0.04,11,64,0.9922,3.29,0.52,10.5,5 +6.4,0.5,0.16,12.9,0.042,26,138,0.9974,3.28,0.33,9,5 +6.7,0.105,0.32,12.4,0.051,34,106,0.998,3.54,0.45,9.2,6 +6,0.4,0.3,1.6,0.047,30,117,0.9931,3.17,0.48,10.1,6 +6.6,0.25,0.39,1.45,0.04,40,89,0.9911,3.35,0.4,11.4,7 +9.8,0.36,0.45,1.6,0.042,11,124,0.9944,2.93,0.46,10.8,5 +9.6,0.23,0.4,1.5,0.044,19,135,0.9937,2.96,0.49,10.9,5 +6.3,0.55,0.45,13,0.047,33,182,0.9974,3.2,0.46,9.2,6 +6.5,0.115,0.29,1.95,0.038,73,166,0.989,3.12,0.25,12.9,7 
+6.4,0.125,0.29,5.85,0.042,24,99,0.992,3.23,0.32,12,7 +5.7,0.1,0.27,1.3,0.047,21,100,0.9928,3.27,0.46,9.5,5 +7.9,0.25,0.29,5.3,0.031,33,117,0.9918,3.06,0.32,11.8,7 +6.9,0.2,0.28,1.2,0.048,36,159,0.9936,3.19,0.43,9.1,6 +6.9,0.23,0.34,4,0.047,24,128,0.9944,3.2,0.52,9.7,6 +6.8,0.39,0.31,14.35,0.043,28,162,0.9988,3.17,0.54,9.1,5 +8.7,0.22,0.42,2.3,0.053,27,114,0.994,2.99,0.43,10,5 +7.4,0.41,0.34,4.7,0.042,19,127,0.9953,3.25,0.42,10.4,5 +6.7,0.25,0.34,12.85,0.048,30,161,0.9986,3.44,0.47,9.5,6 +6,0.26,0.42,5.2,0.027,70,178,0.9914,3.4,0.4,12.3,8 +6.1,0.31,0.37,8.4,0.031,70,170,0.9934,3.42,0.4,11.7,8 +9.2,0.28,0.46,3.2,0.058,39,133,0.996,3.14,0.58,9.5,5 +9,0.31,0.49,6.9,0.034,26,91,0.9937,2.99,0.34,11.5,5 +8.5,0.16,0.33,1,0.076,17,57,0.9921,3.14,0.46,10.6,6 +9.3,0.34,0.49,7.3,0.052,30,146,0.998,3.17,0.61,10.2,5 +9.2,0.28,0.46,3.2,0.058,39,133,0.996,3.14,0.58,9.5,5 +7.2,0.24,0.3,1.6,0.048,27,131,0.9933,3.25,0.45,10.5,5 +7.2,0.25,0.32,1.5,0.047,27,132,0.9933,3.26,0.44,10.5,5 +6.8,0.32,0.18,7.5,0.041,71,223,0.9959,3.14,0.41,8.9,5 +9.1,0.27,0.32,1.1,0.031,15,151,0.9936,3.03,0.41,10.6,5 +8.9,0.34,0.32,1.3,0.041,12,188,0.9953,3.17,0.49,9.5,5 +7,0.17,0.37,5.7,0.025,29,111,0.9938,3.2,0.49,10.8,6 +6.7,0.25,0.23,7.2,0.038,61,220,0.9952,3.14,0.35,9.5,5 +6.9,0.32,0.17,7.6,0.042,69,219,0.9959,3.13,0.4,8.9,5 +6.8,0.32,0.18,7.5,0.041,71,223,0.9959,3.14,0.41,8.9,5 +6.1,0.6,0,1.3,0.042,24,79,0.9937,3.31,0.38,9.4,4 +5.3,0.395,0.07,1.3,0.035,26,102,0.992,3.5,0.35,10.6,6 +7.9,0.16,0.3,4.8,0.037,37,171,0.9967,3.47,0.44,9,4 +7.6,0.33,0.36,2.1,0.034,26,172,0.9944,3.42,0.48,10.5,4 +7.8,0.3,0.29,16.85,0.054,23,135,0.9998,3.16,0.38,9,6 +7.8,0.3,0.29,16.85,0.054,23,135,0.9998,3.16,0.38,9,6 +5.7,0.26,0.27,4.1,0.201,73.5,189.5,0.9942,3.27,0.38,9.4,6 +7.8,0.3,0.29,16.85,0.054,23,135,0.9998,3.16,0.38,9,6 +7.5,0.14,0.34,1.3,0.055,50,153,0.9945,3.29,0.8,9.6,6 +7.8,0.3,0.29,16.85,0.054,23,135,0.9998,3.16,0.38,9,6 +6.6,0.25,0.41,7.4,0.043,29,151,0.9946,3.15,0.6,10.2,7 +5.7,0.26,0.27,4.1,0.201,73.5,189.5,0.9942,3.27,0.38,9.4,6 +8.2,0.23,0.49,0.9,0.057,15,73,0.9928,3.07,0.38,10.4,6 +6,0.24,0.32,6.3,0.03,34,129,0.9946,3.52,0.41,10.4,5 +6.1,0.45,0.27,0.8,0.039,13,82,0.9927,3.23,0.32,9.5,5 +7.4,0.23,0.43,1.4,0.044,22,113,0.9938,3.22,0.62,10.6,6 +7.2,0.2,0.38,1,0.037,21,74,0.9918,3.21,0.37,11,5 +7.5,0.14,0.34,1.3,0.055,50,153,0.9945,3.29,0.8,9.6,6 +7.7,0.25,0.43,4.5,0.062,20,115,0.9966,3.38,0.5,9.9,6 +8.2,0.61,0.45,5.4,0.03,15,118,0.9954,3.14,0.34,9.6,5 +7.6,0.21,0.44,1.9,0.036,10,119,0.9913,3.01,0.7,12.8,6 +7.4,0.22,0.33,2,0.045,31,101,0.9931,3.42,0.55,11.4,5 +7.2,0.26,0.26,12.7,0.036,49,214,0.9986,3.41,0.5,10,6 +6.4,0.25,0.41,8.6,0.042,57,173,0.9965,3,0.44,9.1,5 +6.3,0.32,0.35,11.1,0.039,29,198,0.9984,3.36,0.5,9.4,7 +6.8,0.25,0.29,2,0.042,19,189,0.9952,3.46,0.54,10.2,6 +9.8,0.44,0.4,2.8,0.036,35,167,0.9956,2.97,0.39,9.2,5 +7.2,0.2,0.25,4.5,0.044,31,109,0.9949,3.23,0.36,9.4,5 +8.2,0.61,0.45,5.4,0.03,15,118,0.9954,3.14,0.34,9.6,5 +7.5,0.42,0.45,9.1,0.029,20,125,0.996,3.12,0.36,10.1,6 +7.4,0.22,0.33,2,0.045,31,101,0.9931,3.42,0.55,11.4,5 +6.4,0.26,0.3,2.2,0.025,33,134,0.992,3.21,0.47,10.6,6 +7.9,0.46,0.32,4.1,0.033,40,138,0.9912,3.18,0.44,12.8,7 +6.5,0.41,0.64,11.8,0.065,65,225,0.9978,3.12,0.51,8.9,5 +7.5,0.32,0.37,1.2,0.048,22,184,0.9938,3.09,0.43,9.3,5 +6.6,0.21,0.38,2.2,0.026,40,104,0.9914,3.25,0.4,11.1,8 +7.1,0.21,0.3,1.4,0.037,45,143,0.9932,3.13,0.33,9.9,6 +7.6,0.26,0.47,1.6,0.068,5,55,0.9944,3.1,0.45,9.6,5 +7.6,0.21,0.44,1.9,0.036,10,119,0.9913,3.01,0.7,12.8,6 +6.9,0.25,0.26,5.2,0.024,36,135,0.9948,3.16,0.72,10.7,7 
+7.1,0.26,0.32,14.45,0.074,29,107,0.998,2.96,0.42,9.2,6 +7.3,0.22,0.4,14.75,0.042,44.5,129.5,0.9998,3.36,0.41,9.1,7 +6.2,0.37,0.22,8.3,0.025,36,216,0.9964,3.33,0.6,9.6,6 +7.9,0.22,0.45,14.2,0.038,53,141,0.9992,3.03,0.46,9.2,6 +6.9,0.25,0.26,5.2,0.024,36,135,0.9948,3.16,0.72,10.7,7 +7.3,0.22,0.4,14.75,0.042,44.5,129.5,0.9998,3.36,0.41,9.1,7 +7.1,0.26,0.32,14.45,0.074,29,107,0.998,2.96,0.42,9.2,6 +7.4,0.25,0.37,6.9,0.02,14,93,0.9939,3,0.48,10.7,7 +6.8,0.18,0.37,1.5,0.027,37,93,0.992,3.3,0.45,10.8,6 +7,0.17,0.37,1.5,0.028,26,75,0.9922,3.3,0.46,10.8,7 +6.4,0.3,0.38,7.8,0.046,35,192,0.9955,3.1,0.37,9,5 +5,0.33,0.16,1.5,0.049,10,97,0.9917,3.48,0.44,10.7,6 +5,0.33,0.16,1.5,0.049,10,97,0.9917,3.48,0.44,10.7,6 +8.9,0.33,0.32,1.5,0.047,11,200,0.9954,3.19,0.46,9.4,5 +7,0.26,0.46,15.55,0.037,61,171,0.9986,2.94,0.35,8.8,6 +6.4,0.3,0.38,7.8,0.046,35,192,0.9955,3.1,0.37,9,5 +6.3,0.21,0.4,1.7,0.031,48,134,0.9917,3.42,0.49,11.5,6 +8,0.23,0.46,1.5,0.03,30,125,0.9907,3.23,0.47,12.5,6 +9.2,0.28,0.41,1,0.042,14,59,0.9922,2.96,0.25,10.5,6 +7.3,0.27,0.39,6.7,0.064,28,188,0.9958,3.29,0.3,9.7,5 +7.6,0.32,0.36,1.6,0.04,32,155,0.993,3.23,0.52,11.3,6 +5,0.33,0.16,1.5,0.049,10,97,0.9917,3.48,0.44,10.7,6 +9.7,0.24,0.45,1.2,0.033,11,59,0.9926,2.74,0.47,10.8,6 +8,0.28,0.42,7.1,0.045,41,169,0.9959,3.17,0.43,10.6,5 +8.2,0.37,0.36,1,0.034,17,93,0.9906,3.04,0.32,11.7,8 +8,0.61,0.38,12.1,0.301,24,220,0.9993,2.94,0.48,9.2,5 +7.2,0.26,0.44,7.1,0.027,25,126,0.993,3.02,0.34,11.1,8 +8.2,0.37,0.36,1,0.034,17,93,0.9906,3.04,0.32,11.7,8 +6.4,0.23,0.33,1.15,0.044,15.5,217.5,0.992,3.33,0.44,11,6 +5.9,0.4,0.32,6,0.034,50,127,0.992,3.51,0.58,12.5,7 +7.6,0.28,0.39,1.2,0.038,21,115,0.994,3.16,0.67,10,6 +8,0.28,0.42,7.1,0.045,41,169,0.9959,3.17,0.43,10.6,5 +7.2,0.23,0.39,2.3,0.033,29,102,0.9908,3.26,0.54,12.3,7 +6.8,0.32,0.37,3.4,0.023,19,87,0.9902,3.14,0.53,12.7,6 +7.2,0.23,0.39,2.3,0.033,29,102,0.9908,3.26,0.54,12.3,7 +6.9,0.18,0.38,6.5,0.039,20,110,0.9943,3.1,0.42,10.5,5 +9.4,0.26,0.53,1.2,0.047,25,109,0.9921,3.23,0.28,12.5,6 +8.3,0.33,0.42,1.15,0.033,18,96,0.9911,3.2,0.32,12.4,3 +7.3,0.29,0.3,13,0.043,46,238,0.9986,3.06,0.41,8.7,6 +7.9,0.41,0.37,4.5,0.03,40,114,0.992,3.17,0.54,12.4,7 +7.9,0.44,0.37,5.85,0.033,27,93,0.992,3.16,0.54,12.6,7 +7.7,0.39,0.3,5.2,0.037,29,131,0.9943,3.38,0.44,11,6 +7.7,0.26,0.31,1.3,0.043,47,155,0.9937,3.42,0.5,10.1,6 +7.8,0.32,0.31,1.7,0.036,46,195,0.993,3.03,0.48,10.5,5 +6.8,0.32,0.37,3.4,0.023,19,87,0.9902,3.14,0.53,12.7,6 +7.3,0.24,0.39,3.6,0.024,35,116,0.9928,3.17,0.51,10.9,5 +7.1,0.44,0.37,2.7,0.041,35,128,0.9896,3.07,0.43,13.5,7 +10.3,0.25,0.48,2.2,0.042,28,164,0.998,3.19,0.59,9.7,5 +7.9,0.14,0.28,1.8,0.041,44,178,0.9954,3.45,0.43,9.2,6 +7.4,0.18,0.42,2.1,0.036,33,187,0.9938,3.4,0.41,10.6,7 +8.1,0.43,0.42,6.6,0.033,36,141,0.9918,2.98,0.39,13.3,7 +7.1,0.44,0.37,2.7,0.041,35,128,0.9896,3.07,0.43,13.5,7 +6.4,0.26,0.22,5.1,0.037,23,131,0.9944,3.29,0.32,10.1,5 +8,0.66,0.72,17.55,0.042,62,233,0.9999,2.92,0.68,9.4,4 +8,0.2,0.4,5.2,0.055,41,167,0.9953,3.18,0.4,10.6,7 +7.2,0.21,0.34,1.1,0.046,25,80,0.992,3.25,0.4,11.3,6 +7.2,0.18,0.31,1.1,0.045,20,73,0.9925,3.32,0.4,10.8,7 +8.4,0.57,0.44,10.7,0.051,46,195,0.9981,3.15,0.51,10.4,5 +5.3,0.26,0.23,5.15,0.034,48,160,0.9952,3.82,0.51,10.5,7 +5.7,0.245,0.33,1.1,0.049,28,150,0.9927,3.13,0.42,9.3,5 +5.6,0.245,0.32,1.1,0.047,24,152,0.9927,3.12,0.42,9.3,6 +7.3,0.25,0.41,1.8,0.037,52,165,0.9911,3.29,0.39,12.2,7 +7,0.16,0.73,1,0.138,58,150,0.9936,3.08,0.3,9.2,5 +6.4,0.22,0.34,1.8,0.057,29,104,0.9959,3.81,0.57,10.3,6 +7.3,0.18,0.65,1.4,0.046,28,157,0.9946,3.33,0.62,9.4,6 
+6.4,0.17,0.27,6.7,0.036,88,223,0.9948,3.28,0.35,10.2,6 +6.9,0.29,0.16,6.8,0.034,65,212,0.9955,3.08,0.39,9,6 +6.2,0.21,0.38,6.8,0.036,64,245,0.9951,3.06,0.36,9.3,6 +6.4,0.23,0.3,7.1,0.037,63,236,0.9952,3.06,0.34,9.2,6 +7.3,0.19,0.68,1.5,0.05,31,156,0.9946,3.32,0.64,9.4,6 +7.3,0.18,0.65,1.4,0.046,28,157,0.9946,3.33,0.62,9.4,6 +9.6,0.29,0.46,1.45,0.039,77.5,223,0.9944,2.92,0.46,9.5,6 +7.2,0.14,0.35,1.2,0.036,15,73,0.9938,3.46,0.39,9.9,5 +6.9,0.31,0.34,7.4,0.059,36,174,0.9963,3.46,0.62,11.1,7 +7.5,0.28,0.34,4.2,0.028,36,116,0.991,2.99,0.41,12.3,8 +8,0.22,0.42,14.6,0.044,45,163,1.0003,3.21,0.69,8.6,7 +7.6,0.31,0.29,10.5,0.04,21,145,0.9966,3.04,0.35,9.4,5 +8.4,0.35,0.56,13.8,0.048,55,190,0.9993,3.07,0.58,9.4,6 +8,0.22,0.42,14.6,0.044,45,163,1.0003,3.21,0.69,8.6,7 +8.1,0.5,0.47,1.1,0.037,23,126,0.9938,3.21,0.42,10.9,5 +7,0.39,0.31,5.3,0.169,32,162,0.9965,3.2,0.48,9.4,5 +8.1,0.5,0.47,1.1,0.037,23,126,0.9938,3.21,0.42,10.9,5 +8.4,0.35,0.56,13.8,0.048,55,190,0.9993,3.07,0.58,9.4,6 +6.2,0.22,0.27,1.5,0.064,20,132,0.9938,3.22,0.46,9.2,6 +8,0.22,0.42,14.6,0.044,45,163,1.0003,3.21,0.69,8.6,7 +7.6,0.31,0.29,10.5,0.04,21,145,0.9966,3.04,0.35,9.4,5 +7,0.24,0.36,4.9,0.083,10,133,0.9942,3.33,0.37,10.8,6 +6.6,0.27,0.3,1.9,0.025,14,153,0.9928,3.29,0.62,10.5,6 +7.8,0.16,0.41,1.7,0.026,29,140,0.991,3.02,0.78,12.5,6 +7.7,0.27,0.34,1.8,0.028,26,168,0.9911,2.99,0.48,12.1,7 +7.4,0.31,0.74,10.7,0.039,51,147,0.9977,3.02,0.43,8.7,5 +8,0.45,0.36,8.8,0.026,50,151,0.9927,3.07,0.25,12.7,8 +7.7,0.27,0.34,1.8,0.028,26,168,0.9911,2.99,0.48,12.1,7 +7.8,0.16,0.41,1.7,0.026,29,140,0.991,3.02,0.78,12.5,6 +6.6,0.16,0.29,1.8,0.05,40,147,0.9912,3.06,0.44,11.4,7 +8.3,0.21,0.4,1.6,0.032,35,110,0.9907,3.02,0.6,12.9,7 +7.2,0.32,0.33,1.4,0.029,29,109,0.9902,3.15,0.51,12.8,7 +6.6,0.16,0.3,1.6,0.034,15,78,0.992,3.38,0.44,11.2,6 +8.4,0.16,0.33,1.5,0.033,16,98,0.994,3.14,0.42,9.7,6 +7.5,0.23,0.32,9.2,0.038,54,191,0.9966,3.04,0.56,9.7,6 +6.2,0.17,0.3,1.1,0.037,14,79,0.993,3.5,0.54,10.3,6 +6.9,0.39,0.22,4.3,0.03,10,102,0.993,3,0.87,11.6,4 +6.9,0.41,0.22,4.2,0.031,10,102,0.993,3,0.86,11.6,4 +7.5,0.23,0.32,9.2,0.038,54,191,0.9966,3.04,0.56,9.7,6 +7.5,0.38,0.33,5,0.045,30,131,0.9942,3.32,0.44,10.9,6 +7.3,0.42,0.38,6.8,0.045,29,122,0.9925,3.19,0.37,12.6,7 +7.3,0.34,0.39,5.2,0.04,45,163,0.9925,3.3,0.47,12.4,6 +7.8,0.23,0.28,4.75,0.042,45,166,0.9928,2.96,0.4,11.5,5 +9,0.245,0.38,5.9,0.045,52,159,0.995,2.93,0.35,10.2,6 +6.9,0.2,0.4,7.7,0.032,51,176,0.9939,3.22,0.27,11.4,5 +7.4,0.19,0.42,6.4,0.067,39,212,0.9958,3.3,0.33,9.6,6 +8.2,0.2,0.36,8.1,0.035,60,163,0.9952,3.05,0.3,10.3,6 +8,0.59,0.71,17.35,0.038,61,228,1,2.95,0.75,9.3,5 +7.9,0.14,0.45,1.8,0.05,17,114,0.9948,3.33,0.49,10.7,7 +6.8,0.24,0.4,1.8,0.047,34,105,0.99,3.13,0.49,12.8,8 +9.7,0.14,0.59,1.5,0.049,23,142,0.9958,2.98,0.62,9.5,5 +9.2,0.15,0.68,1.6,0.046,22,130,0.9948,3.02,0.45,10.4,6 +9.4,0.17,0.55,1.6,0.049,14,94,0.9949,3.02,0.61,10.3,6 +5.2,0.365,0.08,13.5,0.041,37,142,0.997,3.46,0.39,9.9,6 +6.3,0.23,0.22,3.75,0.039,37,116,0.9927,3.23,0.5,10.7,6 +9.6,0.25,0.54,1.3,0.04,16,160,0.9938,2.94,0.43,10.5,5 +9.2,0.32,0.42,1.3,0.046,14,186,0.9949,3.08,0.48,9.6,5 +6.4,0.31,0.4,6.2,0.04,46,169,0.9953,3.15,0.46,9.3,6 +8.1,0.2,0.36,9.7,0.044,63,162,0.997,3.1,0.46,10,6 +7.9,0.255,0.26,2,0.026,40,190,0.9932,3.04,0.39,11.2,6 +7,0.15,0.34,1.4,0.039,21,177,0.9927,3.32,0.62,10.8,5 +6.4,0.15,0.31,1.1,0.044,25,96,0.9932,3.54,0.51,10.3,6 +6.4,0.25,0.53,6.6,0.038,59,234,0.9955,3.03,0.42,8.8,5 +7.6,0.19,0.42,1.5,0.044,6,114,0.9914,3.04,0.74,12.8,6 +7.3,0.43,0.37,4.6,0.028,17,114,0.991,3.23,0.43,13.2,6 
+5.1,0.31,0.3,0.9,0.037,28,152,0.992,3.54,0.56,10.1,6 +6.2,0.2,0.26,1.7,0.093,40,161,0.9924,3.44,0.66,11,5 +6.9,0.16,0.35,1.3,0.043,21,182,0.9927,3.25,0.62,10.8,6 +7.7,0.32,0.48,2.3,0.04,28,114,0.9911,3.2,0.52,12.8,7 +6.5,0.22,0.72,6.8,0.042,33,168,0.9958,3.12,0.36,9.2,6 +6.8,0.26,0.33,1.5,0.047,44,167,0.9928,3.12,0.44,10.5,6 +5.2,0.37,0.33,1.2,0.028,13,81,0.9902,3.37,0.38,11.7,6 +8.4,0.19,0.43,2.1,0.052,20,104,0.994,2.85,0.46,9.5,5 +8.3,0.21,0.41,2.2,0.05,24,108,0.994,2.85,0.45,9.5,5 +6.8,0.15,0.32,8.8,0.058,24,110,0.9972,3.4,0.4,8.8,6 +7.9,0.16,0.64,17,0.05,69,210,1.0004,3.15,0.51,9.3,7 +7.8,0.21,0.39,1.8,0.034,62,180,0.991,3.09,0.75,12.6,8 +9,0.24,0.5,1.2,0.048,26,107,0.9918,3.21,0.34,12.4,6 +5.7,0.21,0.24,2.3,0.047,60,189,0.995,3.65,0.72,10.1,6 +7.8,0.29,0.36,7,0.042,38,161,0.9941,3.26,0.37,11.2,8 +6.7,0.18,0.3,6.4,0.048,40,251,0.9956,3.29,0.52,10,5 +6.7,0.18,0.3,6.4,0.048,40,251,0.9956,3.29,0.52,10,5 +8.4,0.58,0.27,12.15,0.033,37,116,0.9959,2.99,0.39,10.8,6 +7.2,0.16,0.32,0.8,0.04,50,121,0.9922,3.27,0.33,10,6 +7.6,0.54,0.23,2,0.029,13,151,0.9931,3.04,0.33,10.4,5 +8.4,0.58,0.27,12.15,0.033,37,116,0.9959,2.99,0.39,10.8,6 +6.6,0.25,0.31,12.4,0.059,52,181,0.9984,3.51,0.47,9.8,6 +7.3,0.23,0.37,1.9,0.041,51,165,0.9908,3.26,0.4,12.2,8 +7.3,0.39,0.37,1.1,0.043,36,113,0.991,3.39,0.48,12.7,8 +7,0.46,0.39,6.2,0.039,46,163,0.9928,3.21,0.35,12.2,7 +8.2,0.35,0.4,6.3,0.039,35,162,0.9936,3.15,0.34,11.9,7 +7.8,0.29,0.36,7,0.042,38,161,0.9941,3.26,0.37,11.2,8 +9.2,0.35,0.39,0.9,0.042,15,61,0.9924,2.96,0.28,10.4,4 +8,0.57,0.39,3.9,0.034,22,122,0.9917,3.29,0.67,12.8,7 +6.5,0.37,0.33,3.9,0.027,40,130,0.9906,3.28,0.39,12.7,7 +5.7,0.21,0.24,2.3,0.047,60,189,0.995,3.65,0.72,10.1,6 +6.7,0.18,0.3,6.4,0.048,40,251,0.9956,3.29,0.52,10,5 +7.8,0.13,0.3,1.8,0.04,43,179,0.9955,3.43,0.41,9,5 +7.6,0.19,0.41,1.1,0.04,38,143,0.9907,2.92,0.42,11.4,5 +7.3,0.22,0.41,15.4,0.05,55,191,1,3.32,0.59,8.9,6 +6.3,0.29,0.4,6.5,0.039,43,167,0.9953,3.15,0.44,9.3,6 +6.8,0.35,0.32,2.4,0.048,35,103,0.9911,3.28,0.46,12,8 +6.5,0.19,0.32,1.4,0.04,31,132,0.9922,3.36,0.54,10.8,7 +6.2,0.12,0.26,5.7,0.044,56,158,0.9951,3.52,0.37,10.5,6 +6,0.13,0.28,5.7,0.038,56,189.5,0.9948,3.59,0.43,10.6,7 +6.4,0.25,0.33,1.4,0.04,42,115,0.9906,3.19,0.48,11.3,7 +6.9,0.32,0.16,1.4,0.051,15,96,0.994,3.22,0.38,9.5,4 +7.6,0.19,0.41,1.1,0.04,38,143,0.9907,2.92,0.42,11.4,5 +6.7,0.13,0.28,1.2,0.046,35,140,0.9927,3.33,0.33,10.1,7 +7,0.14,0.41,0.9,0.037,22,95,0.9914,3.25,0.43,10.9,6 +7.6,0.27,0.24,3.8,0.058,19,115,0.9958,3.15,0.45,8.9,5 +7.3,0.22,0.41,15.4,0.05,55,191,1,3.32,0.59,8.9,6 +7.4,0.64,0.47,14.15,0.168,42,185,0.9984,2.9,0.49,9.3,5 +7.6,0.28,0.39,1.9,0.052,23,116,0.9941,3.25,0.4,10.4,6 +8.3,0.26,0.41,9.2,0.042,41,162,0.9944,3.1,0.38,12,7 +10.7,0.22,0.56,8.2,0.044,37,181,0.998,2.87,0.68,9.5,6 +10.7,0.22,0.56,8.2,0.044,37,181,0.998,2.87,0.68,9.5,6 +6.9,0.23,0.34,2.7,0.032,24,121,0.9902,3.14,0.38,12.4,7 +6.2,0.3,0.32,1.7,0.032,30,130,0.9911,3.28,0.41,11.2,7 +6.9,0.27,0.41,1.7,0.047,6,134,0.9929,3.15,0.69,11.4,6 +6.9,0.28,0.41,1.7,0.05,10,136,0.993,3.16,0.71,11.4,6 +6.9,0.28,0.3,1.6,0.047,46,132,0.9918,3.35,0.38,11.1,7 +6.9,0.46,0.2,0.9,0.054,5,126,0.992,3.1,0.42,10.4,6 +6.9,0.38,0.32,8.5,0.044,36,152,0.9932,3.38,0.35,12,7 +5.7,0.43,0.3,5.7,0.039,24,98,0.992,3.54,0.61,12.3,7 +6.6,0.56,0.16,3.1,0.045,28,92,0.994,3.12,0.35,9.1,6 +7.1,0.36,0.56,1.3,0.046,25,102,0.9923,3.24,0.33,10.5,6 +6.8,0.23,0.4,1.6,0.047,5,133,0.993,3.23,0.7,11.4,6 +6.2,0.33,0.29,1.3,0.042,26,138,0.9956,3.77,0.64,9.5,5 +5.6,0.49,0.13,4.5,0.039,17,116,0.9907,3.42,0.9,13.7,7 
+6.6,0.42,0.33,2.8,0.034,15,85,0.99,3.28,0.51,13.4,6 +7.3,0.18,0.29,1.2,0.044,12,143,0.9918,3.2,0.48,11.3,7 +8.1,0.19,0.4,0.9,0.037,73,180,0.9926,3.06,0.34,10,6 +5.9,0.19,0.26,7.4,0.034,33,123,0.995,3.49,0.42,10.1,6 +6.2,0.16,0.47,1.4,0.029,23,81,0.99,3.26,0.42,12.2,6 +6.6,0.42,0.33,2.8,0.034,15,85,0.99,3.28,0.51,13.4,6 +5.7,0.135,0.3,4.6,0.042,19,101,0.9946,3.31,0.42,9.3,6 +5.6,0.49,0.13,4.5,0.039,17,116,0.9907,3.42,0.9,13.7,7 +6.9,0.19,0.33,1.6,0.039,27,98,0.9898,3.09,0.46,12.3,7 +7.3,0.18,0.29,1.2,0.044,12,143,0.9918,3.2,0.48,11.3,7 +7.3,0.25,0.36,13.1,0.05,35,200,0.9986,3.04,0.46,8.9,7 +7.3,0.25,0.36,13.1,0.05,35,200,0.9986,3.04,0.46,8.9,7 +7,0.2,0.34,5.7,0.035,32,83,0.9928,3.19,0.46,11.5,6 +7.3,0.25,0.36,13.1,0.05,35,200,0.9986,3.04,0.46,8.9,7 +6.3,0.67,0.48,12.6,0.052,57,222,0.9979,3.17,0.52,9.3,6 +7.4,0.4,0.29,5.4,0.044,31,122,0.994,3.3,0.5,11.1,8 +7.1,0.26,0.31,2.2,0.044,29,128,0.9937,3.34,0.64,10.9,8 +9,0.31,0.48,6.6,0.043,11,73,0.9938,2.9,0.38,11.6,5 +6.3,0.39,0.24,6.9,0.069,9,117,0.9942,3.15,0.35,10.2,4 +8.2,0.22,0.36,6.8,0.034,12,90,0.9944,3.01,0.38,10.5,8 +7.1,0.19,0.28,3.6,0.033,16,78,0.993,2.91,0.78,11.4,6 +7.3,0.25,0.36,13.1,0.05,35,200,0.9986,3.04,0.46,8.9,7 +7.9,0.2,0.34,1.2,0.04,29,118,0.9932,3.14,0.41,10.6,6 +7.1,0.26,0.32,5.9,0.037,39,97,0.9934,3.31,0.4,11.6,6 +7,0.2,0.34,5.7,0.035,32,83,0.9928,3.19,0.46,11.5,6 +6.9,0.3,0.33,4.1,0.035,26,155,0.9925,3.25,0.79,12.3,8 +8.1,0.29,0.49,7.1,0.042,22,124,0.9944,3.14,0.41,10.8,6 +5.8,0.17,0.3,1.4,0.037,55,130,0.9909,3.29,0.38,11.3,6 +5.9,0.415,0.02,0.8,0.038,22,63,0.9932,3.36,0.36,9.3,5 +6.6,0.23,0.26,1.3,0.045,16,128,0.9934,3.36,0.6,10,6 +8.6,0.55,0.35,15.55,0.057,35.5,366.5,1.0001,3.04,0.63,11,3 +6.9,0.35,0.74,1,0.044,18,132,0.992,3.13,0.55,10.2,5 +7.6,0.14,0.74,1.6,0.04,27,103,0.9916,3.07,0.4,10.8,7 +9.2,0.28,0.49,11.8,0.042,29,137,0.998,3.1,0.34,10.1,4 +6.2,0.18,0.49,4.5,0.047,17,90,0.9919,3.27,0.37,11.6,6 +5.3,0.165,0.24,1.1,0.051,25,105,0.9925,3.32,0.47,9.1,5 +9.8,0.25,0.74,10,0.056,36,225,0.9977,3.06,0.43,10,4 +8.1,0.29,0.49,7.1,0.042,22,124,0.9944,3.14,0.41,10.8,6 +6.8,0.22,0.49,0.9,0.052,26,128,0.991,3.25,0.35,11.4,6 +7.2,0.22,0.49,1,0.045,34,140,0.99,3.05,0.34,12.7,6 +7.4,0.25,0.49,1.1,0.042,35,156,0.9917,3.13,0.55,11.3,5 +8.2,0.18,0.49,1.1,0.033,28,81,0.9923,3,0.68,10.4,7 +6.1,0.22,0.49,1.5,0.051,18,87,0.9928,3.3,0.46,9.6,5 +7,0.39,0.24,1,0.048,8,119,0.9923,3,0.31,10.1,4 +6.1,0.22,0.49,1.5,0.051,18,87,0.9928,3.3,0.46,9.6,5 +6.5,0.36,0.49,2.9,0.03,16,94,0.9902,3.1,0.49,12.1,7 +7.1,0.29,0.49,1.2,0.031,32,99,0.9893,3.07,0.33,12.2,6 +7.4,0.25,0.49,1.1,0.042,35,156,0.9917,3.13,0.55,11.3,5 +6.9,0.23,0.24,14.2,0.053,19,94,0.9982,3.17,0.5,9.6,5 +8.5,0.56,0.74,17.85,0.051,51,243,1.0005,2.99,0.7,9.2,5 +8.2,0.18,0.49,1.1,0.033,28,81,0.9923,3,0.68,10.4,7 +6.3,0.23,0.49,7.1,0.05,67,210,0.9951,3.23,0.34,9.5,5 +6.1,0.25,0.49,7.6,0.052,67,226,0.9956,3.16,0.47,8.9,5 +7.2,0.26,0.74,13.6,0.05,56,162,0.998,3.03,0.44,8.8,5 +7.2,0.31,0.24,1.4,0.057,17,117,0.9928,3.16,0.35,10.5,5 +8,0.25,0.49,1.2,0.061,27,117,0.9938,3.08,0.34,9.4,5 +7,0.18,0.49,5.3,0.04,34,125,0.9914,3.24,0.4,12.2,6 +7.8,0.43,0.49,13,0.033,37,158,0.9955,3.14,0.35,11.3,6 +8.3,0.2,0.74,4.45,0.044,33,130,0.9924,3.25,0.42,12.2,6 +6.3,0.27,0.49,1.2,0.063,35,92,0.9911,3.38,0.42,12.2,6 +7.4,0.16,0.49,1.2,0.055,18,150,0.9917,3.23,0.47,11.2,6 +7.4,0.16,0.49,1.2,0.055,18,150,0.9917,3.23,0.47,11.2,6 +6.9,0.19,0.49,6.6,0.036,49,172,0.9932,3.2,0.27,11.5,6 +7.8,0.43,0.49,13,0.033,37,158,0.9955,3.14,0.35,11.3,6 +7.2,0.4,0.49,1.1,0.048,11,138,0.9929,3.01,0.42,9.3,5 
+7.8,0.43,0.49,13,0.033,37,158,0.9955,3.14,0.35,11.3,6 +7.6,0.52,0.49,14,0.034,37,156,0.9958,3.14,0.38,11.8,7 +8.3,0.21,0.49,19.8,0.054,50,231,1.0012,2.99,0.54,9.2,5 +6.9,0.34,0.74,11.2,0.069,44,150,0.9968,3,0.81,9.2,5 +6.3,0.27,0.49,1.2,0.063,35,92,0.9911,3.38,0.42,12.2,6 +8.3,0.2,0.74,4.45,0.044,33,130,0.9924,3.25,0.42,12.2,6 +7.1,0.22,0.74,2.7,0.044,42,144,0.991,3.31,0.41,12.2,6 +7.9,0.11,0.49,4.5,0.048,27,133,0.9946,3.24,0.42,10.6,6 +8.5,0.17,0.74,3.6,0.05,29,128,0.9928,3.28,0.4,12.4,6 +6.4,0.145,0.49,5.4,0.048,54,164,0.9946,3.56,0.44,10.8,6 +7.4,0.16,0.49,1.2,0.055,18,150,0.9917,3.23,0.47,11.2,6 +8.3,0.19,0.49,1.2,0.051,11,137,0.9918,3.06,0.46,11,6 +8,0.44,0.49,9.1,0.031,46,151,0.9926,3.16,0.27,12.7,8 +7,0.2,0.74,0.8,0.044,19,163,0.9931,3.46,0.53,10.2,5 +6.9,0.19,0.49,6.6,0.036,49,172,0.9932,3.2,0.27,11.5,6 +7.1,0.25,0.49,3,0.03,30,96,0.9903,3.13,0.39,12.3,7 +6.5,0.24,0.24,1.6,0.046,15,60,0.9928,3.19,0.39,9.8,5 +7.2,0.4,0.49,1.1,0.048,11,138,0.9929,3.01,0.42,9.3,5 +7.6,0.52,0.49,14,0.034,37,156,0.9958,3.14,0.38,11.8,7 +7.8,0.43,0.49,13,0.033,37,158,0.9955,3.14,0.35,11.3,6 +7.8,0.21,0.49,1.35,0.052,6,48,0.9911,3.15,0.28,11.4,5 +7,0.2,0.49,5.9,0.038,39,128,0.9938,3.21,0.48,10.8,6 +6.9,0.25,0.24,3.6,0.057,13,85,0.9942,2.99,0.48,9.5,4 +7.2,0.08,0.49,1.3,0.05,18,148,0.9945,3.46,0.44,10.2,6 +7.1,0.85,0.49,8.7,0.028,40,184,0.9962,3.22,0.36,10.7,5 +7.6,0.51,0.24,1.2,0.04,10,104,0.992,3.05,0.29,10.8,6 +7.9,0.22,0.24,4.6,0.044,39,159,0.9927,2.99,0.28,11.5,6 +7.7,0.16,0.49,2,0.056,20,124,0.9948,3.32,0.49,10.7,6 +7.2,0.08,0.49,1.3,0.05,18,148,0.9945,3.46,0.44,10.2,6 +6.6,0.25,0.24,1.7,0.048,26,124,0.9942,3.37,0.6,10.1,6 +6.7,0.16,0.49,2.4,0.046,57,187,0.9952,3.62,0.81,10.4,6 +6.9,0.25,0.24,3.6,0.057,13,85,0.9942,2.99,0.48,9.5,4 +7.5,0.32,0.24,4.6,0.053,8,134,0.9958,3.14,0.5,9.1,3 +7.4,0.28,0.49,1.5,0.034,20,126,0.9918,2.98,0.39,10.6,6 +6.2,0.15,0.49,0.9,0.033,17,51,0.9932,3.3,0.7,9.4,6 +6.7,0.25,0.74,19.4,0.054,44,169,1.0004,3.51,0.45,9.8,6 +6.5,0.26,0.74,13.3,0.044,68,224,0.9972,3.18,0.54,9.5,6 +7.9,0.16,0.74,17.85,0.037,52,187,0.9998,2.99,0.41,9.3,5 +5.6,0.185,0.49,1.1,0.03,28,117,0.9918,3.55,0.45,10.3,6 +7.5,0.2,0.49,1.3,0.031,8,97,0.9918,3.06,0.62,11.1,5 +8,0.3,0.49,9.4,0.046,47,188,0.9964,3.14,0.48,10,5 +8,0.34,0.49,9,0.033,39,180,0.9936,3.13,0.38,12.3,8 +7.7,0.35,0.49,8.65,0.033,42,186,0.9931,3.14,0.38,12.4,8 +7.6,0.29,0.49,9.6,0.03,45,197,0.9938,3.13,0.38,12.3,7 +6.7,0.62,0.24,1.1,0.039,6,62,0.9934,3.41,0.32,10.4,5 +6.8,0.27,0.49,1.2,0.044,35,126,0.99,3.13,0.48,12.1,7 +7.7,0.27,0.49,1.8,0.041,23,86,0.9914,3.16,0.42,12.5,6 +6.7,0.51,0.24,2.1,0.043,14,155,0.9904,3.22,0.6,13,6 +7.4,0.19,0.49,9.3,0.03,26,132,0.994,2.99,0.32,11,7 +8.3,0.2,0.49,1.7,0.04,34,169,0.9938,3.05,0.37,10.1,5 +6.6,0.3,0.24,1.2,0.034,17,121,0.9933,3.13,0.36,9.2,5 +6.8,0.36,0.24,4.6,0.039,24,124,0.9909,3.27,0.34,12.6,7 +7,0.17,0.74,12.8,0.045,24,126,0.9942,3.26,0.38,12.2,8 +9.2,0.18,0.49,1.5,0.041,39,130,0.9945,3.04,0.49,9.8,7 +8.1,0.2,0.49,8.1,0.051,51,205,0.9954,3.1,0.52,11,6 +7.8,0.26,0.74,7.5,0.044,59,160,0.996,3.22,0.64,10,6 +6.8,0.21,0.49,14.5,0.06,50,170,0.9991,3.55,0.44,9.8,6 +7.9,0.2,0.49,1.6,0.053,15,144,0.993,3.16,0.47,10.5,5 +8,0.18,0.49,1.8,0.061,10,145,0.9942,3.23,0.48,10,5 +8.8,0.23,0.74,3.2,0.042,15,126,0.9934,3.02,0.51,11.2,6 +7.3,0.22,0.49,9.4,0.034,29,134,0.9939,2.99,0.32,11,7 +7.3,0.22,0.49,9.9,0.031,48,161,0.9937,3.01,0.28,11.2,6 +7.4,0.19,0.49,9.3,0.03,26,132,0.994,2.99,0.32,11,7 +7.3,0.155,0.49,1.3,0.039,34,136,0.9926,3.14,0.77,10.5,6 +8.2,0.22,0.49,9.6,0.037,53,154,0.9951,3.02,0.33,10.6,6 
+8.2,0.24,0.49,9.3,0.038,52,163,0.9952,3.02,0.33,10.6,6 +8.4,0.23,0.49,7.8,0.035,22,95,0.9935,3.04,0.34,12,6 +8.3,0.2,0.49,1.7,0.04,34,169,0.9938,3.05,0.37,10.1,5 +8.3,0.2,0.49,1.7,0.038,38,167,0.9939,3.05,0.37,10.1,6 +6.6,0.3,0.24,1.2,0.034,17,121,0.9933,3.13,0.36,9.2,5 +6.9,0.21,0.49,1.4,0.041,15,164,0.9927,3.25,0.63,11,5 +8,0.25,0.49,9,0.044,31,185,0.998,3.34,0.49,10,6 +6.6,0.21,0.49,18.15,0.042,41,158,0.9997,3.28,0.39,8.7,6 +7.2,0.27,0.74,12.5,0.037,47,156,0.9981,3.04,0.44,8.7,5 +14.2,0.27,0.49,1.1,0.037,33,156,0.992,3.15,0.54,11.1,6 +7.9,0.28,0.49,7.7,0.045,48,195,0.9954,3.04,0.55,11,6 +7.4,0.27,0.49,1.1,0.037,33,156,0.992,3.15,0.54,11.1,6 +6.6,0.21,0.49,18.15,0.042,41,158,0.9997,3.28,0.39,8.7,6 +7.2,0.27,0.74,12.5,0.037,47,156,0.9981,3.04,0.44,8.7,5 +8.1,0.3,0.49,8.1,0.037,26,174,0.9943,3.1,0.3,11.2,7 +7.5,0.23,0.49,7.7,0.049,61,209,0.9941,3.14,0.3,11.1,7 +7.3,0.26,0.49,5,0.028,32,107,0.9936,3.24,0.54,10.8,6 +7.1,0.18,0.74,15.6,0.044,44,176,0.9996,3.38,0.67,9,6 +8.5,0.15,0.49,1.5,0.031,17,122,0.9932,3.03,0.4,10.3,6 +8.9,0.13,0.49,1,0.028,6,24,0.9926,2.91,0.32,9.9,5 +8.1,0.28,0.49,1,0.04,32,148,0.9936,3.13,0.41,10,6 +6,0.17,0.49,1,0.034,26,106,0.992,3.21,0.42,9.8,6 +7.3,0.26,0.49,5,0.028,32,107,0.9936,3.24,0.54,10.8,6 +7.1,0.18,0.74,15.6,0.044,44,176,0.9996,3.38,0.67,9,6 +7.1,0.53,0.24,0.8,0.029,29,86,0.993,3.16,0.32,9.1,4 +7.2,0.16,0.49,1.3,0.037,27,104,0.9924,3.23,0.57,10.6,6 +7.3,0.14,0.49,1.1,0.038,28,99,0.9928,3.2,0.72,10.6,6 +8.9,0.13,0.49,1,0.028,6,24,0.9926,2.91,0.32,9.9,5 +7.9,0.12,0.49,5.2,0.049,33,152,0.9952,3.18,0.47,10.6,6 +6.7,0.29,0.49,4.7,0.034,35,156,0.9945,3.13,0.45,9.9,6 +6.7,0.3,0.49,4.8,0.034,36,158,0.9945,3.12,0.45,9.9,6 +7.1,0.36,0.24,1.8,0.025,32,102,0.9903,3.34,0.59,12.8,6 +8.5,0.15,0.49,1.5,0.031,17,122,0.9932,3.03,0.4,10.3,6 +7.9,0.18,0.49,5.2,0.051,36,157,0.9953,3.18,0.48,10.6,6 +6.6,0.19,0.99,1.2,0.122,45,129,0.9936,3.09,0.31,8.7,6 +7.3,0.21,0.49,1.8,0.038,44,152,0.9912,3.32,0.44,12.6,7 +6.9,0.3,0.49,7.6,0.057,25,156,0.9962,3.43,0.63,11,7 +7.9,0.42,0.49,8.2,0.056,32,164,0.9965,3.29,0.6,11.2,7 +6.9,0.24,0.49,1.3,0.032,35,148,0.9932,3.45,0.57,10.7,7 +7.6,0.23,0.49,10,0.036,45,182,0.9967,3.08,0.58,9.6,6 +7.9,0.18,0.49,5.2,0.051,36,157,0.9953,3.18,0.48,10.6,6 +6.2,0.43,0.49,6.4,0.045,12,115,0.9963,3.27,0.57,9,4 +8.8,0.35,0.49,1,0.036,14,56,0.992,2.96,0.33,10.5,4 +7.8,0.3,0.74,1.8,0.033,33,156,0.991,3.29,0.52,12.8,6 +9.1,0.28,0.49,2,0.059,10,112,0.9958,3.15,0.46,10.1,5 +7.1,0.34,0.49,1.5,0.027,26,126,0.99,3.3,0.33,12.2,7 +7.8,0.3,0.74,1.8,0.033,33,156,0.991,3.29,0.52,12.8,6 +9.1,0.28,0.49,2,0.059,10,112,0.9958,3.15,0.46,10.1,5 +8.5,0.19,0.49,3.5,0.044,29,117,0.9938,3.14,0.51,10.1,6 +7.6,0.18,0.49,18.05,0.046,36,158,0.9996,3.06,0.41,9.2,5 +7.5,0.19,0.49,1.8,0.055,19,110,0.9946,3.33,0.44,9.9,5 +7.4,0.3,0.49,8.2,0.055,49,188,0.9974,3.52,0.58,9.7,6 +6.7,0.3,0.74,5,0.038,35,157,0.9945,3.21,0.46,9.9,5 +6.6,0.3,0.74,4.6,0.041,36,159,0.9946,3.21,0.45,9.9,5 +7.4,0.3,0.49,8.2,0.055,49,188,0.9974,3.52,0.58,9.7,6 +6.9,0.22,0.49,7,0.063,50,168,0.9957,3.54,0.5,10.3,6 +7.8,0.26,0.49,3.1,0.045,21,116,0.9931,3.16,0.35,10.3,5 +8.5,0.17,0.49,8.8,0.048,23,108,0.9947,2.88,0.34,10.5,4 +6.8,0.17,0.74,2.4,0.053,61,182,0.9953,3.63,0.76,10.5,6 +6.2,0.27,0.49,1.4,0.05,20,74,0.9931,3.32,0.44,9.8,6 +7.1,0.64,0.49,1.8,0.05,17,128,0.9946,3.31,0.58,10.6,4 +6.4,0.18,0.74,11.9,0.046,54,168,0.9978,3.58,0.68,10.1,5 +7.6,0.31,0.49,13.4,0.062,50,191,0.9989,3.22,0.53,9,4 +9.8,0.31,0.49,15.4,0.046,13,119,1.0004,3.18,0.45,9.5,5 +9,0.3,0.49,7.2,0.039,32,84,0.9938,2.94,0.32,11.5,6 
+8.4,0.24,0.49,7.4,0.039,46,108,0.9934,3.03,0.33,11.9,7 +6.4,0.18,0.74,11.9,0.046,54,168,0.9978,3.58,0.68,10.1,5 +6.4,0.25,0.74,7.8,0.045,52,209,0.9956,3.21,0.42,9.2,6 +7.3,0.3,0.74,13.5,0.039,46,165,0.9982,3.02,0.4,8.7,5 +9.3,0.31,0.49,1.3,0.042,34,147,0.9948,3.11,0.46,9.8,5 +6.4,0.25,0.74,7.8,0.045,52,209,0.9956,3.21,0.42,9.2,6 +7.3,0.3,0.74,13.5,0.039,46,165,0.9982,3.02,0.4,8.7,5 +7,0.27,0.74,1.5,0.036,27,122,0.9926,3.35,0.48,11.2,6 +7.9,0.14,0.74,1.2,0.028,30,165,0.991,3.08,0.82,12.3,6 +6.4,0.12,0.49,6.4,0.042,49,161,0.9945,3.34,0.44,10.4,6 +6.8,0.21,0.74,1.2,0.047,25,111,0.9916,3.13,0.41,10.7,6 +8.6,0.16,0.49,7.3,0.043,9,63,0.9953,3.13,0.59,10.5,6 +7,0.29,0.49,3.8,0.047,37,136,0.9938,2.95,0.4,9.4,6 +6.4,0.27,0.49,7.3,0.046,53,206,0.9956,3.24,0.43,9.2,6 +6.6,0.55,0.01,2.7,0.034,56,122,0.9906,3.15,0.3,11.9,5 +6.4,0.27,0.49,7.3,0.046,53,206,0.9956,3.24,0.43,9.2,6 +6.3,0.24,0.74,1.4,0.172,24,108,0.9932,3.27,0.39,9.9,6 +6.7,0.33,0.49,1.6,0.167,20,94,0.9914,3.11,0.5,11.4,6 +7,0.29,0.49,3.8,0.047,37,136,0.9938,2.95,0.4,9.4,6 +8.2,0.34,0.49,8,0.046,55,223,0.996,3.08,0.52,10.7,6 +5.6,0.39,0.24,4.7,0.034,27,77,0.9906,3.28,0.36,12.7,5 +5.6,0.41,0.24,1.9,0.034,10,53,0.98815,3.32,0.5,13.5,7 +6.7,0.41,0.01,2.8,0.048,39,137,0.9942,3.24,0.35,9.5,5 +7.1,0.26,0.49,2.2,0.032,31,113,0.9903,3.37,0.42,12.9,9 +7.5,0.32,0.49,1.7,0.031,44,109,0.9906,3.07,0.46,12.5,6 +5.8,0.19,0.49,4.9,0.04,44,118,0.9935,3.34,0.38,9.5,7 +6.9,0.27,0.49,23.5,0.057,59,235,1.0024,2.98,0.47,8.6,5 +8.1,0.2,0.49,11.8,0.048,46,212,0.9968,3.09,0.46,10,7 +7.5,0.32,0.49,1.7,0.031,44,109,0.9906,3.07,0.46,12.5,6 +8.2,0.26,0.49,5.2,0.04,19,100,0.9941,3.12,0.34,10.1,6 +7.8,0.26,0.49,3.2,0.027,28,87,0.9919,3.03,0.32,11.3,7 +8,0.14,0.49,1.5,0.035,42,120,0.9928,3.26,0.4,10.6,7 +8,0.29,0.49,11.7,0.035,40,131,0.9958,3.14,0.34,10.8,5 +7.5,0.19,0.49,1.6,0.047,42,140,0.9932,3.4,0.47,10.7,6 +6.9,0.34,0.49,7.3,0.045,61,206,0.9957,3.09,0.4,9,6 +6.2,0.2,0.49,1.6,0.065,17,143,0.9937,3.22,0.52,9.2,6 +6.4,0.37,0.49,13.3,0.045,53,243,0.9982,3.14,0.48,8.5,6 +6.2,0.22,0.49,6,0.029,31,128,0.9928,3.41,0.36,11.3,8 +7.8,0.26,0.49,3.2,0.027,28,87,0.9919,3.03,0.32,11.3,7 +8.9,0.32,0.49,1.6,0.05,17,131,0.9956,3.13,0.34,9.4,5 +6.5,0.44,0.49,7.7,0.045,16,169,0.9957,3.11,0.37,8.7,6 +7,0.14,0.49,5.9,0.053,22,118,0.9954,3.36,0.36,9.4,6 +9,0.17,0.49,1,0.039,46,131,0.993,3.09,0.51,10.5,7 +6.4,0.26,0.49,6.4,0.037,37,161,0.9954,3.38,0.53,9.7,6 +9,0.22,0.49,10.4,0.048,52,195,0.9987,3.31,0.44,10.2,6 +8.9,0.32,0.49,1.6,0.05,17,131,0.9956,3.13,0.34,9.4,5 +8.2,0.2,0.49,3.5,0.057,14,108,0.9928,3.19,0.35,11.5,6 +7.8,0.15,0.24,7.7,0.047,21,98,0.9951,2.94,0.31,9.6,6 +6.9,0.25,0.24,1.8,0.053,6,121,0.993,3.23,0.7,11.4,5 +8.2,0.2,0.49,3.5,0.057,14,108,0.9928,3.19,0.35,11.5,6 +7.1,0.28,0.49,6.5,0.041,28,111,0.9926,3.41,0.58,12.2,8 +7.4,0.19,0.49,6.7,0.037,15,110,0.9938,3.2,0.38,11,7 +8.3,0.25,0.49,16.8,0.048,50,228,1.0001,3.03,0.52,9.2,6 +7.5,0.14,0.74,1.6,0.035,21,126,0.9933,3.26,0.45,10.2,6 +7.8,0.49,0.49,7,0.043,29,149,0.9952,3.21,0.33,10,5 +8.1,0.12,0.49,1.2,0.042,43,160,0.9934,3.13,0.48,9.7,6 +7.6,0.47,0.49,13,0.239,42,220,0.9988,2.96,0.51,9.2,5 +7.9,0.22,0.49,3.8,0.042,26,105,0.993,3.1,0.39,10.5,5 +7.8,0.49,0.49,7,0.043,29,149,0.9952,3.21,0.33,10,5 +6.4,0.22,0.49,7.5,0.054,42,151,0.9948,3.27,0.52,10.1,6 +7.3,0.19,0.49,15.55,0.058,50,134,0.9998,3.42,0.36,9.1,7 +8.1,0.3,0.49,12.3,0.049,50,144,0.9971,3.09,0.57,10.2,7 +7.3,0.19,0.49,15.55,0.058,50,134,0.9998,3.42,0.36,9.1,7 +7.5,0.24,0.49,9.4,0.048,50,149,0.9962,3.17,0.59,10.5,7 +6.4,0.22,0.49,7.5,0.054,42,151,0.9948,3.27,0.52,10.1,6 
+7.8,0.21,0.49,1.2,0.036,20,99,0.99,3.05,0.28,12.1,7 +7.1,0.3,0.49,1.6,0.045,31,100,0.9942,3.4,0.59,10.2,5 +6.9,0.26,0.49,1.6,0.058,39,166,0.9965,3.65,0.52,9.4,4 +7.6,0.31,0.49,3.95,0.044,27,131,0.9912,3.08,0.67,12.8,7 +6.4,0.42,0.74,12.8,0.076,48,209,0.9978,3.12,0.58,9,6 +8.2,0.29,0.49,1,0.044,29,118,0.9928,3.24,0.36,10.9,4 +7.9,0.33,0.28,31.6,0.053,35,176,1.0103,3.15,0.38,8.8,6 +6.6,0.46,0.49,7.4,0.052,19,184,0.9956,3.11,0.38,9,5 +7.8,0.28,0.49,1.3,0.046,27,142,0.9936,3.09,0.59,10.2,5 +5.8,0.15,0.49,1.1,0.048,21,98,0.9929,3.19,0.48,9.2,5 +7.8,0.4,0.49,7.8,0.06,34,162,0.9966,3.26,0.58,11.3,6 +6.6,0.31,0.49,7.7,0.05,52,220,0.9964,3.12,0.45,8.8,5 +6.6,0.325,0.49,7.7,0.049,53,217,0.996,3.16,0.4,9.3,5 +6.6,0.27,0.49,7.8,0.049,62,217,0.9959,3.17,0.45,9.4,6 +6.7,0.26,0.49,8.3,0.047,54,191,0.9954,3.23,0.4,10.3,6 +6.7,0.21,0.49,1.4,0.047,30,114,0.9914,2.92,0.42,10.8,7 +7.9,0.33,0.28,31.6,0.053,35,176,1.0103,3.15,0.38,8.8,6 +8.1,0.28,0.46,15.4,0.059,32,177,1.0004,3.27,0.58,9,4 +6.5,0.13,0.37,1,0.036,48,114,0.9911,3.41,0.51,11.5,8 +7.8,0.445,0.56,1,0.04,8,84,0.9938,3.25,0.43,10.8,5 +8.8,0.39,0.34,5.9,0.055,33,128,0.9927,2.95,0.51,11.8,6 +7.9,0.18,0.33,1.2,0.033,20,72,0.9922,3.12,0.38,10.5,7 +7.1,0.31,0.38,1.2,0.036,10,124,0.9924,3.14,0.44,9.9,6 +7.8,0.24,0.18,6.7,0.046,33,160,0.9963,3.2,0.56,9.8,6 +7,0.35,0.3,6.5,0.028,27,87,0.9936,3.4,0.42,11.4,7 +6.6,0.26,0.31,4.8,0.138,41,168,0.9951,3.2,0.38,9.3,5 +6.6,0.27,0.31,5.3,0.137,35,163,0.9951,3.2,0.38,9.3,5 +6.8,0.22,0.29,8.9,0.046,82,188,0.9955,3.3,0.44,10.3,6 +6.2,0.27,0.32,8.8,0.047,65,224,0.9961,3.17,0.47,8.9,5 +7,0.35,0.3,6.5,0.028,27,87,0.9936,3.4,0.42,11.4,7 +7.3,0.23,0.37,1.8,0.032,60,156,0.992,3.11,0.35,11.1,6 +6.2,0.3,0.2,6.6,0.045,42,170,0.9944,3.36,0.45,10.4,6 +6.4,0.35,0.2,5.7,0.034,18,117,0.9944,3.33,0.43,10.1,5 +7.6,0.32,0.34,18.35,0.054,44,197,1.0008,3.22,0.55,9,5 +6.3,0.31,0.3,10,0.046,49,212,0.9962,3.74,0.55,11.9,6 +7.2,0.25,0.28,14.4,0.055,55,205,0.9986,3.12,0.38,9,7 +7.2,0.25,0.28,14.4,0.055,55,205,0.9986,3.12,0.38,9,7 +7.3,0.26,0.33,17.85,0.049,41.5,195,1,3.06,0.44,9.1,7 +7.2,0.25,0.28,14.4,0.055,55,205,0.9986,3.12,0.38,9,7 +7.4,0.26,0.37,9.4,0.047,42,147,0.9982,3.46,0.72,10,5 +7.3,0.26,0.33,17.85,0.049,41.5,195,1,3.06,0.44,9.1,7 +6.7,0.25,0.26,1.55,0.041,118.5,216,0.9949,3.55,0.63,9.4,3 +7.1,0.16,0.25,1.3,0.034,28,123,0.9915,3.27,0.55,11.4,6 +9,0.43,0.3,1.5,0.05,7,175,0.9951,3.11,0.45,9.7,4 +7.2,0.25,0.28,14.4,0.055,55,205,0.9986,3.12,0.38,9,7 +7,0.24,0.3,4.2,0.04,41,213,0.9927,3.28,0.49,11.8,6 +6.7,0.265,0.22,8.6,0.048,54,198,0.9955,3.25,0.41,10.2,5 +7.7,0.12,0.32,1.4,0.06,47,150,0.9952,3.37,0.42,9.2,6 +7.2,0.21,0.33,3,0.036,35,132,0.9928,3.25,0.4,11,6 +8.5,0.32,0.36,14.9,0.041,47,190,0.9982,3.08,0.31,10,6 +6.9,0.18,0.3,2,0.038,39,190,0.9914,3.32,0.37,12.2,6 +7,0.24,0.3,4.2,0.04,41,213,0.9927,3.28,0.49,11.8,6 +6.3,0.26,0.29,2.2,0.043,35,175,0.9918,3.38,0.43,11.6,6 +6.7,0.26,0.3,1.8,0.043,25,121,0.9944,3.44,0.61,10.2,6 +7.9,0.29,0.36,11.1,0.033,43,208,0.9969,3.14,0.46,10.3,5 +6.5,0.27,0.19,4.2,0.046,6,114,0.9955,3.25,0.35,8.6,4 +6.7,0.33,0.42,6.4,0.058,27,151,0.9954,3.16,0.44,9.6,5 +6.7,0.31,0.42,6.4,0.057,25,148,0.9955,3.16,0.45,9.6,5 +6.6,0.25,0.31,1.5,0.035,32,127,0.9921,3.41,0.47,11.3,6 +6.4,0.24,0.22,1.5,0.038,38,157,0.9934,3.41,0.55,9.9,6 +6.8,0.26,0.29,16.95,0.056,48,179,0.9998,3.45,0.4,9.6,5 +7,0.61,0.26,1.7,0.051,25,161,0.9946,3.36,0.6,10.6,4 +6.8,0.22,0.3,13.6,0.055,50,180,0.9984,3.44,0.39,9.8,5 +8.1,0.31,0.24,1.6,0.032,10,67,0.9924,3.08,0.47,10.5,5 +7,0.2,0.3,6.1,0.037,31,120,0.9939,3.24,0.51,10.8,5 
+7.9,0.18,0.37,3,0.061,25,178,0.995,3.22,0.51,10,6 +6.6,0.34,0.27,6.2,0.059,23,136,0.9957,3.3,0.49,10.1,6 +6.8,0.3,0.24,6.6,0.123,35,116,0.9953,3.07,0.48,9.4,5 +6.5,0.18,0.34,1.6,0.04,43,148,0.9912,3.32,0.59,11.5,8 +7,0.21,0.31,6,0.046,29,108,0.9939,3.26,0.5,10.8,6 +6.8,0.27,0.32,1.5,0.044,19,142,0.9921,3.1,0.43,9.9,6 +9.3,0.2,0.33,1.7,0.05,28,178,0.9954,3.16,0.43,9,4 +5.8,0.23,0.27,1.8,0.043,24,69,0.9933,3.38,0.31,9.4,6 +7.6,0.2,0.39,2.6,0.044,30,180,0.9941,3.46,0.44,10.8,7 +8.2,0.15,0.48,2.7,0.052,24,190,0.995,3.5,0.45,10.9,7 +7.5,0.4,1,19.5,0.041,33,148,0.9977,3.24,0.38,12,6 +6.5,0.18,0.34,1.6,0.04,43,148,0.9912,3.32,0.59,11.5,8 +7,0.13,0.3,5,0.056,31,122,0.9945,3.47,0.42,10.5,6 +6.9,0.17,0.22,4.6,0.064,55,152,0.9952,3.29,0.37,9.3,6 +7,0.3,0.32,6.4,0.034,28,97,0.9924,3.23,0.44,11.8,6 +7.6,0.445,0.44,14.5,0.045,68,212,0.9986,3.48,0.36,10,6 +6.8,0.3,0.24,6.6,0.123,35,116,0.9953,3.07,0.48,9.4,5 +7.5,0.22,0.33,6.7,0.036,45,138,0.9939,3.2,0.68,11.4,6 +9.2,0.23,0.3,1.1,0.031,40,99,0.9929,2.94,0.3,10.4,6 +8.7,0.34,0.46,13.8,0.055,68,198,0.9988,3.36,0.37,9.5,6 +6.6,0.545,0.04,2.5,0.031,48,111,0.9906,3.14,0.32,11.9,5 +8.1,0.3,0.31,1.1,0.041,49,123,0.9914,2.99,0.45,11.1,6 +6.9,0.16,0.3,9.6,0.057,50,185,0.9978,3.39,0.38,9.6,6 +8,0.32,0.36,4.6,0.042,56,178,0.9928,3.29,0.47,12,6 +6.1,0.22,0.23,3.1,0.052,15,104,0.9948,3.14,0.42,8.7,5 +6.9,0.16,0.3,9.6,0.057,50,185,0.9978,3.39,0.38,9.6,6 +7.5,0.15,0.38,1.8,0.054,19,101,0.9946,3.24,0.44,10,5 +8.4,0.29,0.29,1.05,0.032,4,55,0.9908,2.91,0.32,11.4,4 +6.6,0.37,0.47,6.5,0.061,23,150,0.9954,3.14,0.45,9.6,6 +7.7,0.38,0.4,2,0.038,28,152,0.9906,3.18,0.32,12.9,6 +6.3,0.25,0.23,14.9,0.039,47,142,0.99705,3.14,0.35,9.7,6 +8.3,0.3,0.36,10,0.042,33,169,0.9982,3.23,0.51,9.3,6 +6.6,0.22,0.58,1.1,0.133,52,136,0.9932,3.1,0.3,9.1,5 +6.1,0.34,0.31,12,0.053,46,238,0.9977,3.16,0.48,8.6,5 +7.5,0.22,0.29,4.8,0.05,33,87,0.994,3.14,0.42,9.9,5 +8.3,0.3,0.36,10,0.042,33,169,0.9982,3.23,0.51,9.3,6 +8,0.27,0.24,1.2,0.044,20,102,0.9929,3.28,0.42,10.9,5 +6.1,0.17,0.27,1.5,0.056,45,135,0.9924,3.2,0.43,10.2,6 +7.4,0.18,0.3,10.4,0.045,44,174,0.9966,3.11,0.57,9.7,6 +6.7,0.16,0.28,2.5,0.046,40,153,0.9921,3.38,0.51,11.4,7 +6.1,0.255,0.44,12.3,0.045,53,197,0.9967,3.24,0.54,9.5,6 +7.4,0.23,0.25,1.4,0.049,43,141,0.9934,3.42,0.54,10.2,7 +6.4,0.16,0.28,2.2,0.042,33,93,0.9914,3.31,0.43,11.1,6 +6.3,0.25,0.23,14.9,0.039,47,142,0.99705,3.14,0.35,9.7,6 +6.7,0.27,0.25,8,0.053,54,202,0.9961,3.22,0.43,9.3,5 +6.9,0.29,0.23,8.6,0.056,56,215,0.9967,3.17,0.44,8.8,5 +9.6,0.21,0.28,1.2,0.038,12,53,0.9926,2.8,0.46,10.6,5 +6.6,0.62,0.2,8.7,0.046,81,224,0.99605,3.17,0.44,9.3,5 +6.4,0.28,0.19,5.4,0.042,67,181,0.99435,3.31,0.35,10.2,6 +8,0.3,0.28,5.7,0.044,31,124,0.9948,3.16,0.51,10.2,6 +6.4,0.17,0.27,1.5,0.037,20,98,0.9916,3.46,0.42,11,7 +7.3,0.21,0.3,10.9,0.037,18,112,0.997,3.4,0.5,9.6,6 +6.7,0.27,0.25,8,0.053,54,202,0.9961,3.22,0.43,9.3,5 +6.9,0.29,0.23,8.6,0.056,56,215,0.9967,3.17,0.44,8.8,5 +6.6,0.32,0.26,7.7,0.054,56,209,0.9961,3.17,0.45,8.8,5 +7.4,0.32,0.22,1.7,0.051,50,179,0.9955,3.28,0.69,8.9,5 +6.6,0.37,0.07,1.4,0.048,58,144,0.9922,3.17,0.38,10,5 +7.7,0.43,0.28,4.5,0.046,33,102,0.9918,3.16,0.56,12.2,7 +7.8,0.39,0.26,9.9,0.059,33,181,0.9955,3.04,0.42,10.9,6 +6.5,0.18,0.26,1.4,0.041,40,141,0.9941,3.34,0.72,9.5,6 +7.8,0.4,0.26,9.5,0.059,32,178,0.9955,3.04,0.43,10.9,6 +7.8,0.39,0.26,9.9,0.059,33,181,0.9955,3.04,0.42,10.9,6 +6.9,0.19,0.28,3,0.054,33,99,0.9924,3.16,0.4,10.8,6 +7.7,0.49,1,19.6,0.03,28,135,0.9973,3.24,0.4,12,6 +6.6,0.25,0.35,14,0.069,42,163,0.999,3.56,0.47,9.8,5 
+6.5,0.18,0.26,1.4,0.041,40,141,0.9941,3.34,0.72,9.5,6 +6.4,0.15,0.36,1.8,0.034,43,150,0.9922,3.42,0.69,11,8 +6.4,0.15,0.36,1.8,0.034,43,150,0.9922,3.42,0.69,11,8 +8.4,0.17,0.31,5.4,0.052,47,150,0.9953,3.24,0.38,9.8,5 +6.1,0.32,0.37,1.8,0.051,13,200,0.9945,3.49,0.44,10.5,4 +8.5,0.21,0.26,9.25,0.034,73,142,0.9945,3.05,0.37,11.4,6 +8.7,0.45,0.4,1.5,0.067,17,100,0.9957,3.27,0.57,10.1,6 +6.7,0.24,0.29,6.8,0.038,54,127,0.9932,3.33,0.46,11.6,7 +8.5,0.21,0.26,9.25,0.034,73,142,0.9945,3.05,0.37,11.4,6 +7.4,0.33,0.26,2.6,0.04,29,115,0.9913,3.07,0.52,11.8,7 +7.2,0.26,0.3,2.1,0.033,50,158,0.9909,3.33,0.43,12.1,7 +8.2,0.36,0.29,7.6,0.035,37,122,0.9939,3.16,0.34,12,5 +7.8,0.2,0.24,1.6,0.026,26,189,0.991,3.08,0.74,12.1,7 +9.4,0.16,0.3,1.4,0.042,26,176,0.9954,3.15,0.46,9.1,5 +6.4,0.33,0.24,1.6,0.054,25,117,0.9943,3.36,0.5,9.3,5 +7.8,0.22,0.36,1.4,0.056,21,153,0.993,3.2,0.53,10.4,6 +7.4,0.35,0.31,17.95,0.062,42,187,1.0002,3.27,0.64,9.1,5 +6.6,0.37,0.24,2,0.064,23,120,0.9946,3.32,0.54,9.4,5 +6.7,0.37,0.41,6.3,0.061,22,149,0.9953,3.16,0.47,9.6,6 +7.1,0.37,0.32,1.4,0.037,27,126,0.9918,3.19,0.62,12,5 +6.9,0.25,0.27,9.05,0.039,37,128,0.9936,3.27,0.34,11.3,8 +6.8,0.23,0.29,15.4,0.073,56,173,0.9984,3.06,0.41,8.7,6 +6.4,0.26,0.21,7.1,0.04,35,162,0.9956,3.39,0.58,9.9,6 +7.6,0.3,0.22,10.2,0.049,57,191,0.9966,3.08,0.4,9.3,6 +9.4,0.16,0.23,1.6,0.042,14,67,0.9942,3.07,0.32,9.5,5 +6.8,0.23,0.29,15.4,0.073,56,173,0.9984,3.06,0.41,8.7,6 +6.4,0.26,0.21,7.1,0.04,35,162,0.9956,3.39,0.58,9.9,6 +7.6,0.3,0.22,10.2,0.049,57,191,0.9966,3.08,0.4,9.3,6 +7.5,0.33,0.39,12.4,0.065,29,119,0.9974,3.16,0.39,9.4,5 +7.6,0.38,0.2,3.4,0.046,9,116,0.9944,3.15,0.41,9.4,5 +8.8,0.2,0.43,15,0.053,60,184,1.0008,3.28,0.79,8.8,6 +7.5,0.33,0.39,12.4,0.065,29,119,0.9974,3.16,0.39,9.4,5 +8.8,0.2,0.43,15,0.053,60,184,1.0008,3.28,0.79,8.8,6 +6.6,0.36,0.21,1.5,0.049,39,184,0.9928,3.18,0.41,9.9,6 +7.6,0.38,0.2,3.4,0.046,9,116,0.9944,3.15,0.41,9.4,5 +5.6,0.46,0.24,4.8,0.042,24,72,0.9908,3.29,0.37,12.6,6 +7.2,0.15,0.38,1.2,0.038,18,110,0.9917,3.19,0.43,11.1,6 +8.2,0.42,0.29,4.1,0.03,31,100,0.9911,3,0.32,12.8,7 +6.8,0.3,0.35,2.8,0.038,10,164,0.9912,3.09,0.53,12,6 +6.7,0.27,0.3,13.9,0.029,34,131,0.9953,3.36,0.5,12,7 +7.2,0.5,0,0.8,0.034,46,114,0.9932,3.19,0.34,9.2,4 +6,0.26,0.29,1,0.032,27,96,0.9896,3.38,0.44,12.3,6 +6.8,0.33,0.28,1.2,0.032,38,131,0.9889,3.19,0.41,13,6 +6.8,0.3,0.35,2.8,0.038,10,164,0.9912,3.09,0.53,12,6 +7.4,0.29,0.31,1.7,0.035,23,110,0.9926,3.07,0.38,10.9,5 +8.2,0.42,0.29,4.1,0.03,31,100,0.9911,3,0.32,12.8,7 +7.3,0.19,0.24,6.3,0.054,34,231,0.9964,3.36,0.54,10,6 +6.5,0.32,0.12,11.5,0.033,35,165,0.9974,3.22,0.32,9,5 +7.1,0.32,0.4,1.5,0.034,13,84,0.9944,3.42,0.6,10.4,5 +6.5,0.32,0.12,11.5,0.033,35,165,0.9974,3.22,0.32,9,5 +7.3,0.19,0.24,6.3,0.054,34,231,0.9964,3.36,0.54,10,6 +7.3,0.17,0.23,6.3,0.051,35,240,0.9963,3.36,0.54,10,6 +7.7,0.44,0.24,11.2,0.031,41,167,0.9948,3.12,0.43,11.3,7 +7.7,0.44,0.24,11.2,0.031,41,167,0.9948,3.12,0.43,11.3,7 +7.4,0.49,0.24,15.1,0.03,34,153,0.9953,3.13,0.51,12,7 +7.7,0.44,0.24,11.2,0.031,41,167,0.9948,3.12,0.43,11.3,7 +7.4,0.49,0.24,15.1,0.03,34,153,0.9953,3.13,0.51,12,7 +6.4,0.21,0.3,5.6,0.044,43,160,0.9949,3.6,0.41,10.6,6 +8,0.55,0.42,12.6,0.211,37,213,0.9988,2.99,0.56,9.3,5 +7,0.19,0.23,5.7,0.123,27,104,0.9954,3.04,0.54,9.4,6 +7.2,0.24,0.29,2.2,0.037,37,102,0.992,3.27,0.64,11,7 +6.5,0.34,0.36,11,0.052,53,247,0.9984,3.44,0.55,9.3,6 +7,0.19,0.23,5.7,0.123,27,104,0.9954,3.04,0.54,9.4,6 +6.9,0.18,0.33,1,0.054,24,164,0.9926,3.42,0.51,10.5,5 +7.2,0.24,0.29,2.2,0.037,37,102,0.992,3.27,0.64,11,7 
+8.2,0.18,0.31,11.8,0.039,96,249,0.9976,3.07,0.52,9.5,6 +8.3,0.28,0.45,7.8,0.059,32,139,0.9972,3.33,0.77,11.2,6 +6.1,0.34,0.46,4.7,0.029,21,94,0.991,3.29,0.62,12.3,6 +7.4,0.44,0.2,11.5,0.049,44,157,0.998,3.27,0.44,9,5 +7.6,0.26,0.58,7.9,0.041,62,180,0.9966,3.07,0.38,9,5 +7.4,0.44,0.2,11.5,0.049,44,157,0.998,3.27,0.44,9,5 +8.7,0.49,0.57,17.8,0.052,34,243,1.0007,2.98,0.82,9,5 +7,0.24,0.25,1.7,0.042,48,189,0.992,3.25,0.42,11.4,6 +7.1,0.25,0.25,1.6,0.046,50,181,0.9925,3.2,0.42,11,7 +6.1,0.34,0.46,4.7,0.029,21,94,0.991,3.29,0.62,12.3,6 +6.4,0.18,0.31,1.6,0.049,36,127,0.9934,3.6,0.67,10.4,7 +8.3,0.27,0.39,2.4,0.058,16,107,0.9955,3.28,0.59,10.3,5 +6.8,0.24,0.35,6.4,0.048,44,172,0.9944,3.29,0.55,10.5,7 +8,0.22,0.28,14,0.053,83,197,0.9981,3.14,0.45,9.8,6 +10,0.91,0.42,1.6,0.056,34,181,0.9968,3.11,0.46,10,4 +8.9,0.34,0.34,1.6,0.056,13,176,0.9946,3.14,0.47,9.7,5 +8.9,0.33,0.34,1.4,0.056,14,171,0.9946,3.13,0.47,9.7,5 +8,0.22,0.28,14,0.053,83,197,0.9981,3.14,0.45,9.8,6 +6.7,0.18,0.19,4.7,0.046,57,161,0.9946,3.32,0.66,10.5,6 +7.8,0.2,0.28,10.2,0.054,78,186,0.997,3.14,0.46,10,6 +7.3,0.13,0.31,2.3,0.054,22,104,0.9924,3.24,0.92,11.5,7 +6.6,0.28,0.3,7.8,0.049,57,202,0.9958,3.24,0.39,9.5,5 +7.1,0.25,0.3,2.4,0.042,25,122,0.994,3.43,0.61,10.5,6 +7.6,0.36,0.44,8.3,0.255,28,142,0.9958,3.12,0.43,10.2,6 +7.6,0.27,0.25,13.9,0.05,45,199,0.9984,3.34,0.5,9.8,6 +6.9,0.37,0.28,13.8,0.031,34,137,0.9948,3.1,0.37,11.6,6 +7.4,0.21,0.27,7.3,0.031,41,144,0.9932,3.15,0.38,11.8,7 +8.2,0.18,0.28,8.5,0.035,41,140,0.9952,3.04,0.37,10.1,7 +6.3,0.19,0.21,1.8,0.049,35,163,0.9924,3.31,0.5,10.3,6 +7,0.21,0.22,5.1,0.048,38,168,0.9945,3.34,0.49,10.4,6 +5.8,0.33,0.2,16.05,0.047,26,166,0.9976,3.09,0.46,8.9,5 +5.8,0.33,0.2,16.05,0.047,26,166,0.9976,3.09,0.46,8.9,5 +7.9,0.29,0.31,7.35,0.034,37,154,0.9938,3.06,0.31,10.8,5 +6.6,0.31,0.38,16.05,0.058,16,165,0.9997,3.38,0.6,9.2,5 +8,0.19,0.3,2,0.053,48,140,0.994,3.18,0.49,9.6,6 +8,0.2,0.36,1.2,0.032,21,78,0.9921,3.08,0.37,10.4,6 +8,0.25,0.26,14,0.043,41,248,0.9986,3.03,0.57,8.7,6 +7.2,0.2,0.61,16.2,0.043,14,103,0.9987,3.06,0.36,9.2,6 +7.7,0.3,0.42,14.3,0.045,45,213,0.9991,3.18,0.63,9.2,5 +7.2,0.2,0.61,16.2,0.043,14,103,0.9987,3.06,0.36,9.2,6 +7.7,0.3,0.42,14.3,0.045,45,213,0.9991,3.18,0.63,9.2,5 +7.7,0.3,0.42,14.3,0.045,45,213,0.9991,3.18,0.63,9.2,5 +6.4,0.22,0.32,7.9,0.029,34,124,0.9948,3.4,0.39,10.2,5 +7.2,0.2,0.61,16.2,0.043,14,103,0.9987,3.06,0.36,9.2,6 +7,0.53,0.02,1,0.036,39,107,0.993,3.2,0.32,9,5 +7.3,0.24,0.41,13.6,0.05,41,178,0.9988,3.37,0.43,9.7,5 +7.2,0.24,0.4,17.85,0.049,50,185,1,3.34,0.42,9.6,5 +7.6,0.15,0.4,1.3,0.036,24,112,0.9932,3.14,0.76,10,5 +7.7,0.3,0.42,14.3,0.045,45,213,0.9991,3.18,0.63,9.2,5 +7.6,0.33,0.41,13.7,0.045,44,197,0.9989,3.18,0.64,9.1,5 +6.8,0.24,0.31,18.3,0.046,40,142,1,3.3,0.41,8.7,5 +6.8,0.24,0.31,18.3,0.046,40,142,1,3.3,0.41,8.7,5 +6.8,0.35,0.44,6.5,0.056,31,161,0.9952,3.14,0.44,9.5,5 +7.9,0.26,0.33,10.3,0.039,73,212,0.9969,2.93,0.49,9.5,6 +7.5,0.29,0.67,8.1,0.037,53,166,0.9966,2.9,0.41,8.9,6 +7.5,0.29,0.67,8.1,0.037,53,166,0.9966,2.9,0.41,8.9,6 +7.2,0.31,0.41,8.6,0.053,15,89,0.9976,3.29,0.64,9.9,6 +6.7,0.44,0.31,1.9,0.03,41,104,0.99,3.29,0.62,12.6,7 +10,0.23,0.27,14.1,0.033,45,166,0.9988,2.72,0.43,9.7,6 +7.4,0.21,0.3,7.9,0.039,14,118,0.9942,2.96,0.34,10.4,5 +8.8,0.23,0.35,10.7,0.04,26,183,0.9984,2.93,0.49,9.1,6 +7.8,0.34,0.27,1.2,0.04,25,106,0.9932,3.01,0.55,10.4,5 +7.9,0.26,0.33,10.3,0.039,73,212,0.9969,2.93,0.49,9.5,6 +7.5,0.29,0.67,8.1,0.037,53,166,0.9966,2.9,0.41,8.9,6 +6,0.28,0.35,1.9,0.037,16,120,0.9933,3.16,0.69,10.6,5 
+7.9,0.37,0.3,2.7,0.029,64,158,0.9916,3.12,0.59,12,7 +7.2,0.36,0.36,5.7,0.038,26,98,0.9914,2.93,0.59,12.5,7 +7.6,0.13,0.34,9.3,0.062,40,126,0.9966,3.21,0.39,9.6,5 +6.6,0.25,0.36,8.1,0.045,54,180,0.9958,3.08,0.42,9.2,5 +7.1,0.18,0.26,1.3,0.041,20,71,0.9926,3.04,0.74,9.9,6 +7.9,0.3,0.27,8.5,0.036,20,112,0.9939,2.96,0.46,11.7,6 +8.3,0.23,0.3,2.1,0.049,21,153,0.9953,3.09,0.5,9.6,6 +6.8,0.43,0.3,3.5,0.033,27,135,0.9906,3,0.37,12,6 +7.2,0.36,0.36,5.7,0.038,26,98,0.9914,2.93,0.59,12.5,7 +6.6,0.25,0.36,8.1,0.045,54,180,0.9958,3.08,0.42,9.2,5 +7.1,0.18,0.26,1.3,0.041,20,71,0.9926,3.04,0.74,9.9,6 +6.6,0.35,0.29,14.4,0.044,54,177,0.9991,3.17,0.58,8.9,6 +7.3,0.22,0.5,13.7,0.049,56,189,0.9994,3.24,0.66,9,6 +8.1,0.26,0.33,11.1,0.052,52.5,158,0.9976,3.03,0.49,10.2,7 +7.6,0.13,0.34,9.3,0.062,40,126,0.9966,3.21,0.39,9.6,5 +7,0.12,0.19,4.9,0.055,27,127,0.9953,3.29,0.41,9.4,5 +8.2,0.37,0.27,1.7,0.028,10,59,0.9923,2.97,0.48,10.4,5 +7.6,0.26,0.36,1.6,0.032,6,106,0.993,3.15,0.4,10.4,4 +6.3,0.2,0.58,1.4,0.204,15,97,0.9931,3.16,0.43,10,6 +6.3,0.22,0.57,1.4,0.208,14,96,0.9932,3.16,0.43,10,6 +7.1,0.25,0.28,1.6,0.052,46,169,0.9926,3.05,0.41,10.5,5 +7,0.27,0.32,6.8,0.047,47,193,0.9938,3.23,0.39,11.4,6 +8.8,0.34,0.33,9.7,0.036,46,172,0.9966,3.08,0.4,10.2,5 +9.2,0.27,0.34,10.5,0.043,49,228,0.9974,3.04,0.41,10.4,6 +7.1,0.49,0.22,2,0.047,146.5,307.5,0.9924,3.24,0.37,11,3 +9.2,0.71,0.23,6.2,0.042,15,93,0.9948,2.89,0.34,10.1,6 +7.2,0.47,0.65,8.3,0.083,27,182,0.9964,3,0.35,9.2,5 +6.8,0.28,0.36,1.6,0.04,25,87,0.9924,3.23,0.66,10.3,6 +8.8,0.34,0.33,9.7,0.036,46,172,0.9966,3.08,0.4,10.2,5 +9.2,0.27,0.34,10.5,0.043,49,228,0.9974,3.04,0.41,10.4,6 +7.3,0.13,0.27,4.6,0.08,34,172,0.9938,3.23,0.39,11.1,7 +7.2,0.16,0.35,1.2,0.031,27,84,0.9928,3.33,0.34,9.9,5 +6.8,0.31,0.32,7.6,0.052,35,143,0.9959,3.14,0.38,9,5 +8.3,0.36,0.57,15,0.052,35,256,1.0001,2.93,0.64,8.6,5 +6.8,0.31,0.32,7.6,0.052,35,143,0.9959,3.14,0.38,9,5 +8.3,0.36,0.57,15,0.052,35,256,1.0001,2.93,0.64,8.6,5 +6.3,0.25,0.44,11.6,0.041,48,195,0.9968,3.18,0.52,9.5,5 +6,0.45,0.42,1.1,0.051,61,197,0.9932,3.02,0.4,9,5 +8.1,0.26,0.3,7.8,0.049,39,152,0.9954,2.99,0.58,10,6 +6.4,0.22,0.32,12,0.066,57,158,0.9992,3.6,0.43,9,6 +5.7,0.45,0.42,1.1,0.051,61,197,0.9932,3.02,0.4,9,5 +7.2,0.19,0.31,1.4,0.046,37,135,0.9939,3.34,0.57,10.2,7 +6.7,0.31,0.44,6.7,0.054,29,160,0.9952,3.04,0.44,9.6,5 +8,0.25,0.13,17.2,0.036,49,219,0.9996,2.96,0.46,9.7,5 +9.9,1.005,0.46,1.4,0.046,34,185,0.9966,3.02,0.49,10.2,4 +8.1,0.31,0.36,8.2,0.028,29,142,0.9925,3.01,0.34,13,7 +8.1,0.24,0.38,4.3,0.044,49,172,0.996,3.37,0.74,10.8,6 +8,0.25,0.13,17.2,0.036,49,219,0.9996,2.96,0.46,9.7,5 +6.4,0.29,0.28,11.1,0.063,66,169,0.9973,2.89,0.57,9,5 +7.2,0.15,0.33,1.1,0.027,16,63,0.9937,3.37,0.4,9.9,5 +7,0.12,0.32,7.2,0.058,22,89,0.9966,3.29,0.38,9.2,6 +7.4,0.32,0.55,16.6,0.056,53,238,1.0017,2.96,0.58,8.7,6 +8.5,0.17,0.31,1,0.024,13,91,0.993,2.79,0.37,10.1,5 +8.5,0.17,0.31,1,0.024,13,91,0.993,2.79,0.37,10.1,5 +9.5,0.21,0.47,1.3,0.039,21,123,0.9959,2.9,0.64,9.5,5 +8.2,0.21,0.48,1.4,0.041,11,99,0.9958,3.17,0.57,9.9,5 +7.4,0.32,0.55,16.6,0.056,53,238,1.0017,2.96,0.58,8.7,6 +6.8,0.31,0.42,6.9,0.046,50,173,0.9958,3.19,0.46,9,5 +6.8,0.27,0.28,13.3,0.076,50,163,0.9979,3.03,0.38,8.6,6 +7.4,0.21,0.3,8.1,0.047,13,114,0.9941,3.12,0.35,10.5,6 +8,0.23,0.35,9.2,0.044,53,186,0.997,3.09,0.56,9.5,7 +7.6,0.2,0.31,1.4,0.047,41,142,0.9934,3.43,0.53,10.1,6 +6.3,0.41,0.3,3.2,0.03,49,164,0.9927,3.53,0.79,11.7,7 +8.3,0.49,0.43,2.5,0.036,32,116,0.9944,3.23,0.47,10.7,6 +6.3,0.41,0.3,3.2,0.03,49,164,0.9927,3.53,0.79,11.7,7 
+7.6,0.2,0.26,4.5,0.086,37,133,0.9963,3.15,0.42,9.2,5 +7.5,0.26,0.26,18.35,0.084,33,139,1.0011,3.17,0.39,8.8,5 +7.5,0.26,0.26,18.35,0.084,33,139,1.0011,3.17,0.39,8.8,5 +6.8,0.27,0.35,7.8,0.048,76,197,0.9959,3.24,0.43,9.5,6 +6.8,0.28,0.37,7,0.057,35,208,0.9973,3.57,0.55,10.2,5 +8.4,0.2,0.27,6.3,0.048,30,143,0.9966,3.25,0.5,9.1,6 +7.9,0.33,0.26,1.2,0.044,23,103,0.9932,3.19,0.54,10.5,6 +7.5,0.38,0.5,12.8,0.042,57,184,0.9984,3.09,0.46,9,6 +7.6,0.2,0.3,14.2,0.056,53,212.5,0.999,3.14,0.46,8.9,8 +7.6,0.2,0.3,14.2,0.056,53,212.5,0.999,3.14,0.46,8.9,8 +7.6,0.2,0.3,14.2,0.056,53,212.5,0.999,3.14,0.46,8.9,8 +7.6,0.2,0.3,14.2,0.056,53,212.5,0.999,3.14,0.46,8.9,8 +7.6,0.2,0.3,14.2,0.056,53,212.5,0.999,3.14,0.46,8.9,8 +8.1,0.19,0.58,16.65,0.049,48,181,1.0006,3.2,0.62,9.1,6 +7.6,0.16,0.41,1.9,0.047,27,151,0.9937,3.2,0.53,10.1,6 +8.1,0.22,0.28,7.7,0.043,57,176,0.9954,3.12,0.55,10,5 +8,0.22,0.32,10.4,0.043,63,201,0.997,3.11,0.53,9.5,6 +7.1,0.33,0.3,3.3,0.034,30,102,0.9912,3.08,0.31,12.3,7 +6.4,0.43,0.27,1.1,0.054,5,110,0.9939,3.24,0.52,9.1,4 +7.6,0.2,0.3,14.2,0.056,53,212.5,0.999,3.14,0.46,8.9,8 +7,0.12,0.28,6.3,0.057,17,103,0.9957,3.5,0.44,9.6,5 +7.4,0.3,0.22,5.25,0.053,33,180,0.9926,3.13,0.45,11.6,6 +7,0.28,0.33,14.6,0.043,47,168,0.9994,3.34,0.67,8.8,6 +8.4,0.2,0.38,11.8,0.055,51,170,1.0004,3.34,0.82,8.9,6 +7,0.28,0.33,14.6,0.043,47,168,0.9994,3.34,0.67,8.8,6 +8.4,0.2,0.38,11.8,0.055,51,170,1.0004,3.34,0.82,8.9,6 +8.4,0.2,0.38,11.8,0.055,51,170,1.0004,3.34,0.82,8.9,6 +7.3,0.18,0.31,17.3,0.055,32,197,1.0002,3.13,0.46,9,6 +6.8,0.31,0.09,1.4,0.04,56,145,0.9922,3.19,0.46,10,5 +6.7,0.31,0.08,1.3,0.038,58,147,0.9922,3.18,0.46,10,5 +7.6,0.17,0.35,1.6,0.047,43,154,0.9934,3.36,0.69,11.1,6 +7.4,0.3,0.22,5.25,0.053,33,180,0.9926,3.13,0.45,11.6,6 +7.4,0.26,0.31,2.4,0.043,58,178,0.9941,3.42,0.68,10.6,6 +7,0.28,0.33,14.6,0.043,47,168,0.9994,3.34,0.67,8.8,6 +8.4,0.2,0.38,11.8,0.055,51,170,1.0004,3.34,0.82,8.9,6 +5.6,0.18,0.31,1.5,0.038,16,84,0.9924,3.34,0.58,10.1,6 +7.2,0.15,0.39,1.8,0.043,21,159,0.9948,3.52,0.47,10,5 +8,0.4,0.33,7.7,0.034,27,98,0.9935,3.18,0.41,12.2,7 +7,0.25,0.56,2,0.035,20,95,0.9918,3.23,0.53,11,6 +7.2,0.15,0.39,1.8,0.043,21,159,0.9948,3.52,0.47,10,5 +6.8,0.18,0.46,1.4,0.064,37,160,0.9924,3.37,0.45,11.1,5 +6.6,0.32,0.22,16.7,0.046,38,133,0.9979,3.22,0.67,10.4,6 +9,0.55,0.3,8.1,0.026,14,71,0.993,2.94,0.36,11.8,5 +6.9,0.19,0.39,8,0.028,22,84,0.994,3.11,0.66,10.8,6 +6.3,0.41,0.33,4.7,0.023,28,110,0.991,3.3,0.38,12.5,7 +9,0.55,0.3,8.1,0.026,14,71,0.993,2.94,0.36,11.8,5 +7,0.2,0.34,2.1,0.049,12,136,0.9922,3.25,0.46,11.6,7 +6.6,0.32,0.22,16.7,0.046,38,133,0.9979,3.22,0.67,10.4,6 +7.7,0.26,0.34,6.4,0.05,36,163,0.9937,3.19,0.7,11.5,6 +6.3,0.21,0.28,1.5,0.051,46,142,0.9928,3.23,0.42,10.1,6 +7.6,0.34,0.39,7.6,0.04,45,215,0.9965,3.11,0.53,9.2,6 +6.3,0.21,0.28,1.5,0.051,46,142,0.9928,3.23,0.42,10.1,6 +8,0.43,0.4,12.4,0.168,29,190,0.9991,3.07,0.64,9.2,5 +7.5,0.3,0.71,1.3,0.16,44,149,0.9948,3.08,0.42,8.9,5 +6.4,0.26,0.4,1.7,0.179,5,60,0.9925,3.09,0.54,10.1,5 +6.9,0.32,0.15,8.1,0.046,51,180,0.9958,3.13,0.45,8.9,5 +8.9,0.21,0.34,7.1,0.037,33,150,0.9962,3.1,0.45,9.7,6 +7.6,0.34,0.39,7.6,0.04,45,215,0.9965,3.11,0.53,9.2,6 +9.5,0.42,0.41,2.3,0.034,22,145,0.9951,3.06,0.52,11,6 +7.6,0.29,0.26,6.5,0.042,32,160,0.9944,3.14,0.47,10.7,5 +6.5,0.25,0.2,1.4,0.024,29,101,0.9916,3.24,0.54,10.8,6 +7.2,0.23,0.33,12.7,0.049,50,183,0.9987,3.41,0.4,9.8,5 +7.9,0.35,0.36,1.6,0.038,11,124,0.9928,3.25,0.48,11,5 +8.8,0.2,0.28,1.1,0.018,18,72,0.9926,2.97,0.35,10.4,5 +5.7,0.27,0.32,1.2,0.046,20,155,0.9934,3.8,0.41,10.2,6 
+7.6,0.29,0.26,6.5,0.042,32,160,0.9944,3.14,0.47,10.7,5 +5.5,0.14,0.27,4.6,0.029,22,104,0.9949,3.34,0.44,9,5 +8.7,0.24,0.35,0.6,0.042,11,71,0.9926,3.08,0.38,10.6,5 +6.7,0.3,0.45,10.6,0.032,56,212,0.997,3.22,0.59,9.5,6 +5.5,0.14,0.27,4.6,0.029,22,104,0.9949,3.34,0.44,9,5 +5.6,0.13,0.27,4.8,0.028,22,104,0.9948,3.34,0.45,9.2,6 +7.4,0.18,0.34,2.7,0.03,30,107,0.992,2.97,0.53,11,6 +5.7,0.385,0.04,12.6,0.034,22,115,0.9964,3.28,0.63,9.9,6 +8.7,0.24,0.35,0.6,0.042,11,71,0.9926,3.08,0.38,10.6,5 +8.3,0.33,0.43,9.2,0.046,22,126,0.9982,3.38,0.47,9.3,5 +6.8,0.34,0.44,6.6,0.052,28,156,0.9955,3.14,0.41,9.6,5 +6.8,0.33,0.44,7,0.05,29,155,0.9955,3.14,0.42,9.5,5 +6.3,0.28,0.24,8.45,0.031,32,172,0.9958,3.39,0.57,9.7,7 +11.8,0.23,0.38,11.1,0.034,15,123,0.9997,2.93,0.55,9.7,3 +6.8,0.21,0.27,18.15,0.042,41,146,1.0001,3.3,0.36,8.7,5 +6.8,0.21,0.27,18.15,0.042,41,146,1.0001,3.3,0.36,8.7,5 +8.6,0.485,0.29,4.1,0.026,19,101,0.9918,3.01,0.38,12.4,5 +8.6,0.485,0.29,4.1,0.026,19,101,0.9918,3.01,0.38,12.4,5 +7.3,0.29,0.29,4.6,0.029,27,155,0.9931,3.07,0.26,10.6,6 +6.8,0.21,0.27,18.15,0.042,41,146,1.0001,3.3,0.36,8.7,5 +6.7,0.31,0.31,4.9,0.031,20,151,0.9926,3.36,0.82,12,7 +7.3,0.29,0.37,8.3,0.044,45,227,0.9966,3.12,0.47,9,5 +5.7,0.46,0.46,1.4,0.04,31,169,0.9932,3.13,0.47,8.8,5 +6.8,0.28,0.44,11.5,0.04,58,223,0.9969,3.22,0.56,9.5,5 +6.7,0.23,0.33,1.8,0.036,23,96,0.9925,3.32,0.4,10.8,6 +6.9,0.17,0.25,1.6,0.047,34,132,0.9914,3.16,0.48,11.4,5 +7.6,0.18,0.36,2.4,0.049,38,123,0.996,3.6,0.46,10.3,5 +6.6,0.22,0.28,4.9,0.042,51,180,0.9952,3.3,0.75,9.5,6 +7.8,0.27,0.28,1.8,0.05,21,127,0.9934,3.15,0.44,9.9,5 +7.7,0.28,0.29,4.3,0.051,25,142,0.9939,3.16,0.39,10.2,5 +7.6,0.29,0.29,4.4,0.051,26,146,0.9939,3.16,0.39,10.2,5 +5.7,0.32,0.18,1.4,0.029,26,104,0.9906,3.44,0.37,11,6 +7.1,0.33,0.25,1.6,0.03,25,126,0.9901,3.22,0.34,12.1,7 +7.3,0.34,0.3,1.3,0.057,25,173,0.9948,3.26,0.51,9.1,6 +6.5,0.19,0.26,5.2,0.04,31,140,0.995,3.26,0.68,9.5,6 +6.6,0.23,0.27,5.6,0.043,43,164,0.9953,3.27,0.76,9.5,5 +6.6,0.27,0.29,5.3,0.045,57,189,0.9953,3.31,0.79,9.8,5 +6.6,0.22,0.28,4.9,0.042,51,180,0.9952,3.3,0.75,9.5,6 +7.6,0.18,0.36,2.4,0.049,38,123,0.996,3.6,0.46,10.3,5 +6.8,0.36,0.32,1.6,0.039,10,124,0.9948,3.3,0.67,9.6,5 +7,0.22,0.39,2.1,0.055,39,198,0.9951,3.52,0.54,10.2,6 +5.9,0.17,0.3,1.4,0.042,25,119,0.9931,3.68,0.72,10.5,6 +7.4,0.45,0.32,7.1,0.044,17,117,0.9962,3.32,0.41,10.4,4 +6.8,0.36,0.32,1.6,0.039,10,124,0.9948,3.3,0.67,9.6,5 +7.5,0.42,0.14,10.7,0.046,18,95,0.9959,3.22,0.33,10.7,5 +7.5,0.33,0.32,11.1,0.036,25,119,0.9962,3.15,0.34,10.5,6 +9.4,0.3,0.32,10.7,0.029,14,111,0.9958,2.85,0.42,10.6,5 +7.9,0.17,0.32,1.6,0.053,47,150,0.9948,3.29,0.76,9.6,6 +7.9,0.17,0.32,1.6,0.053,47,150,0.9948,3.29,0.76,9.6,6 +8.2,0.17,0.32,1.5,0.05,17,101,0.994,3.14,0.58,9.5,5 +8.3,0.17,0.31,1.5,0.049,48,153,0.9942,3.12,0.58,9.4,6 +8.7,0.15,0.3,1.6,0.046,29,130,0.9942,3.22,0.38,9.8,6 +7.9,0.17,0.32,1.6,0.053,47,150,0.9948,3.29,0.76,9.6,6 +7.2,0.25,0.19,8,0.044,51,172,0.9964,3.16,0.44,9.2,5 +7.2,0.24,0.19,7.7,0.045,53,176,0.9958,3.17,0.38,9.5,5 +5.3,0.76,0.03,2.7,0.043,27,93,0.9932,3.34,0.38,9.2,5 +6.6,0.22,0.53,15.1,0.052,22,136,0.9986,2.94,0.35,9.4,5 +6.6,0.22,0.53,15.1,0.052,22,136,0.9986,2.94,0.35,9.4,5 +8.4,0.28,0.4,8.9,0.048,33,146,0.9988,3.4,0.46,9.3,5 +6.8,0.32,0.34,6,0.05,5,129,0.9953,3.19,0.4,9.1,5 +6.7,0.24,0.33,12.3,0.046,31,145,0.9983,3.36,0.4,9.5,5 +7.4,0.18,0.36,13.1,0.056,72,163,1,3.42,0.35,9.1,6 +6,0.16,0.3,6.7,0.043,43,153,0.9951,3.63,0.46,10.6,5 +6.7,0.24,0.33,12.3,0.046,31,145,0.9983,3.36,0.4,9.5,5 +6.8,0.28,0.35,2.3,0.042,16,85,0.9906,3.19,0.56,12.4,6 
+6.2,0.34,0.3,11.1,0.047,28,237,0.9981,3.18,0.49,8.7,5 +6,0.27,0.15,1.5,0.056,35,128,0.9936,3.12,0.45,8.8,5 +6,0.16,0.3,6.7,0.043,43,153,0.9951,3.63,0.46,10.6,5 +6.8,0.32,0.34,6,0.05,5,129,0.9953,3.19,0.4,9.1,5 +8.5,0.24,0.47,15.2,0.057,40,234,1.0005,3.02,0.66,9,5 +8.1,0.24,0.33,10.2,0.048,46,141,0.9972,3.16,0.48,10.3,6 +7.4,0.18,0.36,13.1,0.056,72,163,1,3.42,0.35,9.1,6 +7.7,0.23,0.31,10.7,0.038,59,186,0.9969,3.12,0.55,9.5,6 +6.5,0.22,0.25,17.1,0.05,44,138,1.0001,3.3,0.37,8.8,5 +6.5,0.22,0.25,17.1,0.05,44,138,1.0001,3.3,0.37,8.8,5 +6.5,0.22,0.25,17.1,0.05,44,138,1.0001,3.3,0.37,8.8,5 +5.7,0.33,0.15,1.9,0.05,20,93,0.9934,3.38,0.62,9.9,5 +7.7,0.23,0.31,10.7,0.038,59,186,0.9969,3.12,0.55,9.5,6 +6.5,0.22,0.25,17.1,0.05,44,138,1.0001,3.3,0.37,8.8,5 +6.8,0.2,0.27,1.2,0.034,19,68,0.9902,3.14,0.37,11.7,4 +7.7,0.26,0.32,1.2,0.04,26,117,0.993,3.21,0.56,10.8,5 +6.4,0.2,0.32,3.1,0.041,18,126,0.9914,3.43,0.42,12,6 +8,0.16,0.36,1.5,0.033,14,122,0.9941,3.2,0.39,10.3,4 +6.8,0.25,0.27,10.7,0.076,47,154,0.9967,3.05,0.38,9,5 +7.7,0.39,0.28,4.9,0.035,36,109,0.9918,3.19,0.58,12.2,7 +6.9,0.26,0.33,12.6,0.051,59,173,0.998,3.39,0.38,9.9,5 +6.8,0.25,0.27,10.7,0.076,47,154,0.9967,3.05,0.38,9,5 +7.7,0.39,0.28,4.9,0.035,36,109,0.9918,3.19,0.58,12.2,7 +6,0.28,0.22,12.15,0.048,42,163,0.9957,3.2,0.46,10.1,5 +6.5,0.43,0.28,12,0.056,23,174,0.9986,3.31,0.55,9.3,5 +9.1,0.33,0.38,1.7,0.062,50.5,344,0.9958,3.1,0.7,9.5,5 +5.9,0.5,0.05,2.6,0.054,36,146,0.9948,3.43,0.5,9.2,6 +6.8,0.28,0.39,1.4,0.036,15,115,0.9918,3.27,0.72,11.7,5 +7,0.35,0.24,1.9,0.04,21,144,0.9923,3.35,0.38,11,5 +7.1,0.22,0.32,16.9,0.056,49,158,0.9998,3.37,0.38,9.6,6 +7.1,0.22,0.32,16.9,0.056,49,158,0.9998,3.37,0.38,9.6,6 +8.3,0.24,0.27,2.1,0.03,22,162,0.9914,2.99,0.68,11.9,6 +6.8,0.26,0.32,7,0.041,38,118,0.9939,3.25,0.52,10.8,6 +7.2,0.16,0.26,7.1,0.054,41,224,0.9966,3.38,0.55,10.1,5 +7.9,0.18,0.36,5.9,0.058,31,132,0.995,3.25,0.52,10.9,6 +7.2,0.16,0.26,7.1,0.054,41,224,0.9966,3.38,0.55,10.1,5 +5.5,0.24,0.32,8.7,0.06,19,102,0.994,3.27,0.31,10.4,5 +7.1,0.33,0.64,13.2,0.056,12,105,0.9972,3.05,0.39,9.2,5 +7.7,0.28,0.35,15.3,0.056,31,117,0.9998,3.27,0.5,9.6,5 +7.7,0.28,0.35,15.3,0.056,31,117,0.9998,3.27,0.5,9.6,5 +7.5,0.26,0.52,13.2,0.047,64,179,0.9982,3.1,0.46,9,5 +6.5,0.14,0.32,2.7,0.037,18,89,0.9924,3.4,0.74,11.5,7 +8.2,0.21,0.32,10.65,0.053,53,145,0.9972,3.17,0.48,10.2,6 +7.2,0.2,0.31,10,0.054,49,165,0.997,3.4,0.42,9.9,6 +7.2,0.115,0.3,6.8,0.056,26,105,0.9954,3.44,0.4,9.6,6 +6.4,0.29,0.2,15.6,0.04,20,142,0.9962,3.1,0.54,10.6,5 +7.1,0.33,0.64,13.2,0.056,12,105,0.9972,3.05,0.39,9.2,5 +6.8,0.24,0.34,5.1,0.038,31,99,0.9921,3.24,0.46,11.8,6 +7,0.24,0.34,3,0.035,36,102,0.9905,3.18,0.43,12.2,6 +7.7,0.28,0.35,15.3,0.056,31,117,0.9998,3.27,0.5,9.6,5 +7,0.22,0.33,2.1,0.052,15,76,0.993,3.2,0.41,10.6,6 +7.5,0.18,0.39,1.9,0.054,23,91,0.9941,3.27,0.45,10.3,6 +9.8,0.93,0.45,8.6,0.052,34,187,0.9994,3.12,0.59,10.2,4 +7.8,0.29,0.33,8.75,0.035,33,181,0.9962,3.11,0.46,10.7,5 +7.9,0.28,0.32,3.6,0.038,9,76,0.992,3.05,0.31,11.7,4 +8.5,0.25,0.27,4.7,0.031,31,92,0.9922,3.01,0.33,12,6 +7.4,0.18,0.27,1.3,0.048,26,105,0.994,3.52,0.66,10.6,6 +6.3,0.24,0.37,1.8,0.031,6,61,0.9897,3.3,0.34,12.2,4 +6,0.33,0.38,9.7,0.04,29,124,0.9954,3.47,0.48,11,6 +6.8,0.37,0.28,4,0.03,29,79,0.99,3.23,0.46,12.4,7 +9.9,0.49,0.23,2.4,0.087,19,115,0.9948,2.77,0.44,9.4,6 +8.5,0.25,0.27,4.7,0.031,31,92,0.9922,3.01,0.33,12,6 +8.4,0.22,0.28,18.8,0.028,55,130,0.998,2.96,0.35,11.6,5 +7,0.35,0.31,1.8,0.069,15,162,0.9944,3.18,0.47,9.4,5 +7,0.35,0.31,1.8,0.069,15,162,0.9944,3.18,0.47,9.4,5 
+7.4,0.19,0.3,12.8,0.053,48.5,229,0.9986,3.14,0.49,9.1,7 +7.4,0.19,0.3,12.8,0.053,48.5,229,0.9986,3.14,0.49,9.1,7 +7.4,0.19,0.3,12.8,0.053,48.5,229,0.9986,3.14,0.49,9.1,7 +7.4,0.19,0.3,12.8,0.053,48.5,229,0.9986,3.14,0.49,9.1,7 +7.4,0.19,0.3,12.8,0.053,48.5,229,0.9986,3.14,0.49,9.1,7 +6.9,0.32,0.13,7.8,0.042,11,117,0.996,3.23,0.37,9.2,5 +7.6,0.32,0.58,16.75,0.05,43,163,0.9999,3.15,0.54,9.2,5 +7.4,0.19,0.3,12.8,0.053,48.5,229,0.9986,3.14,0.49,9.1,7 +7.4,0.19,0.3,12.8,0.053,48.5,212,0.9986,3.14,0.49,9.1,7 +6.9,0.32,0.13,7.8,0.042,11,117,0.996,3.23,0.37,9.2,5 +6,0.34,0.24,5.4,0.06,23,126,0.9951,3.25,0.44,9,7 +7.6,0.32,0.58,16.75,0.05,43,163,0.9999,3.15,0.54,9.2,5 +7.7,0.24,0.31,1.3,0.047,33,106,0.993,3.22,0.55,10.8,6 +8,0.36,0.43,10.1,0.053,29,146,0.9982,3.4,0.46,9.5,6 +7.4,0.29,0.25,3.8,0.044,30,114,0.992,3.11,0.4,11,6 +6.6,0.32,0.27,10.9,0.041,37,146,0.9963,3.24,0.47,10,5 +6.3,0.3,0.24,6.6,0.04,38,141,0.995,3.22,0.47,9.5,5 +6.4,0.33,0.24,9.8,0.041,29,109,0.9956,3.29,0.47,10.1,6 +7.5,0.18,0.31,11.7,0.051,24,94,0.997,3.19,0.44,9.5,7 +6.5,0.39,0.81,1.2,0.217,14,74,0.9936,3.08,0.53,9.5,5 +6.8,0.25,0.18,1.4,0.056,13,137,0.9935,3.11,0.42,9.5,5 +6.4,0.18,0.32,9.6,0.052,24,90,0.9963,3.35,0.49,9.4,6 +7.1,0.18,0.32,12.2,0.048,36,125,0.9967,2.92,0.54,9.4,6 +7.6,0.27,0.42,2.6,0.044,29,110,0.9912,3.31,0.51,12.7,6 +9.2,0.23,0.35,10.7,0.037,34,145,0.9981,3.09,0.32,9.7,5 +7.9,0.28,0.41,4.9,0.058,31,153,0.9966,3.27,0.51,9.7,6 +7.1,0.18,0.32,12.2,0.048,36,125,0.9967,2.92,0.54,9.4,6 +6.4,0.18,0.32,9.6,0.052,24,90,0.9963,3.35,0.49,9.4,6 +6.8,0.25,0.18,1.4,0.056,13,137,0.9935,3.11,0.42,9.5,5 +7,0.22,0.26,1.1,0.037,20,71,0.9902,3.1,0.38,11.7,6 +7.3,0.18,0.29,1,0.036,26,101,0.99,3.09,0.37,11.7,6 +7.1,0.26,0.19,8.2,0.051,53,187,0.996,3.16,0.52,9.7,5 +6.6,0.25,0.42,11.3,0.049,77,231,0.9966,3.24,0.52,9.5,6 +6.4,0.24,0.23,7.3,0.069,31,157,0.9962,3.25,0.53,9.1,5 +6,0.28,0.27,2.3,0.051,23,147,0.994,3.23,0.67,10.3,6 +7.1,0.26,0.19,8.2,0.051,53,187,0.996,3.16,0.52,9.7,5 +7.8,0.24,0.38,2.1,0.058,14,167,0.994,3.21,0.55,9.9,5 +7.6,0.27,0.33,2,0.059,19,175,0.9944,3.22,0.56,9.9,5 +7.7,0.39,0.34,10,0.056,35,178,0.9974,3.26,0.6,10.2,5 +8.9,0.24,0.33,15.75,0.035,16,132,0.996,3,0.37,12.1,6 +6.6,0.23,0.24,3.9,0.045,36,138,0.9922,3.15,0.64,11.3,7 +7.1,0.26,0.3,2,0.031,13,128,0.9917,3.19,0.49,11.4,5 +7,0.32,0.35,1.5,0.039,24,125,0.9918,3.17,0.64,12.2,6 +7.4,0.24,0.26,1.6,0.058,53,150,0.9936,3.18,0.5,9.9,7 +6.9,0.21,0.33,1.4,0.056,35,136,0.9938,3.63,0.78,10.3,6 +7,0.32,0.35,1.5,0.039,24,125,0.9918,3.17,0.64,12.2,6 +7.4,0.17,0.29,1.4,0.047,23,107,0.9939,3.52,0.65,10.4,6 +7.1,0.26,0.3,2,0.031,13,128,0.9917,3.19,0.49,11.4,5 +8.5,0.28,0.34,13.8,0.041,32,161,0.9981,3.13,0.4,9.9,6 +7.8,0.3,0.37,1.3,0.051,16,96,0.9941,3.32,0.62,10,5 +8.1,0.25,0.38,3.8,0.051,18,129,0.9928,3.21,0.38,11.5,6 +7.7,0.28,0.29,6.9,0.041,29,163,0.9952,3.44,0.6,10.5,6 +6.5,0.24,0.36,2.2,0.027,36,134,0.9898,3.28,0.36,12.5,7 +7,0.22,0.32,1.6,0.045,40,120,0.9914,2.98,0.44,10.5,6 +8.5,0.28,0.34,13.8,0.041,32,161,0.9981,3.13,0.4,9.9,6 +8,0.45,0.28,10.8,0.051,25,157,0.9957,3.06,0.47,11.4,7 +6.9,0.23,0.33,12.8,0.056,44,169,0.998,3.42,0.42,9.8,6 +8,0.45,0.28,10.8,0.051,25,157,0.9957,3.06,0.47,11.4,7 +7.6,0.23,0.26,15.3,0.067,32,166,0.9986,3.03,0.44,9.2,4 +7.7,0.28,0.58,12.1,0.046,60,177,0.9983,3.08,0.46,8.9,5 +7.7,0.27,0.61,12,0.046,64,179,0.9982,3.07,0.46,8.9,5 +7.1,0.2,0.36,11.6,0.042,45,124,0.997,2.92,0.59,9.5,7 +6.9,0.25,0.35,9.2,0.034,42,150,0.9947,3.21,0.36,11.5,6 +7.1,0.2,0.36,11.6,0.042,45,124,0.997,2.92,0.59,9.5,7 
+6.9,0.25,0.35,9.2,0.034,42,150,0.9947,3.21,0.36,11.5,6 +8.4,0.2,0.31,2.8,0.054,16,89,0.99416,2.96,0.45,9.5,6 +6.5,0.39,0.35,1.6,0.049,10,164,0.99516,3.35,0.51,9.7,5 +7.2,0.23,0.38,6.1,0.067,20,90,0.99496,3.17,0.79,9.7,5 +6.9,0.44,0.42,8.5,0.048,10,147,0.9974,3.32,0.46,9.5,6 +7.1,0.28,0.19,7.8,0.04,48,184,0.99579,3.16,0.5,9.4,5 +6.4,0.34,0.2,14.9,0.06,37,162,0.9983,3.13,0.45,9,4 +6.1,0.15,0.29,6.2,0.046,39,151,0.99471,3.6,0.44,10.6,6 +6.9,0.44,0.42,8.5,0.048,10,147,0.9974,3.32,0.46,9.5,6 +7.2,0.29,0.18,8.2,0.042,41,180,0.99644,3.16,0.49,9.1,5 +7.1,0.28,0.19,7.8,0.04,48,184,0.99579,3.16,0.5,9.4,5 +6.1,0.23,0.45,10.6,0.094,49,169,0.99699,3.05,0.54,8.8,5 +6.7,0.23,0.42,11.2,0.047,52,171,0.99758,3.54,0.74,10.4,5 +7,0.36,0.14,11.6,0.043,35,228,0.9977,3.13,0.51,8.9,5 +7.5,0.31,0.24,7.1,0.031,28,141,0.99397,3.16,0.38,10.6,7 +6.4,0.34,0.2,14.9,0.06,37,162,0.9983,3.13,0.45,9,4 +6.1,0.15,0.29,6.2,0.046,39,151,0.99471,3.6,0.44,10.6,6 +7.4,0.2,0.29,1.7,0.047,16,100,0.99243,3.28,0.45,10.6,6 +6.3,0.27,0.18,7.7,0.048,45,186,0.9962,3.23,0.47,9,5 +9.2,0.34,0.54,17.3,0.06,46,235,1.00182,3.08,0.61,8.8,6 +7.4,0.18,0.29,1.4,0.042,34,101,0.99384,3.54,0.6,10.5,7 +7.2,0.29,0.2,7.7,0.046,51,174,0.99582,3.16,0.52,9.5,5 +6.3,0.27,0.18,7.7,0.048,45,186,0.9962,3.23,0.47,9,5 +6.2,0.26,0.19,3.4,0.049,47,172,0.9924,3.14,0.43,10.4,6 +7.3,0.21,0.21,1.6,0.046,35,133,0.99466,3.38,0.46,10,6 +7.1,0.14,0.35,1.4,0.039,24,128,0.99212,2.97,0.68,10.4,5 +7.2,0.39,0.54,1.4,0.157,34,132,0.99449,3.11,0.53,9,6 +7.6,0.48,0.28,10.4,0.049,57,205,0.99748,3.24,0.45,9.3,5 +7.2,0.39,0.54,1.4,0.157,34,132,0.99449,3.11,0.53,9,6 +7.6,0.48,0.28,10.4,0.049,57,205,0.99748,3.24,0.45,9.3,5 +6.5,0.36,0.31,4.1,0.061,20,134,0.99475,3.18,0.45,9,6 +8.5,0.25,0.31,2.8,0.032,11,61,0.99189,3.06,0.44,11.5,6 +6.9,0.3,0.21,15.7,0.056,49,159,0.99827,3.11,0.48,9,5 +6.6,0.19,0.43,10.9,0.045,53,154,0.99752,3.52,0.77,10.4,6 +6.9,0.3,0.21,15.7,0.056,49,159,0.99827,3.11,0.48,9,5 +9.4,0.42,0.32,6.5,0.027,20,167,0.99479,3.08,0.43,10.6,5 +6.6,0.19,0.43,10.9,0.045,53,154,0.99752,3.52,0.77,10.4,6 +6.3,0.2,0.3,5.9,0.034,35,152,0.99642,3.47,0.4,8.5,6 +8.5,0.19,0.56,17.3,0.055,47,169,1.00047,3.07,0.67,9.3,6 +7.3,0.19,0.25,1.4,0.051,41,107,0.99382,3.53,0.66,10.5,7 +6.7,0.25,0.26,13.5,0.06,50,156,0.99784,3.39,0.46,9.9,6 +6.2,0.25,0.28,8.5,0.035,28,108,0.99486,3.4,0.42,10.4,6 +6.1,0.46,0.32,6.2,0.053,10,94,0.99537,3.35,0.47,10.1,5 +7.3,0.19,0.25,1.4,0.051,41,107,0.99382,3.53,0.66,10.5,7 +7.5,0.29,0.26,14.95,0.067,47,178,0.99838,3.04,0.49,9.2,4 +6.7,0.31,0.18,7.7,0.043,57,200,0.99566,3.17,0.44,9.4,6 +7.4,0.14,0.3,1.3,0.033,25,91,0.99268,3.53,0.39,10.6,6 +6.7,0.31,0.18,7.7,0.043,57,200,0.99566,3.17,0.44,9.4,6 +7.1,0.4,0.52,1.3,0.148,45,149,0.99468,3.08,0.56,8.7,5 +6.4,0.16,0.25,1.3,0.047,20,77,0.9933,3.61,0.54,10.2,6 +6.3,0.16,0.22,1.3,0.046,18,66,0.99307,3.61,0.55,10.3,6 +7.4,0.33,0.26,15.6,0.049,67,210,0.99907,3.06,0.68,9.5,5 +7.4,0.33,0.26,15.6,0.049,67,210,0.99907,3.06,0.68,9.5,5 +7.4,0.33,0.26,15.6,0.049,67,210,0.99907,3.06,0.68,9.5,5 +7.4,0.33,0.26,15.6,0.049,67,210,0.99907,3.06,0.68,9.5,5 +6.6,0.41,0.24,4.9,0.158,47,144,0.99471,3.17,0.49,9.4,5 +6.7,0.43,0.23,5,0.157,49,145,0.99471,3.17,0.49,9.4,5 +7.4,0.33,0.26,15.6,0.049,67,210,0.99907,3.06,0.68,9.5,5 +7.3,0.4,0.28,6.5,0.037,26,97,0.99148,3.16,0.58,12.6,7 +7.4,0.18,0.24,1.4,0.047,21,106,0.99383,3.52,0.64,10.5,7 +8.6,0.17,0.28,2.7,0.047,38,150,0.99365,3.1,0.56,10.8,6 +6.5,0.32,0.23,1.2,0.054,39,208,0.99272,3.18,0.46,9.9,6 +7.3,0.4,0.28,6.5,0.037,26,97,0.99148,3.16,0.58,12.6,7 
+7,0.32,0.31,6.4,0.031,38,115,0.99235,3.38,0.58,12.2,7 +7.5,0.42,0.19,6.9,0.041,62,150,0.99508,3.23,0.37,10,6 +6.9,0.28,0.31,7.2,0.04,47,168,0.9946,3.29,0.57,10.6,7 +6.5,0.29,0.42,10.6,0.042,66,202,0.99674,3.24,0.53,9.5,6 +6.3,0.41,0.18,3.5,0.027,23,109,0.99018,3.34,0.54,12.8,8 +7,0.32,0.31,6.4,0.031,38,115,0.99235,3.38,0.58,12.2,7 +7.3,0.3,0.33,2.3,0.043,28,125,0.99084,3.34,0.44,12.6,7 +6.6,0.22,0.28,12.05,0.058,25,125,0.99856,3.45,0.45,9.4,5 +6,0.26,0.18,7,0.055,50,194,0.99591,3.21,0.43,9,5 +6.9,0.44,0.18,11.8,0.051,26,126,0.9975,3.23,0.48,9.1,5 +7.5,0.42,0.2,1.4,0.06,15,168,0.9944,3.06,0.4,9.4,6 +7,0.36,0.3,5,0.04,40,143,0.99173,3.33,0.42,12.2,7 +5.6,0.295,0.2,2.2,0.049,18,134,0.99378,3.21,0.68,10,5 +6.8,0.21,0.55,14.6,0.053,34,159,0.99805,2.93,0.44,9.2,5 +9.4,0.28,0.3,1.6,0.045,36,139,0.99534,3.11,0.49,9.3,5 +8.1,0.28,0.34,1.3,0.035,11,126,0.99232,3.14,0.5,9.8,6 +6.8,0.21,0.55,14.6,0.053,34,159,0.99805,2.93,0.44,9.2,5 +7,0.22,0.26,2.8,0.036,44,132,0.99078,3.34,0.41,12,7 +9.4,0.28,0.3,1.6,0.045,36,139,0.99534,3.11,0.49,9.3,5 +6.8,0.32,0.3,3.3,0.029,15,80,0.99061,3.33,0.63,12.6,7 +7,0.19,0.33,6.3,0.032,42,127,0.99182,3.31,0.38,12.2,6 +7.7,0.42,0.38,8.1,0.061,49,144,0.9966,3.4,0.58,11,6 +7.4,0.2,0.31,1.6,0.038,34,116,0.9912,3.25,0.39,12,7 +7.5,0.24,0.62,10.6,0.045,51,153,0.99779,3.16,0.44,8.8,5 +7.5,0.26,0.59,11.8,0.046,58,164,0.99814,3.17,0.46,8.9,4 +6.6,0.4,0.32,1.7,0.035,39,84,0.99096,3.59,0.48,12.7,7 +8,0.2,0.3,8.1,0.037,42,130,0.99379,3.1,0.67,11.8,6 +4.6,0.445,0,1.4,0.053,11,178,0.99426,3.79,0.55,10.2,5 +6.1,0.41,0.04,1.3,0.036,23,121,0.99228,3.24,0.61,9.9,6 +7.6,0.2,0.34,1.8,0.041,42,148,0.99335,3.35,0.66,11.1,6 +6.9,0.3,0.21,7.2,0.045,54,190,0.99595,3.22,0.48,9.4,5 +7,0.35,0.17,1.1,0.049,7,119,0.99297,3.13,0.36,9.7,6 +6.9,0.35,0.55,11.95,0.038,22,111,0.99687,3.11,0.29,9.7,5 +7,0.35,0.17,1.1,0.049,7,119,0.99297,3.13,0.36,9.7,6 +6.9,0.35,0.55,11.95,0.038,22,111,0.99687,3.11,0.29,9.7,5 +7.6,0.3,0.4,2.2,0.054,29,175,0.99445,3.19,0.53,9.8,5 +7.5,0.38,0.29,12.7,0.05,25,209,0.9986,3.25,0.59,9.3,6 +7.5,0.3,0.32,1.4,0.032,31,161,0.99154,2.95,0.42,10.5,5 +6.3,0.4,0.32,10.6,0.049,38,209,0.9981,3.47,0.59,9.3,6 +6.8,0.37,0.28,1.9,0.024,64,106,0.98993,3.45,0.6,12.6,8 +7.5,0.23,0.35,17.8,0.058,128,212,1.00241,3.44,0.43,8.9,5 +8.3,0.27,0.34,10.2,0.048,50,118,0.99716,3.18,0.51,10.3,5 +6.8,0.26,0.22,4.8,0.041,110,198,0.99437,3.29,0.67,10.6,5 +6.5,0.28,0.35,9.8,0.067,61,180,0.9972,3.15,0.57,9,4 +7.2,0.34,0.3,8.4,0.051,40,167,0.99756,3.48,0.62,9.7,5 +7,0.23,0.26,7.2,0.041,21,90,0.99509,3.22,0.55,9.5,6 +7.7,0.29,0.29,4.8,0.06,27,156,0.99572,3.49,0.59,10.3,6 +7.2,0.34,0.3,8.4,0.051,40,167,0.99756,3.48,0.62,9.7,5 +7.7,0.4,0.27,4.5,0.034,27,95,0.99175,3.21,0.59,12.3,8 +6.7,0.17,0.27,1.4,0.032,39,149,0.99254,3.4,0.52,10.5,5 +7,0.23,0.26,7.2,0.041,21,90,0.99509,3.22,0.55,9.5,6 +8.1,0.24,0.26,11,0.043,41,211,0.99676,3.11,0.49,10,6 +7.7,0.28,0.63,11.1,0.039,58,179,0.9979,3.08,0.44,8.8,4 +7.5,0.23,0.29,2.6,0.031,24,98,0.99194,3,0.54,10.9,6 +8.3,0.26,0.31,2,0.029,14,141,0.99077,2.95,0.77,12.2,6 +7.9,0.46,0.4,10.1,0.168,19,184,0.99782,3.06,0.62,9.5,5 +7.9,0.31,0.22,13.3,0.048,46,212,0.99942,3.47,0.59,10,5 +7.9,0.25,0.34,11.4,0.04,53,202,0.99708,3.11,0.57,9.6,6 +6.1,0.28,0.16,1.3,0.06,36,126,0.99353,3.13,0.46,8.7,6 +7,0.18,0.26,1.4,0.044,46,89,0.99256,3.39,0.48,10.7,7 +6.5,0.21,0.28,1.4,0.046,26,66,0.99199,3.43,0.48,11.1,6 +7.6,0.48,0.33,7,0.024,14,130,0.9918,3.25,0.45,12.5,7 +7.1,0.34,0.32,2,0.051,29,130,0.99354,3.3,0.5,10.4,6 +8.9,0.21,0.37,1.2,0.028,20,93,0.99244,3.2,0.37,11.5,5 
+7.4,0.32,0.27,12.9,0.04,60,221,0.99831,3.05,0.66,9.4,5 +6,0.495,0.27,5,0.157,17,129,0.99396,3.03,0.36,9.3,5 +8.1,0.25,0.34,10.1,0.05,30,121,0.99724,3.17,0.49,10.1,6 +8.2,0.25,0.46,3.75,0.05,14,102,0.99524,3.28,0.58,9.7,5 +6.5,0.18,0.29,1.7,0.035,39,144,0.9927,3.49,0.5,10.5,6 +6.7,0.24,0.26,12.6,0.053,44,182,0.99802,3.42,0.42,9.7,5 +6.6,0.32,0.24,1.3,0.06,42.5,204,0.99512,3.59,0.51,9.2,5 +7.6,0.32,0.35,1.6,0.092,24,138,0.99438,3.19,0.44,9.8,5 +7.4,0.33,0.44,7.6,0.05,40,227,0.99679,3.12,0.52,9,5 +7.2,0.3,0.3,8.1,0.05,40,188,0.99652,3.15,0.49,9.1,6 +7.4,0.34,0.3,14.9,0.037,70,169,0.99698,3.25,0.37,10.4,6 +6.1,0.16,0.29,6,0.03,29,144,0.99474,3.68,0.46,10.7,6 +6.3,0.1,0.24,6,0.039,25,107,0.99511,3.59,0.49,10.5,7 +6.2,0.45,0.73,7.2,0.099,47,202,0.99582,3.21,0.43,9.2,5 +6,0.33,0.18,3,0.036,5,85,0.99125,3.28,0.4,11.5,4 +7.6,0.48,0.37,1.2,0.034,5,57,0.99256,3.05,0.54,10.4,3 +7.2,0.2,0.3,2,0.039,43,188,0.9911,3.3,0.41,12,6 +7,0.32,0.29,4.9,0.036,41,150,0.99168,3.38,0.43,12.2,6 +7.2,0.2,0.3,2,0.039,43,188,0.9911,3.3,0.41,12,6 +7,0.22,0.29,8.9,0.05,24,90,0.99556,3.29,0.46,9.8,6 +9.4,0.23,0.56,16.45,0.063,52.5,282,1.00098,3.1,0.51,9.3,5 +6.4,0.27,0.19,2,0.084,21,191,0.99516,3.49,0.63,9.6,4 +6.4,0.27,0.19,1.9,0.085,21,196,0.99516,3.49,0.64,9.5,4 +7,0.23,0.42,5.1,0.042,37,144,0.99518,3.5,0.59,10.2,6 +6.9,0.15,0.28,4.4,0.029,14,107,0.99347,3.24,0.46,10.4,8 +6.7,0.26,0.29,5.8,0.025,26,74,0.9929,3.28,0.53,11,6 +6.9,0.15,0.28,4.4,0.029,14,107,0.99347,3.24,0.46,10.4,8 +7.6,0.2,0.68,12.9,0.042,56,160,0.99841,3.05,0.41,8.7,5 +6.9,0.3,0.29,1.3,0.053,24,189,0.99362,3.29,0.54,9.9,4 +6.9,0.3,0.3,1.3,0.053,24,186,0.99361,3.29,0.54,9.9,4 +7.6,0.21,0.35,1.2,0.041,7,106,0.9914,3.06,0.45,11.3,4 +6.8,0.46,0.26,2.7,0.042,28,83,0.99114,3.38,0.51,12,8 +7,0.28,0.26,1.7,0.042,34,130,0.9925,3.43,0.5,10.7,8 +6.5,0.24,0.29,8.2,0.043,32,156,0.99453,3.13,0.7,10.1,6 +6.4,0.17,0.34,1.5,0.091,42,135,0.9938,3.25,0.49,9.6,7 +6.4,0.17,0.34,1.5,0.093,43,136,0.9938,3.25,0.49,9.6,6 +6.3,0.695,0.55,12.9,0.056,58,252,0.99806,3.29,0.49,8.7,5 +7,0.27,0.29,3.9,0.059,28,199,0.9961,3.54,0.59,10.3,5 +8.4,0.3,0.25,17.75,0.047,25,218,1.00016,2.98,0.66,9.1,5 +6.5,0.19,0.27,4.9,0.037,13,101,0.9916,3.17,0.41,11.8,6 +8,0.36,0.39,1.6,0.024,26,93,0.99116,3.15,0.49,11.9,6 +6.1,0.16,0.24,1.4,0.046,17,77,0.99319,3.66,0.57,10.3,6 +9.2,0.19,0.42,2,0.047,16,104,0.99517,3.09,0.66,10,4 +9.2,0.16,0.49,2,0.044,18,107,0.99514,3.1,0.53,10.2,4 +8,0.26,0.28,8.2,0.038,72,202,0.99566,3.12,0.56,10,6 +8.8,0.33,0.36,2.1,0.034,19,125,0.99166,2.96,0.98,12.7,6 +9.8,0.16,0.46,1.8,0.046,23,130,0.99587,3.04,0.67,9.6,5 +6.6,0.23,0.18,8.5,0.044,59,188,0.99558,3.16,0.49,9.5,5 +7.9,0.44,0.26,4.45,0.033,23,100,0.99117,3.17,0.52,12.7,6 +7.6,0.31,0.27,5.8,0.036,23,109,0.99399,3.34,0.54,11,6 +7.5,0.705,0.1,13,0.044,44,214,0.99741,3.1,0.5,9.1,5 +7.1,0.21,0.28,2.7,0.034,23,111,0.99405,3.35,0.64,10.2,4 +7,0.16,0.26,7.3,0.047,30,220,0.99622,3.38,0.58,10.1,6 +8,0.27,0.25,19.1,0.045,50,208,1.00051,3.05,0.5,9.2,6 +6.3,0.38,0.17,8.8,0.08,50,212,0.99803,3.47,0.66,9.4,4 +7.1,0.21,0.28,2.7,0.034,23,111,0.99405,3.35,0.64,10.2,4 +6.2,0.38,0.18,7.4,0.095,28,195,0.99773,3.53,0.71,9.2,4 +8.2,0.24,0.3,2.3,0.05,23,106,0.99397,2.98,0.5,10,5 +7,0.16,0.26,6.85,0.047,30,220,0.99622,3.38,0.58,10.1,6 +7.3,0.815,0.09,11.4,0.044,45,204,0.99713,3.15,0.46,9,5 +6.3,0.41,0.16,0.9,0.032,25,98,0.99274,3.16,0.42,9.5,5 +6.1,0.36,0.41,19.35,0.07,67,207,1.00118,3.39,0.53,9.1,5 +8.1,0.4,0.32,7.9,0.031,23,118,0.99176,3.05,0.46,13.3,7 +6.8,0.26,0.43,11.75,0.045,53,198,0.9969,3.26,0.55,9.5,5 
+6.2,0.44,0.18,7.7,0.096,28,210,0.99771,3.56,0.72,9.2,5 +7.2,0.24,0.29,3,0.036,17,117,0.99411,3.36,0.68,10.1,6 +6.2,0.44,0.18,7.7,0.096,28,210,0.99771,3.56,0.72,9.2,5 +7.2,0.24,0.29,3,0.036,17,117,0.99411,3.36,0.68,10.1,6 +7.3,0.22,0.26,1.5,0.04,32,172,0.99194,3.27,0.48,11.2,6 +8.1,0.34,0.28,7.5,0.04,70,230,0.99558,3.14,0.55,9.8,6 +7.3,0.22,0.26,1.5,0.04,32,172,0.99194,3.27,0.48,11.2,6 +8.1,0.34,0.28,7.5,0.04,70,230,0.99558,3.14,0.55,9.8,6 +6.4,0.28,0.17,8.3,0.042,61,195,0.99577,3.22,0.46,9.4,5 +6.3,0.29,0.14,7.05,0.045,50,177,0.99564,3.23,0.42,9,5 +6.4,0.27,0.17,8.4,0.044,60,198,0.99578,3.21,0.47,9.4,5 +7.4,0.35,0.2,13.9,0.054,63,229,0.99888,3.11,0.5,8.9,6 +8.3,0.28,0.27,17.5,0.045,48,253,1.00014,3.02,0.56,9.1,6 +6.4,0.35,0.35,5.6,0.034,9,148,0.99441,3.17,0.5,9.8,4 +6.9,0.43,0.28,9.4,0.056,29,183,0.99594,3.17,0.43,9.4,5 +8,0.26,0.28,4.8,0.05,34,150,0.99437,3.13,0.5,10,6 +6.9,0.43,0.28,9.4,0.056,29,183,0.99594,3.17,0.43,9.4,5 +7.3,0.27,0.37,9.7,0.042,36,130,0.9979,3.48,0.75,9.9,6 +6.8,0.46,0.26,6.3,0.147,49,159,0.99434,3.14,0.47,10,5 +7.2,0.2,0.28,1.6,0.028,13,168,0.99203,3.17,1.06,11.5,6 +7.6,0.285,0.32,14.6,0.063,32,201,0.998,3,0.45,9.2,5 +6.6,0.32,0.33,2.5,0.052,40,219.5,0.99316,3.15,0.6,10,5 +7.6,0.285,0.32,14.6,0.063,32,201,0.998,3,0.45,9.2,5 +6.6,0.34,0.34,2.6,0.051,40.5,210,0.99314,3.15,0.61,10,5 +6.6,0.32,0.33,2.5,0.052,40,210,0.99316,3.15,0.6,10,5 +6.5,0.27,0.26,8.2,0.042,21,133,0.99612,3.43,0.64,9.8,6 +6.6,0.26,0.27,1.5,0.04,19,114,0.99295,3.36,0.62,10.5,6 +6.7,0.27,0.26,2.3,0.043,61,181,0.99394,3.45,0.63,10.6,6 +6.6,0.56,0.15,10,0.037,38,157,0.99642,3.28,0.52,9.4,5 +6.6,0.56,0.15,10,0.037,38,157,0.99642,3.28,0.52,9.4,5 +7.3,0.19,0.27,1.6,0.027,35,136,0.99248,3.38,0.54,11,7 +6.3,0.2,0.26,1.6,0.027,36,141,0.99268,3.53,0.56,10.8,6 +7.1,0.29,0.3,16,0.036,58,201,0.99954,3.3,0.67,9,5 +7.8,0.32,0.33,10.4,0.031,47,194,0.99692,3.07,0.58,9.6,6 +8.1,0.33,0.36,7.4,0.037,36,156,0.99592,3.19,0.54,10.6,6 +8.1,0.33,0.36,7.4,0.037,36,156,0.99592,3.19,0.54,10.6,6 +7.8,0.32,0.33,10.4,0.031,47,194,0.99692,3.07,0.58,9.6,6 +6.6,0.33,0.24,16.05,0.045,31,147,0.99822,3.08,0.52,9.2,5 +6.6,0.33,0.24,16.05,0.045,31,147,0.99822,3.08,0.52,9.2,5 +8.2,0.26,0.33,2.6,0.053,11,71,0.99402,2.89,0.49,9.5,5 +8.3,0.25,0.33,2.5,0.053,12,72,0.99404,2.89,0.48,9.5,5 +7,0.26,0.26,10.8,0.039,37,184,0.99787,3.47,0.58,10.3,7 +6,0.26,0.15,1.2,0.053,35,124,0.99347,3.08,0.46,8.8,5 +7.5,0.28,0.78,12.1,0.041,53,161,0.99838,2.98,0.44,8.7,5 +7.5,0.27,0.79,11.95,0.04,51,159,0.99839,2.98,0.44,8.7,5 +7,0.28,0.32,1.7,0.038,27,128,0.99375,3.2,0.62,10.2,6 +5.2,0.16,0.34,0.8,0.029,26,77,0.99155,3.25,0.51,10.1,6 +6.8,0.34,0.1,1.4,0.049,29,118,0.9936,3.21,0.41,9.5,5 +7.6,0.25,0.34,1.3,0.056,34,176,0.99434,3.1,0.51,9.5,5 +5.6,0.35,0.4,6.3,0.022,23,174,0.9922,3.54,0.5,11.6,7 +8.8,0.24,0.23,10.3,0.032,12,97,0.99571,3.13,0.4,10.7,6 +6,0.29,0.21,15.55,0.043,20,142,0.99658,3.11,0.54,10.1,6 +6.1,0.27,0.31,1.5,0.035,17,83,0.99076,3.32,0.44,11.1,7 +7.4,0.56,0.09,1.5,0.071,19,117,0.99496,3.22,0.53,9.8,5 +6.8,0.29,0.49,1.4,0.142,52,148,0.9937,3.08,0.49,9,6 +6.1,0.27,0.31,1.5,0.035,17,83,0.99076,3.32,0.44,11.1,7 +6.3,0.27,0.37,7.9,0.047,58,215,0.99542,3.19,0.48,9.5,6 +6.6,0.24,0.3,13,0.052,18,143,0.99825,3.37,0.49,9.4,6 +6.8,0.32,0.3,1,0.049,22,113,0.99289,3.24,0.61,10.2,5 +6.4,0.37,0.37,4.85,0.041,39.5,216.5,0.99432,3.1,0.5,9.8,6 +6.2,0.26,0.37,7.1,0.047,54,201,0.99523,3.19,0.48,9.5,6 +6.3,0.27,0.37,7.9,0.047,58,215,0.99542,3.19,0.48,9.5,6 +6.4,0.3,0.16,7.5,0.05,55,191,0.9959,3.17,0.49,9,5 +8,0.28,0.32,7.6,0.045,61,204,0.99543,3.1,0.55,10.1,6 
+6.7,0.24,0.32,10.3,0.079,37,122,0.99662,3.02,0.45,8.8,5 +7.9,0.27,0.27,1.7,0.034,25,122,0.99088,2.97,0.51,11.9,6 +7.9,0.27,0.27,1.7,0.034,25,122,0.99088,2.97,0.51,11.9,6 +6.1,0.28,0.24,19.95,0.074,32,174,0.99922,3.19,0.44,9.3,6 +7.7,0.39,0.49,7.7,0.036,11,110,0.9966,3.33,0.76,10,6 +6,0.2,0.24,5.3,0.075,49,201,0.99466,3.21,0.43,9.5,5 +6.1,0.28,0.24,19.95,0.074,32,174,0.99922,3.19,0.44,9.3,6 +7.6,0.31,0.23,12.7,0.054,20,139,0.99836,3.16,0.5,9.7,4 +7.6,0.31,0.23,12.7,0.054,20,139,0.99836,3.16,0.5,9.7,4 +6.3,0.18,0.22,1.5,0.043,45,155,0.99238,3.19,0.48,10.2,5 +8.6,0.23,0.25,11.3,0.031,13,96,0.99645,3.11,0.4,10.8,5 +6.8,0.21,0.36,18.1,0.046,32,133,1,3.27,0.48,8.8,5 +6.8,0.21,0.36,18.1,0.046,32,133,1,3.27,0.48,8.8,5 +6.9,0.26,0.31,7,0.039,37,175,0.99376,3.32,0.49,11.4,6 +6.8,0.21,0.36,18.1,0.046,32,133,1,3.27,0.48,8.8,5 +6.4,0.31,0.4,6.4,0.039,39,191,0.99513,3.14,0.52,9.8,5 +8.6,0.34,0.36,1.4,0.045,11,119,0.99556,3.17,0.47,9.4,4 +8.6,0.34,0.36,1.4,0.045,11,119,0.99556,3.17,0.47,9.4,4 +8.5,0.3,0.28,3.1,0.054,54,174,0.99543,3.21,0.43,9.4,6 +7.4,0.4,0.41,14.1,0.053,37,194,0.99886,3.2,0.63,9.4,6 +6.6,0.32,0.34,7.7,0.044,63,212,0.99526,3.22,0.48,9.7,6 +7.1,0.34,0.31,5.2,0.032,36,140,0.99166,3.35,0.47,12.3,7 +6.6,0.26,0.25,11.6,0.045,45,178,0.99691,3.33,0.43,9.8,6 +8,0.27,0.57,10.4,0.053,18,134,0.99732,3.12,0.68,9,5 +6.2,0.28,0.45,7.5,0.045,46,203,0.99573,3.26,0.46,9.2,6 +6.2,0.3,0.49,11.2,0.058,68,215,0.99656,3.19,0.6,9.4,6 +5.6,0.175,0.29,0.8,0.043,20,67,0.99112,3.28,0.48,9.9,6 +6.9,0.34,0.36,1.4,0.032,13,145,0.99214,3.07,0.52,9.8,5 +6.9,0.34,0.3,4.7,0.029,34,148,0.99165,3.36,0.49,12.3,7 +7.1,0.12,0.3,3.1,0.018,15,37,0.99004,3.02,0.52,11.9,7 +7.1,0.32,0.29,4,0.038,33,170,0.99463,3.27,0.64,10.2,6 +7.3,0.51,0.29,11.3,0.034,61,224,0.99683,3.14,0.56,9.5,6 +7.1,0.12,0.3,3.1,0.018,15,37,0.99004,3.02,0.52,11.9,7 +6.3,0.24,0.55,8.1,0.04,67,216,0.99596,3.24,0.5,9.2,5 +7.5,0.41,0.23,14.8,0.054,28,174,0.99898,3.18,0.49,9.7,5 +6.5,0.18,0.33,1.4,0.029,35,138,0.99114,3.36,0.6,11.5,7 +7.3,0.17,0.24,8.1,0.121,32,162,0.99508,3.17,0.38,10.4,8 +8.2,0.2,0.38,3.5,0.053,41,174,0.99306,3.22,0.41,11.6,5 +7.5,0.41,0.23,14.8,0.054,28,174,0.99898,3.18,0.49,9.7,5 +7.3,0.17,0.24,8.1,0.121,32,162,0.99508,3.17,0.38,10.4,8 +6.5,0.18,0.33,1.4,0.029,35,138,0.99114,3.36,0.6,11.5,7 +7.3,0.16,0.35,1.5,0.036,29,108,0.99342,3.27,0.51,10.2,6 +6.4,0.16,0.37,1.5,0.037,27,109,0.99345,3.38,0.5,9.8,6 +6.6,0.42,0.13,12.8,0.044,26,158,0.99772,3.24,0.47,9,5 +5.8,0.3,0.12,1.6,0.036,57,163,0.99239,3.38,0.59,10.5,6 +6.7,0.54,0.27,7.1,0.049,8,178,0.99502,3.16,0.38,9.4,4 +6.7,0.54,0.27,7.1,0.049,8,178,0.99502,3.16,0.38,9.4,4 +6.4,0.22,0.3,11.2,0.046,53,149,0.99479,3.21,0.34,10.8,5 +6.8,0.23,0.3,1.7,0.043,19,95,0.99207,3.17,0.46,10.7,7 +9,0.26,0.34,6.7,0.029,21,162,0.99497,3.08,0.5,10.6,6 +6.5,0.23,0.25,17.3,0.046,15,110,0.99828,3.15,0.42,9.2,6 +5.9,0.28,0.14,8.6,0.032,30,142,0.99542,3.28,0.44,9.5,6 +5.9,0.28,0.14,8.6,0.032,30,142,0.99542,3.28,0.44,9.5,6 +6.2,0.27,0.18,1.5,0.028,20,111,0.99228,3.41,0.5,10,5 +9,0.29,0.34,12.1,0.03,34,177,0.99706,3.13,0.47,10.6,5 +9,0.26,0.34,6.7,0.029,21,162,0.99497,3.08,0.5,10.6,6 +8.9,0.27,0.34,10.7,0.029,19.5,166,0.99669,3.13,0.48,10.6,5 +6.5,0.23,0.25,17.3,0.046,15,110,0.99828,3.15,0.42,9.2,6 +6.9,0.32,0.3,1.8,0.036,28,117,0.99269,3.24,0.48,11,6 +7.2,0.22,0.24,1.4,0.041,17,159,0.99196,3.25,0.53,11.2,6 +6.7,0.5,0.38,7.5,0.046,26,175,0.99662,3.32,0.54,9.6,5 +6.2,0.33,0.14,4.8,0.052,27,128,0.99475,3.21,0.48,9.4,5 +6.3,0.26,0.42,7.1,0.045,62,209,0.99544,3.2,0.53,9.5,6 
+7.5,0.2,0.47,16.9,0.052,51,188,0.99944,3.09,0.62,9.3,5 +6.2,0.33,0.14,4.8,0.052,27,128,0.99475,3.21,0.48,9.4,5 +6.3,0.26,0.42,7.1,0.045,62,209,0.99544,3.2,0.53,9.5,6 +6.6,0.36,0.52,11.3,0.046,8,110,0.9966,3.07,0.46,9.4,5 +6.3,0.13,0.42,1.1,0.043,63,146,0.99066,3.13,0.72,11.2,7 +6.4,0.15,0.44,1.2,0.043,67,150,0.9907,3.14,0.73,11.2,7 +6.3,0.13,0.42,1.1,0.043,63,146,0.99066,3.13,0.72,11.2,7 +7.6,0.23,0.64,12.9,0.033,54,170,0.998,3,0.53,8.8,5 +6.4,0.15,0.44,1.2,0.043,67,150,0.9907,3.14,0.73,11.2,7 +6.3,0.13,0.42,1.1,0.043,63,146,0.99066,3.13,0.72,11.2,7 +5.7,0.255,0.65,1.2,0.079,17,137,0.99307,3.2,0.42,9.4,5 +6.9,0.32,0.26,2.3,0.03,11,103,0.99106,3.06,0.42,11.1,6 +6.9,0.28,0.22,10,0.052,36,131,0.99696,3.08,0.46,9.6,5 +6.9,0.32,0.26,2.3,0.03,11,103,0.99106,3.06,0.42,11.1,6 +5.7,0.255,0.65,1.2,0.079,17,137,0.99307,3.2,0.42,9.4,5 +6.6,0.41,0.16,1.4,0.037,28,160,0.99167,2.95,0.45,10.6,6 +7.3,0.37,0.16,14.9,0.048,59,240,0.99902,3.13,0.45,8.9,5 +6.9,0.21,0.24,1.8,0.021,17,80,0.98992,3.15,0.46,12.3,7 +6.6,0.24,0.28,1.8,0.028,39,132,0.99182,3.34,0.46,11.4,5 +6.8,0.28,0.36,7,0.043,60,207,0.99556,3.16,0.49,9.6,6 +6.6,0.24,0.24,8.6,0.034,25,135,0.99582,3.33,0.59,10.3,6 +6.6,0.24,0.28,1.8,0.028,39,132,0.99182,3.34,0.46,11.4,5 +7,0.16,0.32,1.1,0.032,29,80,0.98972,3.23,0.36,12.1,6 +7,0.14,0.28,1.3,0.026,10,56,0.99352,3.46,0.45,9.9,5 +6.3,0.34,0.36,4.9,0.035,31,185,0.9946,3.15,0.49,9.7,5 +6.8,0.26,0.24,1.9,0.043,70,154,0.99273,3.18,0.52,10.5,5 +6.7,0.17,0.42,10.4,0.038,85,182,0.99628,3.04,0.44,8.9,6 +6.5,0.27,0.4,10,0.039,74,227,0.99582,3.18,0.5,9.4,5 +6.7,0.25,0.36,8.6,0.037,63,206,0.99553,3.18,0.5,9.6,5 +5.8,0.3,0.27,1.7,0.014,45,104,0.98914,3.4,0.56,12.6,7 +6.4,0.28,0.56,1.7,0.156,49,106,0.99354,3.1,0.37,9.2,6 +7.7,0.3,0.26,18.95,0.053,36,174,0.99976,3.2,0.5,10.4,5 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +5.1,0.14,0.25,0.7,0.039,15,89,0.9919,3.22,0.43,9.2,6 +6.8,0.18,0.3,12.8,0.062,19,171,0.99808,3,0.52,9,7 +7.2,0.615,0.1,1.4,0.068,25,154,0.99499,3.2,0.48,9.7,4 +6.9,0.13,0.28,13.3,0.05,47,132,0.99655,3.34,0.42,10.1,6 +6.7,0.34,0.3,8.5,0.059,24,152,0.99615,3.46,0.64,11,7 +7.3,0.32,0.29,1.5,0.038,32,144,0.99296,3.2,0.55,10.8,5 +6.3,0.21,0.29,11.7,0.048,49,147,0.99482,3.22,0.38,10.8,5 +5.4,0.5,0.13,5,0.028,12,107,0.99079,3.48,0.88,13.5,7 +8.2,0.52,0.34,1.2,0.042,18,167,0.99366,3.24,0.39,10.6,5 +7.8,0.28,0.31,2.1,0.046,28,208,0.99434,3.23,0.64,9.8,5 +6.4,0.22,0.34,1.4,0.023,56,115,0.98958,3.18,0.7,11.7,6 +7.8,0.28,0.31,2.1,0.046,28,208,0.99434,3.23,0.64,9.8,5 +6.9,0.32,0.27,16,0.034,58,185,0.99938,3.34,0.6,9,6 +6.8,0.11,0.42,1.1,0.042,51,132,0.99059,3.18,0.74,11.3,7 +6.2,0.26,0.32,15.3,0.031,64,185,0.99835,3.31,0.61,9.4,5 +6.4,0.22,0.34,1.4,0.023,56,115,0.98958,3.18,0.7,11.7,6 +6.7,0.3,0.29,2.8,0.025,37,107,0.99159,3.31,0.63,11.3,7 +6.7,0.3,0.29,2.8,0.025,37,107,0.99159,3.31,0.63,11.3,7 +7.1,0.2,0.3,0.9,0.019,4,28,0.98931,3.2,0.36,12,6 +7.2,0.2,0.36,2.5,0.028,22,157,0.9938,3.48,0.49,10.6,6 +8.9,0.26,0.33,8.1,0.024,47,202,0.99558,3.13,0.46,10.8,6 +7.5,0.25,0.32,8.2,0.024,53,209,0.99563,3.12,0.46,10.8,6 +7.1,0.2,0.3,0.9,0.019,4,28,0.98931,3.2,0.36,12,6 +6.3,0.27,0.46,11.1,0.053,44,177,0.99691,3.18,0.67,9.4,5 +6.5,0.3,0.39,7.8,0.038,61,219,0.9959,3.19,0.5,9.4,5 +6.7,0.3,0.29,2.8,0.025,37,107,0.99159,3.31,0.63,11.3,7 
+6.6,0.36,0.52,10.1,0.05,29,140,0.99628,3.07,0.4,9.4,5 +6.15,0.21,0.37,3.2,0.021,20,80,0.99076,3.39,0.47,12,5 +6.5,0.18,0.41,14.2,0.039,47,129,0.99678,3.28,0.72,10.3,7 +6.5,0.18,0.41,14.2,0.039,47,129,0.99678,3.28,0.72,10.3,7 +6.5,0.18,0.41,14.2,0.039,47,129,0.99678,3.28,0.72,10.3,7 +6.6,0.26,0.21,2.9,0.026,48,126,0.99089,3.22,0.38,11.3,7 +6.6,0.35,0.35,6,0.063,31,150,0.99537,3.1,0.47,9.4,6 +6.5,0.28,0.28,20.4,0.041,40,144,1.0002,3.14,0.38,8.7,5 +6.6,0.36,0.52,10.1,0.05,29,140,0.99628,3.07,0.4,9.4,5 +6.6,0.26,0.21,2.9,0.026,48,126,0.99089,3.22,0.38,11.3,7 +6.5,0.18,0.41,14.2,0.039,47,129,0.99678,3.28,0.72,10.3,7 +6.15,0.21,0.37,3.2,0.021,20,80,0.99076,3.39,0.47,12,5 +4.5,0.19,0.21,0.95,0.033,89,159,0.99332,3.34,0.42,8,5 +8,0.24,0.26,1.7,0.033,36,136,0.99316,3.44,0.51,10.4,7 +7.8,0.17,0.23,1.7,0.029,39,128,0.99272,3.37,0.41,10.7,7 +7,0.24,0.24,9,0.03,42,219,0.99636,3.47,0.46,10.2,6 +5.8,0.6,0,1.3,0.044,72,197,0.99202,3.56,0.43,10.9,5 +5.9,0.445,0.26,1.4,0.027,23,109,0.99148,3.3,0.36,10.5,6 +6.7,0.28,0.28,2.4,0.012,36,100,0.99064,3.26,0.39,11.7,7 +6.8,0.44,0.2,16,0.065,61,186,0.99884,3.13,0.45,8.6,5 +7.2,0.24,0.27,11.4,0.034,40,174,0.99773,3.2,0.44,9,5 +8.7,0.31,0.73,14.35,0.044,27,191,1.00013,2.96,0.88,8.7,5 +8.2,0.32,0.26,2.1,0.062,26,87,0.98974,3.1,0.47,12.8,6 +7.2,0.24,0.27,11.4,0.034,40,174,0.99773,3.2,0.44,9,5 +8.7,0.31,0.73,14.35,0.044,27,191,1.00013,2.96,0.88,8.7,5 +7.5,0.13,0.38,1.1,0.023,42,104,0.99112,3.28,0.53,11.8,6 +9.2,0.14,0.37,1.1,0.034,36,84,0.99136,3.05,0.55,11.6,6 +7.4,0.2,0.37,1.2,0.028,28,89,0.99132,3.14,0.61,11.8,6 +6.1,0.15,0.35,15.8,0.042,55,158,0.99642,3.24,0.37,10.6,5 +7.6,0.23,0.4,5.2,0.066,14,91,0.99488,3.17,0.8,9.7,5 +8.1,0.33,0.22,5.2,0.047,24,151,0.99527,3.22,0.47,10.3,5 +7.15,0.17,0.24,9.6,0.119,56,178,0.99578,3.15,0.44,10.2,6 +6.7,0.12,0.3,5.2,0.048,38,113,0.99352,3.33,0.44,10.1,7 +5.7,0.18,0.36,1.2,0.046,9,71,0.99199,3.7,0.68,10.9,7 +5.8,0.15,0.28,0.8,0.037,43,127,0.99198,3.24,0.51,9.3,5 +6.6,0.23,0.29,14.45,0.057,29,144,0.99756,3.33,0.54,10.2,6 +7.15,0.17,0.24,9.6,0.119,56,178,0.99578,3.15,0.44,10.2,6 +7,0.34,0.39,6.9,0.066,43,162,0.99561,3.11,0.53,9.5,5 +6.4,0.68,0.26,3.4,0.069,25,146,0.99347,3.18,0.4,9.3,5 +7.3,0.22,0.31,2.3,0.018,45,80,0.98936,3.06,0.34,12.9,7 +6.4,0.28,0.27,11,0.042,45,148,0.99786,3.14,0.46,8.7,5 +6.9,0.4,0.22,5.95,0.081,76,303,0.99705,3.4,0.57,9.4,5 +6.8,0.19,0.23,5.1,0.034,71,204,0.9942,3.23,0.69,10.1,5 +7.1,0.23,0.24,5.4,0.039,60,196,0.9948,3.19,0.78,10,4 +6.45,0.14,0.42,1.2,0.05,51,129,0.99116,3.27,0.69,11.1,7 +6.5,0.15,0.44,12.6,0.052,65,158,0.99688,3.26,0.7,10.3,7 +7.1,0.15,0.34,1,0.033,27,73,0.98974,3.24,0.41,12.2,6 +6.7,0.33,0.34,6.6,0.067,35,156,0.99542,3.11,0.48,9.3,6 +7.2,0.3,0.26,1.5,0.041,46,178,0.99154,3.19,0.56,11.3,6 +7,0.23,0.33,1,0.043,46,110,0.99118,3.04,0.65,10.8,6 +8,0.13,0.25,1.1,0.033,15,86,0.99044,2.98,0.39,11.2,8 +6.2,0.21,0.34,6.6,0.03,36,91,0.9914,3.32,0.45,12.5,7 +8.3,0.4,0.41,8.2,0.05,15,122,0.9979,3.39,0.49,9.3,5 +5.9,0.34,0.31,2,0.03,38,142,0.98892,3.4,0.41,12.9,7 +6.6,0.12,0.25,1.4,0.039,21,131,0.99114,3.2,0.45,11.2,7 +9.6,0.655,0.21,2,0.039,21,120,0.99188,3,1,12.6,6 +6.8,0.26,0.4,7.5,0.046,45,179,0.99583,3.2,0.49,9.3,5 +5.9,0.34,0.31,2,0.03,38,142,0.98892,3.4,0.41,12.9,7 +5.9,0.3,0.3,2,0.03,38,142,0.98892,3.41,0.41,12.9,7 +7,0.15,0.3,13.3,0.049,46,120,0.99704,3.2,0.36,9.5,7 +7.9,0.37,0.31,2.85,0.037,5,24,0.9911,3.19,0.36,11.9,6 +7.2,0.35,0.25,5.6,0.032,23,120,0.99334,2.93,0.66,10.3,7 +7.2,0.32,0.24,5.6,0.033,23,120,0.99334,2.92,0.66,10.3,7 +7.6,0.1,0.33,1,0.031,33,93,0.99094,3.06,0.68,11.2,6 
+6.2,0.25,0.31,3.2,0.03,32,150,0.99014,3.18,0.31,12,6 +7.1,0.31,0.17,1,0.042,21,144,0.99304,3.13,0.4,9.6,5 +7.6,0.18,0.28,7.1,0.041,29,110,0.99652,3.2,0.42,9.2,6 +8,0.17,0.29,2.4,0.029,52,119,0.98944,3.03,0.33,12.9,6 +7.2,0.19,0.27,11.2,0.061,46,149,0.99772,2.99,0.59,9.3,6 +7.6,0.32,0.25,9.5,0.03,15,136,0.99367,3.1,0.44,12.1,6 +7.1,0.31,0.17,1,0.042,21,144,0.99304,3.13,0.4,9.6,5 +6.6,0.21,0.29,1.8,0.026,35,128,0.99183,3.37,0.48,11.2,6 +7,0.16,0.36,2.6,0.029,28,98,0.99126,3.11,0.37,11.2,7 +8,0.17,0.29,2.4,0.029,52,119,0.98944,3.03,0.33,12.9,6 +6.6,0.24,0.38,8,0.042,56,187,0.99577,3.21,0.46,9.2,5 +7.2,0.19,0.27,11.2,0.061,46,149,0.99772,2.99,0.59,9.3,6 +7.6,0.18,0.28,7.1,0.041,29,110,0.99652,3.2,0.42,9.2,6 +6.9,0.3,0.25,3.3,0.041,26,124,0.99428,3.18,0.5,9.3,6 +6.2,0.28,0.27,10.3,0.03,26,108,0.99388,3.2,0.36,10.7,6 +6.9,0.31,0.32,1.2,0.024,20,166,0.99208,3.05,0.54,9.8,6 +6.7,0.23,0.25,1.6,0.036,28,143,0.99256,3.3,0.54,10.3,6 +6.2,0.28,0.27,10.3,0.03,26,108,0.99388,3.2,0.36,10.7,6 +5.7,0.23,0.28,9.65,0.025,26,121,0.9925,3.28,0.38,11.3,6 +6.5,0.22,0.5,16.4,0.048,36,182,0.99904,3.02,0.49,8.8,6 +7,0.18,0.37,1.5,0.043,16,104,0.99216,3.18,0.5,10.8,5 +6.9,0.31,0.32,1.2,0.024,20,166,0.99208,3.05,0.54,9.8,6 +6.9,0.3,0.25,3.3,0.041,26,124,0.99428,3.18,0.5,9.3,6 +6.5,0.46,0.31,5,0.027,15,72,0.99165,3.26,0.6,11.5,7 +6.5,0.23,0.36,16.3,0.038,43,133,0.99924,3.26,0.41,8.8,5 +6.5,0.23,0.36,16.3,0.038,43,133,0.99924,3.26,0.41,8.8,5 +6.5,0.23,0.36,16.3,0.038,43,133,0.99924,3.26,0.41,8.8,5 +6.6,0.26,0.38,6.5,0.17,68,201,0.9956,3.19,0.38,9.4,6 +6.7,0.26,0.39,6.4,0.171,64,200,0.99562,3.19,0.38,9.4,6 +7.5,0.28,0.39,10.2,0.045,59,209,0.9972,3.16,0.63,9.6,6 +6.5,0.23,0.36,16.3,0.038,43,133,0.99924,3.26,0.41,8.8,5 +6.8,0.23,0.42,7.4,0.044,56,189,0.9958,3.22,0.48,9.3,6 +7.8,0.25,0.34,13.7,0.044,66,184,0.99976,3.22,0.75,8.9,5 +7.8,0.25,0.34,13.7,0.044,66,184,0.99976,3.22,0.75,8.9,5 +5.6,0.2,0.22,1.3,0.049,25,155,0.99296,3.74,0.43,10,5 +6.4,0.21,0.44,7.4,0.045,47,182,0.9957,3.24,0.46,9.1,5 +6.8,0.23,0.42,7.4,0.044,56,189,0.9958,3.22,0.48,9.3,6 +6.8,0.24,0.37,7.45,0.043,59,188,0.99579,3.2,0.5,9.4,6 +7.8,0.25,0.28,7.2,0.04,46,179,0.99541,3.14,0.6,10.1,6 +7.8,0.25,0.34,13.7,0.044,66,184,0.99976,3.22,0.75,8.9,5 +6.8,0.16,0.29,10.4,0.046,59,143,0.99518,3.2,0.4,10.8,6 +5.2,0.28,0.29,1.1,0.028,18,69,0.99168,3.24,0.54,10,6 +7.5,0.18,0.31,6.5,0.029,53,160,0.99276,3.03,0.38,10.9,6 +7.5,0.26,0.3,4.6,0.027,29,92,0.99085,3.15,0.38,12,7 +8.2,0.37,0.64,13.9,0.043,22,171,0.99873,2.99,0.8,9.3,5 +7.6,0.4,0.27,5.2,0.03,32,101,0.99172,3.22,0.62,12.3,7 +7.5,0.26,0.25,1.7,0.038,29,129,0.99312,3.45,0.56,10.4,6 +7.5,0.18,0.31,6.5,0.029,53,160,0.99276,3.03,0.38,10.9,6 +6.9,0.23,0.32,16.4,0.045,62,153,0.9972,3.22,0.42,10.5,5 +5.3,0.2,0.31,3.6,0.036,22,91,0.99278,3.41,0.5,9.8,6 +6.5,0.17,0.31,1.5,0.041,34,121,0.99092,3.06,0.46,10.5,6 +6.5,0.35,0.28,12.4,0.051,86,213,0.9962,3.16,0.51,9.9,6 +6.5,0.29,0.31,1.7,0.035,24,79,0.99053,3.27,0.69,11.4,7 +6.8,0.3,0.22,6.2,0.06,41,190,0.99858,3.18,0.51,9.2,5 +7.9,0.51,0.36,6.2,0.051,30,173,0.9984,3.09,0.53,9.7,5 +7.9,0.51,0.34,2.6,0.049,13,135,0.99335,3.09,0.51,10,5 +6.5,0.29,0.31,1.7,0.035,24,79,0.99053,3.27,0.69,11.4,7 +7.1,0.29,0.28,9.3,0.048,50,141,0.9949,3.13,0.49,10.3,6 +6.5,0.35,0.28,12.4,0.051,86,213,0.9962,3.16,0.51,9.9,6 +6.5,0.17,0.31,1.5,0.041,34,121,0.99092,3.06,0.46,10.5,6 +7.4,0.2,0.28,9.1,0.047,29,95,0.99532,3.16,0.47,9.8,7 +6.9,0.615,0.42,12,0.067,24,131,0.99727,3.19,0.34,9.3,5 +6.8,0.32,0.28,4.8,0.034,25,100,0.99026,3.08,0.47,12.4,7 +6.3,0.2,0.19,12.3,0.048,54,145,0.99668,3.16,0.42,9.3,6 
+6.9,0.615,0.42,12,0.067,24,131,0.99727,3.19,0.34,9.3,5 +8,0.23,0.28,2.7,0.048,49,165,0.9952,3.26,0.72,9.5,6 +6.7,0.27,0.33,3.6,0.034,9,45,0.99144,3.08,0.4,10.5,6 +6.7,0.27,0.33,3.6,0.034,9,45,0.99144,3.08,0.4,10.5,6 +6.7,0.44,0.22,4.3,0.032,19,99,0.99015,3.26,0.53,12.8,7 +7,0.34,0.3,1.8,0.045,44,142,0.9914,2.99,0.45,10.8,6 +7.3,0.26,0.33,11.8,0.057,48,127,0.99693,3.1,0.55,10,6 +5.8,0.17,0.34,1.8,0.045,96,170,0.99035,3.38,0.9,11.8,8 +7.3,0.26,0.33,11.8,0.057,48,127,0.99693,3.1,0.55,10,6 +5.8,0.17,0.34,1.8,0.045,96,170,0.99035,3.38,0.9,11.8,8 +6.8,0.17,0.36,1.4,0.036,38,108,0.99006,3.19,0.66,12,6 +7.1,0.43,0.3,6.6,0.025,15,138,0.99126,3.18,0.46,12.6,6 +5.8,0.315,0.27,1.55,0.026,15,70,0.98994,3.37,0.4,11.9,8 +5.9,0.17,0.28,0.7,0.027,5,28,0.98985,3.13,0.32,10.6,5 +6.6,0.34,0.18,6.4,0.082,47,240,0.9971,3.42,0.48,9.2,5 +8.6,0.33,0.34,11.8,0.059,42,240,0.99882,3.17,0.52,10,6 +5.6,0.12,0.26,4.3,0.038,18,97,0.99477,3.36,0.46,9.2,5 +5.8,0.13,0.26,5.1,0.039,19,103,0.99478,3.36,0.47,9.3,6 +7.7,0.18,0.35,5.8,0.055,25,144,0.99576,3.24,0.54,10.2,6 +7.7,0.16,0.36,5.9,0.054,25,148,0.99578,3.25,0.54,10.2,6 +6,0.26,0.15,1.3,0.06,51,154,0.99354,3.14,0.51,8.7,5 +7.3,0.32,0.35,1.4,0.05,8,163,0.99244,3.24,0.42,10.7,5 +7.7,0.3,0.34,1.2,0.048,4,119,0.99084,3.18,0.34,12.1,6 +7.9,0.16,0.3,7.4,0.05,58,152,0.99612,3.12,0.37,9.5,6 +6.4,0.27,0.29,10.8,0.028,17,118,0.99356,3.18,0.37,11.2,6 +6.9,0.16,0.37,1.8,0.034,36,95,0.98952,2.93,0.59,12,6 +7.9,0.16,0.3,7.4,0.05,58,152,0.99612,3.12,0.37,9.5,6 +7.7,0.3,0.34,1.2,0.048,4,119,0.99084,3.18,0.34,12.1,6 +7.3,0.32,0.35,1.4,0.05,8,163,0.99244,3.24,0.42,10.7,5 +6.4,0.44,0.44,14.4,0.048,29,228,0.99955,3.26,0.54,8.8,7 +6.3,0.2,0.24,1.7,0.052,36,135,0.99374,3.8,0.66,10.8,6 +6.2,0.29,0.32,3.6,0.026,39,138,0.9892,3.31,0.37,13.1,7 +7.6,0.39,0.32,3.6,0.035,22,93,0.99144,3.08,0.6,12.5,7 +7,0.36,0.32,10.05,0.045,37,131,0.99352,3.09,0.33,11.7,8 +7,0.36,0.32,10.05,0.045,37,131,0.99352,3.09,0.33,11.7,8 +7,0.36,0.32,10.5,0.045,35,135,0.9935,3.09,0.33,11.6,8 +7.6,0.2,0.36,1.9,0.043,24,111,0.99237,3.29,0.54,11.3,6 +7.6,0.39,0.32,3.6,0.035,22,93,0.99144,3.08,0.6,12.5,7 +6.7,0.2,0.37,1.65,0.025,42,103,0.99022,3.11,0.45,11.4,5 +6.2,0.235,0.34,1.9,0.036,4,117,0.99032,3.4,0.44,12.2,5 +7.8,0.965,0.6,65.8,0.074,8,160,1.03898,3.39,0.69,11.7,6 +7.1,0.2,0.31,6.85,0.053,32,211,0.99587,3.31,0.59,10.4,6 +7.1,0.2,0.31,7.4,0.053,32,211,0.99587,3.31,0.59,10.4,6 +7.1,0.2,0.31,7.4,0.053,32,211,0.99587,3.31,0.59,10.4,6 +6.4,0.24,0.25,20.2,0.083,35,157,0.99976,3.17,0.5,9.1,5 +8,0.3,0.36,11,0.034,8,70,0.99354,3.05,0.41,12.2,6 +6.4,0.24,0.25,20.2,0.083,35,157,0.99976,3.17,0.5,9.1,5 +6.9,0.4,0.42,6.2,0.066,41,176,0.99552,3.12,0.54,9.4,5 +6.9,0.4,0.43,6.2,0.065,42,178,0.99552,3.11,0.53,9.4,5 +7.1,0.2,0.31,6.85,0.053,32,211,0.99587,3.31,0.59,10.4,6 +6.6,0.25,0.51,8,0.047,61,189,0.99604,3.22,0.49,9.2,5 +6.8,0.26,0.44,8.2,0.046,52,183,0.99584,3.2,0.51,9.4,5 +6.5,0.37,0.3,2.2,0.033,39,107,0.98894,3.22,0.53,13.5,7 +6.8,0.35,0.53,10.1,0.053,37,151,0.9963,3.07,0.4,9.4,5 +6.4,0.22,0.32,7.2,0.028,15,83,0.993,3.13,0.55,10.9,8 +6.5,0.37,0.3,2.2,0.033,39,107,0.98894,3.22,0.53,13.5,7 +6.8,0.35,0.53,10.1,0.053,37,151,0.9963,3.07,0.4,9.4,5 +6.9,0.31,0.32,1.6,0.036,34,114,0.99068,3.19,0.45,11.4,7 +6.7,0.16,0.37,1.3,0.036,45,125,0.98964,3.19,0.51,12.4,7 +6.6,0.25,0.51,8,0.047,61,189,0.99604,3.22,0.49,9.2,5 +6.8,0.26,0.44,8.2,0.046,52,183,0.99584,3.2,0.51,9.4,5 +5.6,0.15,0.31,5.3,0.038,8,79,0.9923,3.3,0.39,10.5,6 +5.5,0.15,0.32,14,0.031,16,99,0.99437,3.26,0.38,11.5,8 +6.4,0.22,0.32,7.2,0.028,15,83,0.993,3.13,0.55,10.9,8 
+7.3,0.2,0.26,1.6,0.04,36,123,0.99238,3.34,0.44,10.8,6 +7.5,0.17,0.71,11.8,0.038,52,148,0.99801,3.03,0.46,8.9,5 +7.5,0.18,0.72,9.6,0.039,53,151,0.99802,3.03,0.46,8.9,5 +7,0.27,0.48,6.1,0.042,60,184,0.99566,3.2,0.5,9.4,6 +5.8,0.32,0.31,2.7,0.049,25,153,0.99067,3.44,0.73,12.2,7 +7.8,0.26,0.31,3.6,0.025,22,100,0.99066,2.99,0.47,12.1,7 +7.4,0.3,0.32,1.7,0.03,23,128,0.9929,3.17,0.66,10.9,5 +6.7,0.16,0.34,1.6,0.026,27,109,0.9934,3.34,0.58,10.1,6 +5.8,0.32,0.31,2.7,0.049,25,153,0.99067,3.44,0.73,12.2,7 +6.7,0.19,0.39,1,0.032,14,71,0.98912,3.31,0.38,13,7 +6.6,0.36,0.24,0.9,0.038,15,72,0.99066,3.23,0.39,11,5 +7.2,0.17,0.41,1.6,0.052,24,126,0.99228,3.19,0.49,10.8,5 +6.7,0.19,0.39,1,0.032,14,71,0.98912,3.31,0.38,13,7 +6,0.11,0.47,10.6,0.052,69,148,0.9958,2.91,0.34,9.3,4 +6,0.21,0.34,2,0.042,63,123,0.99052,3.44,0.42,11.4,6 +6.7,0.325,0.82,1.2,0.152,49,120,0.99312,2.99,0.38,9.2,5 +6.6,0.4,0.46,6.2,0.056,42,241,0.9968,3.5,0.6,9.9,5 +6.5,0.2,0.24,9.2,0.044,25,150,0.99502,3.22,0.44,10.5,5 +7.6,0.27,0.34,5,0.04,18,56,0.99084,3.06,0.48,12.4,6 +7.2,0.26,0.4,6.3,0.047,52,172,0.99573,3.18,0.53,9.5,6 +6.3,0.25,0.22,3.3,0.048,41,161,0.99256,3.16,0.5,10.5,6 +6.5,0.22,0.45,8,0.053,52,196,0.9959,3.23,0.48,9.1,6 +6.4,0.14,0.31,1.2,0.034,53,138,0.99084,3.38,0.35,11.5,7 +6.4,0.14,0.31,1.2,0.034,53,138,0.99084,3.38,0.35,11.5,7 +7.1,0.26,0.32,16.2,0.044,31,170,0.99644,3.17,0.37,11.2,5 +6.6,0.22,0.34,11.6,0.05,59,140,0.99526,3.22,0.4,10.8,5 +6.6,0.45,0.43,7.2,0.064,31,186,0.9954,3.12,0.44,9.4,5 +6.6,0.17,0.3,1.1,0.031,13,73,0.99095,3.17,0.58,11,6 +7.2,0.44,0.28,3.4,0.048,22,112,0.99188,3.21,0.37,11.3,7 +6.2,0.15,0.27,1.4,0.041,51,117,0.9909,3.28,0.38,11.2,6 +6.3,0.25,0.22,3.3,0.048,41,161,0.99256,3.16,0.5,10.5,6 +6.5,0.22,0.45,8,0.053,52,196,0.9959,3.23,0.48,9.1,6 +7.3,0.26,0.3,9.3,0.05,35,154,0.99581,3.21,0.5,10.4,6 +6.9,0.15,0.29,2.3,0.033,14,82,0.99132,3.1,0.58,11.2,7 +5.8,0.22,0.29,0.9,0.034,34,89,0.98936,3.14,0.36,11.1,7 +6.5,0.37,0.33,3.5,0.036,23,92,0.99136,3.18,0.38,11.2,6 +5.5,0.375,0.38,1.7,0.036,17,98,0.99142,3.29,0.39,10.5,6 +5.9,0.2,0.4,1.3,0.047,23,92,0.99232,3.2,0.45,10,6 +5.9,0.22,0.38,1.3,0.046,24,90,0.99232,3.2,0.47,10,6 +8,0.22,0.31,5.6,0.049,24,97,0.993,3.1,0.42,10.9,5 +6.5,0.22,0.29,7.4,0.028,16,87,0.99311,3.15,0.56,10.9,7 +6.9,0.15,0.29,2.3,0.033,14,82,0.99132,3.1,0.58,11.2,7 +5.8,0.2,0.34,1,0.035,40,86,0.98993,3.5,0.42,11.7,5 +6.6,0.31,0.07,1.5,0.033,55,144,0.99208,3.16,0.42,10,5 +7.7,0.43,0.37,10,0.169,22,210,0.99776,3.02,0.64,9.5,5 +6.7,0.24,0.29,14.9,0.053,55,136,0.99839,3.03,0.52,9,5 +7.3,0.23,0.34,9.3,0.052,19,86,0.99574,3.04,0.56,10,5 +7.9,0.2,0.39,1,0.041,37,154,0.99093,3.08,0.43,11.9,5 +5.3,0.16,0.39,1,0.028,40,101,0.99156,3.57,0.59,10.6,6 +6.4,0.21,0.28,5.9,0.047,29,101,0.99278,3.15,0.4,11,6 +6.9,0.33,0.26,5,0.027,46,143,0.9924,3.25,0.43,11.2,7 +5.6,0.18,0.58,1.25,0.034,29,129,0.98984,3.51,0.6,12,7 +6.6,0.29,0.31,3.9,0.027,39,96,0.99035,3.24,0.6,12.6,8 +6.9,0.33,0.26,5,0.027,46,143,0.9924,3.25,0.43,11.2,7 +6.6,0.21,0.36,0.8,0.034,48,113,0.99165,3.24,0.68,10.5,6 +7.3,0.21,0.33,1,0.037,66,144,0.9923,3.11,0.52,10.2,6 +6.4,0.21,0.28,5.9,0.047,29,101,0.99278,3.15,0.4,11,6 +5.1,0.11,0.32,1.6,0.028,12,90,0.99008,3.57,0.52,12.2,6 +6.5,0.15,0.32,1.3,0.036,19,76,0.98964,3.18,0.41,12.3,6 +5.3,0.16,0.39,1,0.028,40,101,0.99156,3.57,0.59,10.6,6 +5.6,0.19,0.46,1.1,0.032,33,115,0.9909,3.36,0.5,10.4,6 +5.6,0.18,0.58,1.25,0.034,29,129,0.98984,3.51,0.6,12,7 +6.7,0.48,0.32,1.4,0.021,22,121,0.9889,3.15,0.53,12.7,7 +6.2,0.23,0.23,1.2,0.018,18,128,0.99178,3.05,0.28,10.6,5 
+6,0.17,0.29,5,0.028,25,108,0.99076,3.14,0.34,12.3,6 +6.7,0.48,0.32,1.4,0.021,22,121,0.9889,3.15,0.53,12.7,7 +6.7,0.15,0.38,1.7,0.037,20,84,0.99046,3.09,0.53,11.4,6 +4.2,0.17,0.36,1.8,0.029,93,161,0.98999,3.65,0.89,12,7 +5.8,0.21,0.32,1.6,0.045,38,95,0.98946,3.23,0.94,12.4,8 +5.4,0.23,0.36,1.5,0.03,74,121,0.98976,3.24,0.99,12.1,7 +6.7,0.15,0.38,1.7,0.037,20,84,0.99046,3.09,0.53,11.4,6 +6.4,0.22,0.31,13.9,0.04,57,135,0.99672,3.21,0.38,10.7,5 +6.5,0.15,0.55,5.9,0.045,75,162,0.99482,2.97,0.4,9.3,5 +5.9,0.32,0.33,2.1,0.027,35,138,0.98945,3.37,0.42,12.7,6 +5.7,0.37,0.3,1.1,0.029,24,88,0.98883,3.18,0.39,11.7,6 +7.9,0.25,0.35,6.7,0.039,22,64,0.99362,2.93,0.49,10.7,5 +7.2,0.21,0.28,2.7,0.033,38,94,0.99075,2.99,0.43,11.8,7 +7,0.24,0.3,6.7,0.039,37,125,0.99436,3.2,0.39,9.9,5 +6.8,0.475,0.33,3.95,0.047,16,81,0.98988,3.23,0.53,13.4,7 +7,0.28,0.32,7.75,0.032,30,114,0.99158,3.12,0.64,12.8,7 +6.9,0.4,0.3,10.6,0.033,24,87,0.99265,3.15,0.45,12.8,6 +6.6,0.41,0.31,1.6,0.042,18,101,0.99195,3.13,0.41,10.5,5 +6.4,0.2,0.28,2.5,0.032,24,84,0.99168,3.31,0.55,11.5,5 +8.5,0.22,0.34,0.7,0.04,5,25,0.9918,3.04,0.37,10.5,4 +8.4,0.36,0.36,11.1,0.032,21,132,0.99313,2.95,0.39,13,6 +5.2,0.285,0.29,5.15,0.035,64,138,0.9895,3.19,0.34,12.4,8 +6.9,0.2,0.3,4.7,0.041,40,148,0.9932,3.16,0.35,10.2,6 +6.7,0.42,0.46,9.7,0.054,67,234,0.99848,3.23,0.5,9,5 +6.2,0.16,0.34,1.7,0.038,85,153,0.9909,3.33,0.86,12,7 +6.4,0.125,0.36,1.4,0.044,22,68,0.99014,3.15,0.5,11.7,7 +6.4,0.44,0.26,2,0.054,20,180,0.9952,3.58,0.57,10,5 +7,0.31,0.39,7.5,0.055,42,218,0.99652,3.37,0.54,10.3,5 +6.7,0.42,0.46,9.7,0.054,67,234,0.99848,3.23,0.5,9,5 +8.6,0.18,0.28,0.8,0.032,25,78,0.99104,2.99,0.38,11.1,5 +6.2,0.21,0.26,13.1,0.05,59,150,0.99772,3.31,0.46,9,5 +6.1,0.16,0.37,1.1,0.031,37,97,0.9922,3.4,0.58,10.5,6 +6.5,0.22,0.32,2.2,0.028,36,92,0.99076,3.27,0.59,11.9,7 +6.2,0.36,0.14,8.9,0.036,38,155,0.99622,3.27,0.5,9.4,5 +5.7,0.21,0.25,1.1,0.035,26,81,0.9902,3.31,0.52,11.4,6 +6.4,0.25,0.32,0.9,0.034,40,114,0.99114,3.31,0.58,10.8,7 +7.6,0.31,0.26,1.7,0.073,40,157,0.9938,3.1,0.46,9.8,5 +6.6,0.26,0.46,6.9,0.047,59,183,0.99594,3.2,0.45,9.3,5 +5.7,0.21,0.25,1.1,0.035,26,81,0.9902,3.31,0.52,11.4,6 +6.2,0.2,0.31,1,0.031,22,73,0.99035,3.24,0.52,11.3,6 +6.2,0.18,0.3,1,0.031,23,73,0.99032,3.23,0.52,11.3,6 +6.1,0.37,0.2,7.6,0.031,49,170,0.99558,3.22,0.48,9.5,5 +6.2,0.36,0.14,8.9,0.036,38,155,0.99622,3.27,0.5,9.4,5 +6.5,0.22,0.32,2.2,0.028,36,92,0.99076,3.27,0.59,11.9,7 +7.7,0.18,0.3,1.2,0.046,49,199,0.99413,3.03,0.38,9.3,5 +6.9,0.14,0.38,1,0.041,22,81,0.99043,3.03,0.54,11.4,6 +6.9,0.14,0.38,1,0.041,22,81,0.99043,3.03,0.54,11.4,6 +6,0.44,0.26,3.1,0.053,57,128,0.98982,3.22,0.39,12.7,6 +7.1,0.36,0.4,1.95,0.033,26,118,0.98934,3.2,0.45,13.5,7 +5.7,0.28,0.28,2.2,0.019,15,65,0.9902,3.06,0.52,11.2,6 +6.4,0.16,0.32,8.75,0.038,38,118,0.99449,3.19,0.41,10.7,5 +7.4,0.28,0.4,11.9,0.032,13,92,0.99629,3.01,0.46,10.8,4 +6.7,0.39,0.31,2.7,0.054,27,202,0.9948,3.46,0.57,10.5,6 +6.5,0.44,0.47,5.45,0.014,44,137,0.98984,3.13,0.32,13,8 +6.9,0.22,0.31,6.3,0.029,41,131,0.99326,3.08,0.49,10.8,6 +6.6,0.22,0.29,14.4,0.046,39,118,0.99834,3.05,0.5,9.1,6 +7.7,0.25,0.3,7.8,0.038,67,196,0.99555,3.1,0.5,10.1,5 +5.2,0.155,0.33,1.6,0.028,13,59,0.98975,3.3,0.84,11.9,8 +7,0.31,0.31,9.1,0.036,45,140,0.99216,2.98,0.31,12,7 +7,0.31,0.31,9.1,0.036,45,140,0.99216,2.98,0.31,12,7 +6.6,0.22,0.29,14.4,0.046,39,118,0.99834,3.05,0.5,9.1,6 +5.6,0.21,0.4,1.3,0.041,81,147,0.9901,3.22,0.95,11.6,8 +5.2,0.155,0.33,1.6,0.028,13,59,0.98975,3.3,0.84,11.9,8 +6.4,0.25,0.32,11.3,0.038,69,192,0.99573,3.14,0.5,10.2,6 
+6.9,0.22,0.31,6.3,0.029,41,131,0.99326,3.08,0.49,10.8,6 +5.3,0.21,0.29,0.7,0.028,11,66,0.99215,3.3,0.4,9.8,5 +7.1,0.27,0.28,1.25,0.023,3,89,0.98993,2.95,0.3,11.4,4 +5.2,0.17,0.27,0.7,0.03,11,68,0.99218,3.3,0.41,9.8,5 +7.7,0.25,0.3,7.8,0.038,67,196,0.99555,3.1,0.5,10.1,5 +7,0.12,0.29,10.3,0.039,41,98,0.99564,3.19,0.38,9.8,8 +7,0.12,0.29,10.3,0.039,41,98,0.99564,3.19,0.38,9.8,8 +7.1,0.29,0.34,7.8,0.036,49,128,0.99397,3.21,0.4,10.7,6 +7.2,0.3,0.3,8.7,0.022,14,111,0.99576,3.11,0.61,10.6,5 +6.8,0.26,0.46,8.3,0.037,49,173,0.99601,3.17,0.47,9.3,5 +7,0.12,0.29,10.3,0.039,41,98,0.99564,3.19,0.38,9.8,8 +7.1,0.29,0.34,7.8,0.036,49,128,0.99397,3.21,0.4,10.7,6 +4.9,0.33,0.31,1.2,0.016,39,150,0.98713,3.33,0.59,14,8 +5.1,0.29,0.28,8.3,0.026,27,107,0.99308,3.36,0.37,11,6 +5.1,0.29,0.28,8.3,0.026,27,107,0.99308,3.36,0.37,11,6 +6.8,0.26,0.48,6.2,0.049,55,182,0.99582,3.21,0.45,9.4,6 +6,0.28,0.52,5,0.078,30,139,0.99494,3.1,0.36,9,6 +6,0.28,0.25,1.8,0.042,8,108,0.9929,3.08,0.55,9,5 +7.2,0.2,0.22,1.6,0.044,17,101,0.99471,3.37,0.53,10,5 +6.1,0.27,0.25,1.8,0.041,9,109,0.9929,3.08,0.54,9,5 +6,0.28,0.25,1.8,0.042,8,108,0.9929,3.08,0.55,9,5 +6.4,0.29,0.3,2.9,0.036,25,79,0.99037,3.29,0.6,12.4,7 +7.4,0.35,0.24,6,0.042,28,123,0.99304,3.14,0.44,11.3,5 +8.1,0.12,0.38,0.9,0.034,36,86,0.99026,2.8,0.55,12,6 +6.4,0.12,0.3,1.1,0.031,37,94,0.98986,3.01,0.56,11.7,6 +7.2,0.2,0.22,1.6,0.044,17,101,0.99471,3.37,0.53,10,5 +7.3,0.4,0.26,5.45,0.016,26,90,0.98951,2.84,0.54,13.2,7 +7.7,0.11,0.34,14.05,0.04,41,114,0.99634,3.07,0.59,11,7 +6.9,0.23,0.41,8,0.03,30,114,0.99368,3.22,0.54,11,6 +6.9,0.38,0.38,13.1,0.112,14,94,0.99792,3.02,0.48,9.2,5 +7.5,0.38,0.29,4.9,0.021,38,113,0.99026,3.08,0.48,13,7 +5.8,0.19,0.24,1.3,0.044,38,128,0.99362,3.77,0.6,10.6,5 +5.5,0.34,0.26,2.2,0.021,31,119,0.98919,3.55,0.49,13,8 +6.6,0.23,0.3,14.9,0.051,33,118,0.99835,3.04,0.54,9,6 +6.6,0.23,0.3,14.9,0.051,33,118,0.99835,3.04,0.54,9,6 +8.4,0.31,0.31,0.95,0.021,52,148,0.99038,2.93,0.32,11.5,5 +6.7,0.2,0.3,1.4,0.025,17,76,0.99104,3.11,0.44,11,6 +8.4,0.31,0.31,0.95,0.021,52,148,0.99038,2.93,0.32,11.5,5 +7.3,0.26,0.24,1.7,0.05,10,112,0.99286,3.11,0.43,9.9,5 +6.3,0.22,0.22,5.6,0.039,31,128,0.99296,3.12,0.46,10.4,6 +6.6,0.23,0.3,14.9,0.051,33,118,0.99835,3.04,0.54,9,6 +7.5,0.19,0.4,7.1,0.056,50,110,0.9954,3.06,0.52,9.9,6 +8,0.14,0.33,1.2,0.045,71,162,0.9914,3.07,0.47,11,6 +6.8,0.32,0.39,9.6,0.026,34,124,0.99286,3.18,0.35,12.1,6 +6.6,0.23,0.2,11.4,0.044,45,131,0.99604,2.96,0.51,9.7,6 +6.6,0.23,0.2,11.4,0.044,45,131,0.99604,2.96,0.51,9.7,6 +6.7,0.36,0.26,7.9,0.034,39,123,0.99119,2.99,0.3,12.2,7 +6.1,0.38,0.42,5,0.016,31,113,0.99007,3.15,0.31,12.4,7 +8.5,0.23,0.28,11.1,0.033,30,97,0.99507,3.03,0.39,10.5,7 +7,0.2,0.31,8,0.05,29,213,0.99596,3.28,0.57,10.4,6 +6,0.26,0.32,3.8,0.029,48,180,0.99011,3.15,0.34,12,6 +6.9,0.3,0.3,10.55,0.037,4,28,0.99184,3.07,0.32,12.7,6 +6.7,0.18,0.28,10.2,0.039,29,115,0.99469,3.11,0.45,10.9,7 +6.7,0.18,0.28,10.2,0.039,29,115,0.99469,3.11,0.45,10.9,7 +6.8,0.18,0.28,9.8,0.039,29,113,0.99406,3.11,0.45,10.9,7 +7.2,0.19,0.31,6.3,0.034,17,103,0.99305,3.15,0.52,11.4,7 +6.2,0.16,0.32,1.1,0.036,74,184,0.99096,3.22,0.41,11,6 +5,0.27,0.32,4.5,0.032,58,178,0.98956,3.45,0.31,12.6,7 +6.3,0.37,0.28,6.3,0.034,45,152,0.9921,3.29,0.46,11.6,7 +6.6,0.2,0.27,10.9,0.038,29,130,0.99496,3.11,0.44,10.5,7 +6.8,0.18,0.28,9.8,0.039,29,113,0.99406,3.11,0.45,10.9,7 +6.8,0.18,0.28,9.8,0.039,29,113,0.99406,3.11,0.45,10.9,7 +6.6,0.28,0.34,0.8,0.037,42,119,0.9888,3.03,0.37,12.5,6 +6.5,0.35,0.36,0.8,0.034,32,111,0.98942,3.11,0.5,12.1,8 
+6.9,0.25,0.33,1.2,0.035,35,158,0.99082,3.02,0.58,11.3,6 +6,0.32,0.3,1.3,0.025,18,112,0.98802,3.07,0.64,13.3,7 +6.8,0.18,0.28,9.8,0.039,29,113,0.99406,3.11,0.45,10.9,7 +6.7,0.18,0.28,10.2,0.039,29,115,0.99469,3.11,0.45,10.9,7 +6.6,0.2,0.27,10.9,0.038,29,130,0.99496,3.11,0.44,10.5,7 +6.3,0.37,0.28,6.3,0.034,45,152,0.9921,3.29,0.46,11.6,7 +7.2,0.19,0.31,6.3,0.034,17,103,0.99305,3.15,0.52,11.4,7 +6.3,0.18,0.36,1.2,0.034,26,111,0.99074,3.16,0.51,11,6 +6.9,0.3,0.36,0.9,0.037,40,156,0.98968,3.08,0.36,12.1,6 +6.2,0.16,0.32,1.1,0.036,74,184,0.99096,3.22,0.41,11,6 +5,0.27,0.32,4.5,0.032,58,178,0.98956,3.45,0.31,12.6,7 +5,0.3,0.33,3.7,0.03,54,173,0.9887,3.36,0.3,13,7 +6.5,0.2,0.5,18.1,0.054,50,221,0.99941,2.94,0.64,8.8,6 +6.7,0.25,0.31,1.35,0.061,30.5,218,0.99388,3.16,0.53,9.5,5 +6.6,0.22,0.36,5.5,0.029,30,105,0.99206,3.2,0.47,11.8,6 +6.8,0.25,0.37,3.1,0.026,29,93,0.99035,3.14,0.45,12.2,6 +7,0.13,0.37,12.85,0.042,36,105,0.99581,3.05,0.55,10.7,6 +7,0.45,0.34,19.8,0.04,12,67,0.9976,3.07,0.38,11,6 +7.2,0.32,0.3,8.25,0.02,14,104,0.99362,2.99,0.44,11.4,6 +7,0.13,0.37,12.85,0.042,36,105,0.99581,3.05,0.55,10.7,6 +5.9,0.34,0.3,3.8,0.035,57,135,0.99016,3.09,0.34,12,6 +6.8,0.22,0.31,6.9,0.037,33,121,0.99176,3.02,0.39,11.9,8 +7.2,0.32,0.3,8.25,0.02,14,104,0.99362,2.99,0.44,11.4,6 +8.4,0.32,0.35,11.7,0.029,3,46,0.99439,3.02,0.34,11.8,6 +6.8,0.27,0.29,4.6,0.046,6,88,0.99458,3.34,0.48,10.6,4 +8,0.74,0.21,4,0.05,24,133,0.99418,3.06,0.38,9.7,5 +7,0.45,0.34,19.8,0.04,12,67,0.9976,3.07,0.38,11,6 +7,0.13,0.37,12.85,0.042,36,105,0.99581,3.05,0.55,10.7,6 +5.4,0.22,0.29,1.2,0.045,69,152,0.99178,3.76,0.63,11,7 +8.4,0.22,0.3,8.9,0.024,17,118,0.99456,2.99,0.34,10.5,6 +7.4,0.32,0.22,11.7,0.035,44,150,0.99578,3.1,0.45,10.4,5 +7.5,0.18,0.37,6.2,0.05,21,138,0.99546,3.2,0.55,10.5,6 +7.1,0.47,0.29,14.8,0.024,22,142,0.99518,3.12,0.48,12,8 +7.1,0.47,0.29,14.8,0.024,22,142,0.99518,3.12,0.48,12,8 +5.8,0.19,0.25,10.8,0.042,33,124,0.99646,3.22,0.41,9.2,6 +6.7,0.14,0.46,1.6,0.036,15,92,0.99264,3.37,0.49,10.9,5 +6.8,0.24,0.38,8.3,0.045,50,185,0.99578,3.15,0.5,9.5,6 +6.9,0.25,0.47,8.4,0.042,36,156,0.99604,3.15,0.55,9.4,6 +6,0.24,0.33,2.5,0.026,31,85,0.99014,3.13,0.5,11.3,7 +6.8,0.29,0.34,3.5,0.054,26,189,0.99489,3.42,0.58,10.4,5 +6.3,0.33,0.42,17.2,0.037,57,170,0.99884,3.26,0.57,9.4,6 +6.5,0.23,0.45,2.1,0.027,43,104,0.99054,3.02,0.52,11.3,6 +6.3,0.27,0.29,12.2,0.044,59,196,0.99782,3.14,0.4,8.8,6 +6.3,0.2,0.37,11.8,0.045,58,130,0.99519,3.2,0.35,10.8,5 +6.2,0.33,0.41,16.8,0.037,58,173,0.99882,3.25,0.57,9.4,6 +6.3,0.33,0.42,17.2,0.037,57,170,0.99884,3.26,0.57,9.4,6 +7.2,0.21,1,1.1,0.154,46,114,0.9931,2.95,0.43,9.2,6 +6,0.27,0.3,14.7,0.044,15,144,0.99666,3.12,0.53,10.3,6 +5.7,0.12,0.26,5.5,0.034,21,99,0.99324,3.09,0.57,9.9,6 +6.9,0.24,0.37,6.1,0.027,38,112,0.99086,3.19,0.34,12.4,6 +7.7,0.18,0.53,1.2,0.041,42,167,0.9908,3.11,0.44,11.9,5 +7.1,0.17,0.43,1.3,0.023,33,132,0.99067,3.11,0.56,11.7,6 +7.5,0.33,0.38,8.7,0.126,49,199,0.99711,2.98,0.57,9.4,5 +6.2,0.255,0.24,1.7,0.039,138.5,272,0.99452,3.53,0.53,9.6,4 +7.5,0.33,0.38,8.7,0.126,49,199,0.99711,2.98,0.57,9.4,5 +5.6,0.2,0.66,10.2,0.043,78,175,0.9945,2.98,0.43,10.4,7 +7.6,0.17,0.36,4.5,0.042,26,102,0.99427,3.09,0.47,9.5,5 +5.8,0.15,0.31,5.9,0.036,7,73,0.99152,3.2,0.43,11.9,6 +6.3,0.25,0.44,1.7,0.024,36,116,0.98935,3.18,0.4,12.5,6 +6.9,0.28,0.41,1.4,0.016,6,55,0.98876,3.16,0.4,13.4,5 +7.2,0.27,0.37,5.4,0.026,27,114,0.99174,3.13,0.84,12.7,5 +6.2,0.25,0.38,7.9,0.045,54,208,0.99572,3.17,0.46,9.1,5 +8.5,0.19,0.48,1.1,0.026,23,58,0.99184,2.9,0.5,10.5,6 
+6.2,0.25,0.54,7,0.046,58,176,0.99454,3.19,0.7,10.4,5 +6.2,0.25,0.54,7,0.046,58,176,0.99454,3.19,0.7,10.4,5 +6.8,0.28,0.43,7.6,0.03,30,110,0.99164,3.08,0.59,12.5,8 +6.2,0.25,0.54,7,0.046,58,176,0.99454,3.19,0.7,10.4,5 +7.4,0.21,0.8,12.3,0.038,77,183,0.99778,2.95,0.48,9,5 +7,0.15,0.38,15.3,0.045,54,120,0.9975,3.18,0.42,9.8,6 +7.4,0.21,0.8,12.3,0.038,77,183,0.99778,2.95,0.48,9,5 +7.3,0.28,0.42,1.2,0.033,29,142,0.99205,3.17,0.43,10.7,4 +6.1,0.18,0.38,2.3,0.033,28,111,0.98962,3.16,0.49,12.4,6 +7,0.53,0.43,6.1,0.029,6,76,0.99118,3.08,0.5,12.5,8 +6.8,0.28,0.43,7.6,0.03,30,110,0.99164,3.08,0.59,12.5,8 +6.5,0.36,0.38,10.2,0.028,20,82,0.99274,3.1,0.43,12.1,7 +7.5,0.25,0.47,4.1,0.041,95,163,0.99184,2.92,0.59,11.3,6 +6.7,0.24,0.41,2.9,0.039,48,122,0.99052,3.25,0.43,12,5 +6.6,0.25,0.33,8.5,0.042,29,141,0.99546,3.28,0.6,10.4,5 +6.4,0.15,0.4,1.5,0.042,23,87,0.98972,3.11,0.46,12.2,7 +6.3,0.28,0.3,3.1,0.039,24,115,0.9942,3.05,0.43,8.6,5 +6.2,0.25,0.38,7.9,0.045,54,208,0.99572,3.17,0.46,9.1,5 +7.1,0.28,0.35,3.5,0.028,35,91,0.99022,2.96,0.33,12.1,5 +6.6,0.35,0.34,4.9,0.032,9,125,0.99253,3.32,0.81,12,5 +8.5,0.19,0.48,1.1,0.026,23,58,0.99184,2.9,0.5,10.5,6 +6.2,0.25,0.54,7,0.046,58,176,0.99454,3.19,0.7,10.4,5 +6,0.35,0.51,1.2,0.029,10,102,0.9903,3.46,0.42,11.9,6 +5.8,0.31,0.32,4.5,0.024,28,94,0.98906,3.25,0.52,13.7,7 +6.6,0.17,0.35,2.6,0.03,33,78,0.99146,3.22,0.72,11.3,6 +8.5,0.23,0.4,9.9,0.036,24,88,0.9951,3.02,0.42,10.5,6 +5.8,0.31,0.32,4.5,0.024,28,94,0.98906,3.25,0.52,13.7,7 +6.1,0.2,0.34,9.5,0.041,38,201,0.995,3.14,0.44,10.1,3 +6.3,0.37,0.37,1.5,0.024,12,76,0.98876,2.94,0.39,12.3,6 +6.2,0.36,0.38,3.2,0.031,20,89,0.98956,3.06,0.33,12,7 +6.6,0.17,0.35,2.6,0.03,33,78,0.99146,3.22,0.72,11.3,6 +6.3,0.28,0.47,11.2,0.04,61,183,0.99592,3.12,0.51,9.5,6 +7.6,0.27,0.52,3.2,0.043,28,152,0.99129,3.02,0.53,11.4,6 +7,0.25,0.45,2.3,0.045,40,118,0.99064,3.16,0.48,11.9,7 +9.7,0.24,0.49,4.9,0.032,3,18,0.99368,2.85,0.54,10,6 +9.7,0.24,0.49,4.9,0.032,3,18,0.99368,2.85,0.54,10,6 +6.8,0.13,0.39,1.4,0.034,19,102,0.99121,3.23,0.6,11.3,7 +6.6,0.78,0.5,1.5,0.045,30,133,0.99104,3.25,0.48,11.7,5 +5.1,0.33,0.27,6.7,0.022,44,129,0.99221,3.36,0.39,11,7 +6.7,0.34,0.4,2.1,0.033,34,111,0.98924,2.97,0.48,12.2,7 +6.7,0.14,0.51,4.3,0.028,57,124,0.99176,2.91,0.54,10.7,7 +7,0.26,0.34,10.9,0.038,25,84,0.99432,3.11,0.34,10.9,6 +6.5,0.29,0.26,7,0.04,18,113,0.99366,3.17,0.38,10.2,6 +7,0.25,0.45,2.3,0.045,40,118,0.99064,3.16,0.48,11.9,7 +7.6,0.21,0.49,2.5,0.047,20,130,0.99178,3.15,0.48,11.1,5 +7.7,0.26,0.51,2.6,0.045,26,159,0.99126,3,0.5,11.2,6 +7.6,0.27,0.52,3.2,0.043,28,152,0.99129,3.02,0.53,11.4,6 +7.7,0.25,0.49,2.5,0.047,31,169,0.99252,3.07,0.57,10.6,6 +7.6,0.35,0.46,14.7,0.047,33,151,0.99709,3.03,0.53,10.3,5 +6.9,0.3,0.36,4.5,0.054,31,203,0.99513,3.4,0.57,10.4,4 +6.7,0.24,0.46,2.2,0.033,19,111,0.99045,3.1,0.62,11.9,6 +6.5,0.23,0.39,1.9,0.036,41,98,0.99,3.19,0.43,11.9,7 +7.6,0.23,0.34,1.6,0.043,24,129,0.99305,3.12,0.7,10.4,5 +6.5,0.24,0.39,17.3,0.052,22,126,0.99888,3.11,0.47,9.2,6 +6.3,0.17,0.32,4.2,0.04,37,117,0.99182,3.24,0.43,11.3,6 +6.3,0.17,0.32,4.2,0.04,37,117,0.99182,3.24,0.43,11.3,6 +6.7,0.21,0.37,2.5,0.034,35,89,0.9913,3.25,0.5,11,7 +6.5,0.23,0.39,1.9,0.036,41,98,0.99,3.19,0.43,11.9,7 +5.9,0.28,0.39,1.4,0.031,47,147,0.98836,3.08,0.64,12.9,7 +5.9,0.19,0.37,0.8,0.027,3,21,0.9897,3.09,0.31,10.8,5 +6.2,0.25,0.42,8,0.049,53,206,0.99586,3.16,0.47,9.1,6 +7.6,0.23,0.34,1.6,0.043,24,129,0.99305,3.12,0.7,10.4,5 +5.6,0.18,0.27,1.7,0.03,31,103,0.98892,3.35,0.37,12.9,6 +5.5,0.18,0.22,5.5,0.037,10,86,0.99156,3.46,0.44,12.2,5 
+6.5,0.24,0.39,17.3,0.052,22,126,0.99888,3.11,0.47,9.2,6 +7.4,0.23,0.38,8.6,0.052,41,150,0.99534,3.06,0.46,10.3,5 +7.2,0.17,0.37,6.9,0.059,47,128,0.99322,3.08,0.46,11,7 +7.6,0.3,0.38,2.1,0.043,10,98,0.99296,3.17,0.65,11,5 +5,0.24,0.21,2.2,0.039,31,100,0.99098,3.69,0.62,11.7,6 +6.1,0.21,0.38,1.5,0.039,37,122,0.98972,3.2,0.43,12,6 +6.5,0.33,0.38,2.5,0.047,30,148,0.98964,3.17,0.43,12.7,6 +6.3,0.35,0.26,17.6,0.061,59,198,0.99918,3.11,0.49,8.8,5 +6.3,0.17,0.32,4.2,0.04,37,117,0.99182,3.24,0.43,11.3,6 +6.6,0.25,0.35,2.9,0.034,38,121,0.99008,3.19,0.4,12.8,6 +6.5,0.16,0.33,4.8,0.043,45,114,0.992,3.18,0.44,11.2,6 +6.6,0.39,0.39,11.9,0.057,51,221,0.99851,3.26,0.51,8.9,6 +5.6,0.19,0.27,0.9,0.04,52,103,0.99026,3.5,0.39,11.2,5 +6.2,0.25,0.39,1.3,0.051,42,135,0.9906,3.23,0.4,11.1,6 +6.9,0.22,0.43,6.4,0.042,34,115,0.99293,3.05,0.51,10.8,6 +6.2,0.19,0.29,4.3,0.045,33,126,0.99658,3.18,0.42,9.3,6 +6.6,0.39,0.39,11.9,0.057,51,221,0.99851,3.26,0.51,8.9,6 +5.9,0.33,0.32,8.1,0.038,9,34,0.9911,3.22,0.36,12.7,7 +7.8,0.17,0.5,1.3,0.045,35,140,0.9904,3.16,0.4,12,6 +5.5,0.19,0.27,0.9,0.04,52,103,0.99026,3.5,0.39,11.2,5 +6.2,0.23,0.36,17.2,0.039,37,130,0.99946,3.23,0.43,8.8,6 +6.2,0.23,0.36,17.2,0.039,37,130,0.99946,3.23,0.43,8.8,6 +6.2,0.23,0.36,17.2,0.039,37,130,0.99946,3.23,0.43,8.8,6 +7.2,0.32,0.4,8.7,0.038,45,154,0.99568,3.2,0.47,10.4,6 +6.2,0.23,0.36,17.2,0.039,37,130,0.99946,3.23,0.43,8.8,6 +7.2,0.32,0.4,8.7,0.038,45,154,0.99568,3.2,0.47,10.4,6 +5.8,0.39,0.47,7.5,0.027,12,88,0.9907,3.38,0.45,14,6 +6.2,0.23,0.36,17.2,0.039,37,130,0.99946,3.23,0.43,8.8,6 +7.6,0.25,1.23,4.6,0.035,51,294,0.99018,3.03,0.43,13.1,6 +5.8,0.29,0.33,3.7,0.029,30,88,0.98994,3.25,0.42,12.3,6 +7.2,0.4,0.38,2.2,0.03,40,109,0.99075,3.27,0.46,12.6,6 +6.8,0.39,0.34,7.4,0.02,38,133,0.99212,3.18,0.44,12,7 +6.1,0.17,0.42,15.1,0.033,28,124,0.99684,2.87,0.47,9.5,5 +6.8,0.39,0.34,7.4,0.02,38,133,0.99212,3.18,0.44,12,7 +7.1,0.36,0.37,4.8,0.019,39,114,0.99036,3.08,0.49,12.7,7 +6.9,0.19,0.32,7.9,0.042,30,130,0.99456,3.4,0.39,10.5,6 +6.5,0.34,0.46,1,0.023,6,80,0.98865,3.15,0.54,12.9,6 +6.1,0.17,0.42,15.1,0.033,28,124,0.99684,2.87,0.47,9.5,5 +6.8,0.39,0.34,7.4,0.02,38,133,0.99212,3.18,0.44,12,7 +7.1,0.36,0.37,4.8,0.019,39,114,0.99036,3.08,0.49,12.7,7 +7.8,0.3,0.36,4.6,0.024,20,198,0.99222,3.06,0.66,11.9,6 +6.1,0.68,0.52,1.4,0.037,32,123,0.99022,3.24,0.45,12,6 +5.2,0.34,0.37,6.2,0.031,42,133,0.99076,3.25,0.41,12.5,6 +5.6,0.28,0.4,6.1,0.034,36,118,0.99144,3.21,0.43,12.1,7 +6.2,0.19,0.38,5.1,0.019,22,82,0.98961,3.05,0.36,12.5,6 +5.7,0.16,0.26,6.3,0.043,28,113,0.9936,3.06,0.58,9.9,6 +7.6,0.17,0.46,0.9,0.036,63,147,0.99126,3.02,0.41,10.7,6 +7.3,0.2,0.39,2.3,0.048,24,87,0.99044,2.94,0.35,12,6 +6.7,0.33,0.36,6.6,0.042,34,116,0.99123,2.97,0.31,12.2,8 +6.7,0.33,0.34,7.5,0.036,39,124,0.99123,2.99,0.32,12.4,8 +6.9,0.36,0.35,8.6,0.038,37,125,0.9916,3,0.32,12.4,8 +7.8,0.21,0.34,11.9,0.039,55,140,0.9959,3.02,0.31,10.3,6 +7.3,0.2,0.39,2.3,0.048,24,87,0.99044,2.94,0.35,12,6 +5.6,0.41,0.22,7.1,0.05,44,154,0.9931,3.3,0.4,10.5,5 +7.6,0.15,0.35,4.3,0.051,23,98,0.99422,3.1,0.44,9.5,6 +8.5,0.2,0.4,1.1,0.046,31,106,0.99194,3,0.35,10.5,4 +6.5,0.24,0.38,1,0.027,31,90,0.98926,3.24,0.36,12.3,6 +8.3,0.16,0.37,7.9,0.025,38,107,0.99306,2.93,0.37,11.9,6 +5.5,0.12,0.33,1,0.038,23,131,0.99164,3.25,0.45,9.8,5 +6.5,0.24,0.38,1,0.027,31,90,0.98926,3.24,0.36,12.3,6 +6.2,0.1,0.41,1,0.04,17,76,0.98988,3.14,0.56,11.4,7 +6.5,0.21,0.4,7.3,0.041,49,115,0.99268,3.21,0.43,11,6 +8.7,0.3,0.59,1.7,0.046,10,70,0.99373,3.06,0.56,10.8,4 +6.7,0.18,0.37,1.3,0.027,42,125,0.98939,3.24,0.37,12.8,7 
+7,0.17,0.36,6.4,0.055,42,123,0.99318,3.11,0.5,11,8 +6.6,0.19,0.33,1.8,0.035,42,148,0.99196,3.15,0.36,10.2,5 +5.8,0.28,0.3,1.5,0.026,31,114,0.98952,3.32,0.6,12.5,7 +7.6,0.24,0.44,3.8,0.037,49,146,0.9911,3.06,0.37,11.6,6 +8.3,0.16,0.37,7.9,0.025,38,107,0.99306,2.93,0.37,11.9,6 +5.5,0.12,0.33,1,0.038,23,131,0.99164,3.25,0.45,9.8,5 +5.7,0.16,0.32,1.2,0.036,7,89,0.99111,3.26,0.48,11,5 +7,0.21,0.42,5.3,0.037,36,123,0.99321,3.14,0.52,10.9,6 +6.4,0.22,0.38,9.1,0.044,35,127,0.99326,2.97,0.3,11,7 +7.9,0.34,0.44,6.5,0.027,47,126,0.99124,2.96,0.37,12.5,6 +6.4,0.22,0.38,9.1,0.044,35,127,0.99326,2.97,0.3,11,7 +6.8,0.21,0.4,6.3,0.032,40,121,0.99214,3.18,0.53,12,7 +5.2,0.31,0.36,5.1,0.031,46,145,0.9897,3.14,0.31,12.4,7 +7.9,0.34,0.44,6.5,0.027,47,126,0.99124,2.96,0.37,12.5,6 +5.6,0.42,0.34,2.4,0.022,34,97,0.98915,3.22,0.38,12.8,7 +6.4,0.22,0.38,9.1,0.044,35,127,0.99326,2.97,0.3,11,7 +6.8,0.28,0.34,7.5,0.035,34,177,0.99692,3.33,0.43,9.1,5 +6.8,0.45,0.36,5,0.033,28,156,0.991,3.11,0.4,12.4,7 +6.6,0.29,0.39,6.75,0.031,22,98,0.9913,3.15,0.8,12.9,7 +6.8,0.21,0.42,1.2,0.045,24,126,0.99234,3.09,0.87,10.9,6 +6.8,0.25,0.24,1.6,0.045,39,164,0.99402,3.53,0.58,10.8,5 +6.4,0.21,0.34,16.05,0.04,56,142,0.99678,3.11,0.38,10.6,5 +5.8,0.33,0.23,5,0.053,29,106,0.99458,3.13,0.52,9,5 +8.2,0.3,0.44,12.4,0.043,52,154,0.99452,3.04,0.33,12,6 +6.4,0.24,0.32,0.95,0.041,23,131,0.99033,3.25,0.35,11.8,5 +7.5,0.18,0.45,4.6,0.041,67,158,0.9927,3.01,0.38,10.6,6 +5.2,0.335,0.2,1.7,0.033,17,74,0.99002,3.34,0.48,12.3,6 +7.1,0.14,0.33,1,0.104,20,54,0.99057,3.19,0.64,11.5,6 +7.2,0.13,0.46,1.3,0.044,48,111,0.99127,2.97,0.45,11.1,5 +5.8,0.33,0.23,5,0.053,29,106,0.99458,3.13,0.52,9,5 +6.5,0.29,0.25,2.5,0.142,8,111,0.9927,3,0.44,9.9,4 +6.2,0.35,0.31,2.6,0.036,37,92,0.98938,3.27,0.53,12.8,7 +9,0.38,0.53,2.1,0.102,19,76,0.99001,2.93,0.57,12.9,5 +6.6,0.24,0.38,12.75,0.034,8,74,0.99386,3.1,0.57,12.9,6 +6.6,0.16,0.34,1.1,0.037,41,115,0.9899,3.01,0.68,12,6 +8.2,0.3,0.44,12.4,0.043,52,154,0.99452,3.04,0.33,12,6 +5.7,0.15,0.28,3.7,0.045,57,151,0.9913,3.22,0.27,11.2,6 +6.6,0.33,0.4,2.65,0.041,35,86,0.98916,3.11,0.39,13.3,7 +5.7,0.2,0.3,2.5,0.046,38,125,0.99276,3.34,0.5,9.9,6 +6.8,0.27,0.37,8.2,0.055,52,192,0.99586,3.11,0.52,9.5,6 +6.8,0.27,0.42,7.3,0.054,58,200,0.99556,3.12,0.49,9.4,6 +6.2,0.2,0.26,1.1,0.047,42,119,0.99158,3.48,0.6,11,7 +6.7,0.13,0.57,6.6,0.056,60,150,0.99548,2.96,0.43,9.4,6 +6.8,0.21,0.37,7,0.038,27,107,0.99206,2.98,0.82,11.5,6 +6.7,0.31,0.32,14.5,0.038,6,79,0.99412,3.14,0.34,12.5,5 +6.2,0.2,0.29,11.8,0.035,21,93,0.99364,3.18,0.34,11.9,6 +6.6,0.25,0.34,3,0.054,22,141,0.99338,3.26,0.47,10.4,6 +5.7,0.15,0.28,3.7,0.045,57,151,0.9913,3.22,0.27,11.2,6 +6.9,0.22,0.39,6,0.035,44,141,0.99123,3.11,0.33,12.5,6 +6.4,0.23,0.35,4.6,0.039,43,147,0.99216,3.18,0.4,11,7 +7.6,0.27,0.29,2.5,0.059,37,115,0.99328,3.09,0.37,9.8,5 +6.6,0.34,0.24,3.3,0.034,29,99,0.99031,3.1,0.4,12.3,7 +6.4,0.16,0.42,1,0.036,29,113,0.9908,3.18,0.52,11,6 +5.8,0.3,0.42,1.1,0.036,19,113,0.98871,3.1,0.46,12.6,7 +7,0.29,0.35,1.4,0.036,42,109,0.99119,3.31,0.62,11.6,6 +6.6,0.34,0.24,3.3,0.034,29,99,0.99031,3.1,0.4,12.3,7 +6.7,0.21,0.36,8.55,0.02,20,86,0.99146,3.19,0.22,13.4,7 +7.6,0.27,0.29,2.5,0.059,37,115,0.99328,3.09,0.37,9.8,5 +6.8,0.22,0.41,6.7,0.034,39,116,0.99245,3.18,0.46,11.5,6 +7.7,0.27,0.49,3.8,0.037,46,139,0.99116,3.04,0.38,11.6,6 +6.4,0.25,0.37,4.5,0.039,41,147,0.9921,3.18,0.4,11.1,7 +6.4,0.23,0.35,4.6,0.039,43,147,0.99216,3.18,0.4,11,7 +6.7,0.13,0.45,4.2,0.043,52,131,0.99162,3.06,0.54,11.3,6 +6.7,0.24,0.37,11.3,0.043,64,173,0.99632,3.08,0.53,9.9,6 
+7.1,0.26,0.37,5.5,0.025,31,105,0.99082,3.06,0.33,12.6,8 +5.3,0.3,0.16,4.2,0.029,37,100,0.9905,3.3,0.36,11.8,8 +7.1,0.38,0.4,2.2,0.042,54,201,0.99177,3.03,0.5,11.4,5 +7.4,0.19,0.31,14.5,0.045,39,193,0.9986,3.1,0.5,9.2,6 +7.4,0.19,0.31,14.5,0.045,39,193,0.9986,3.1,0.5,9.2,6 +7.4,0.19,0.31,14.5,0.045,39,193,0.9986,3.1,0.5,9.2,6 +7.4,0.19,0.31,14.5,0.045,39,193,0.9986,3.1,0.5,9.2,6 +7.4,0.19,0.31,14.5,0.045,39,193,0.9986,3.1,0.5,9.2,6 +7.4,0.19,0.31,14.5,0.045,39,193,0.9986,3.1,0.5,9.2,6 +6.3,0.32,0.32,1.5,0.03,24,101,0.98923,3.21,0.42,13,5 +7.6,0.19,0.32,18.75,0.047,32,193,1.00014,3.1,0.5,9.3,7 +6.5,0.26,0.31,3.6,0.03,36,92,0.99026,3.22,0.62,12.6,8 +5.9,0.24,0.12,1.4,0.035,60,247,0.99358,3.34,0.44,9.6,6 +4.2,0.215,0.23,5.1,0.041,64,157,0.99688,3.42,0.44,8.0,3 +8.1,0.24,0.32,10.5,0.03,34,105,0.99407,3.11,0.42,11.8,6 +5.8,0.23,0.2,2,0.043,39,154,0.99226,3.21,0.39,10.2,6 +7.5,0.33,0.36,2.6,0.051,26,126,0.99097,3.32,0.53,12.7,6 +6.6,0.38,0.36,9.2,0.061,42,214,0.9976,3.31,0.56,9.4,5 +6.4,0.15,0.29,1.8,0.044,21,115,0.99166,3.1,0.38,10.2,5 +6.5,0.32,0.34,5.7,0.044,27,91,0.99184,3.28,0.6,12,7 +7.5,0.22,0.32,2.4,0.045,29,100,0.99135,3.08,0.6,11.3,7 +6.4,0.23,0.32,1.9,0.038,40,118,0.99074,3.32,0.53,11.8,7 +6.1,0.22,0.31,1.4,0.039,40,129,0.99193,3.45,0.59,10.9,5 +6.5,0.48,0.02,0.9,0.043,32,99,0.99226,3.14,0.47,9.8,4 +6.6,0.23,0.3,4.6,0.06,29,154,0.99142,3.23,0.49,12.2,8 +6.4,0.16,0.25,1.4,0.057,21,125,0.99091,3.23,0.44,11.1,7 +6.6,0.38,0.36,9.2,0.061,42,214,0.9976,3.31,0.56,9.4,5 +7.4,0.16,0.32,1.4,0.065,23,140,0.99134,3.06,0.47,11.4,6 +6.4,0.15,0.29,1.8,0.044,21,115,0.99166,3.1,0.38,10.2,5 +6.5,0.32,0.3,2.3,0.051,20,127,0.98964,3.13,0.52,12.8,6 +6.7,0.12,0.36,2.3,0.039,43,125,0.99229,3.07,0.67,10.1,7 +6.6,0.2,0.14,4.4,0.184,35,168,0.99396,2.93,0.45,9.4,6 +8,0.34,0.25,6.4,0.035,38,103,0.99148,2.91,0.23,12.2,6 +6.8,0.21,0.31,2.9,0.046,40,121,0.9913,3.07,0.65,10.9,7 +6.8,0.23,0.31,2.8,0.047,40,122,0.99126,3.06,0.64,10.9,7 +6.8,0.21,0.31,2.9,0.046,40,121,0.9913,3.07,0.65,10.9,7 +6.6,0.2,0.14,4.4,0.184,35,168,0.99396,2.93,0.45,9.4,6 +6.6,0.28,0.42,8.2,0.044,60,196,0.99562,3.14,0.48,9.4,5 +7.8,0.25,0.37,1,0.043,10,80,0.99128,3.08,0.38,11.4,5 +5.6,0.12,0.33,2.9,0.044,21,73,0.98896,3.17,0.32,12.9,8 +6.6,0.28,0.41,7,0.046,59,194,0.99558,3.14,0.48,9.4,5 +6.8,0.17,0.35,1.8,0.04,29,84,0.98961,2.91,0.57,12,7 +7.3,0.25,0.28,1.5,0.043,19,113,0.99338,3.38,0.56,10.1,6 +6.6,0.28,0.41,7,0.046,59,194,0.99558,3.14,0.48,9.4,5 +6.6,0.28,0.42,8.2,0.044,60,196,0.99562,3.14,0.48,9.4,5 +6.5,0.25,0.5,7.6,0.047,54,184,0.99572,3.17,0.45,9.2,5 +6.3,0.24,0.35,2.3,0.039,43,109,0.99056,3.34,0.44,11.8,6 +6.8,0.32,0.32,8.7,0.029,31,105,0.99146,3,0.34,12.3,7 +7.8,0.25,0.37,1,0.043,10,80,0.99128,3.08,0.38,11.4,5 +5.6,0.12,0.33,2.9,0.044,21,73,0.98896,3.17,0.32,12.9,8 +6.6,0.24,0.28,6.7,0.032,26,91,0.99172,3.13,0.32,12.3,6 +6.3,0.22,0.34,5,0.032,36,93,0.99012,3.27,0.36,13.5,7 +6,0.32,0.3,1.9,0.033,41,142,0.98912,3.29,0.42,12.8,7 +6.3,0.19,0.29,2,0.022,33,96,0.98902,3.04,0.54,12.8,7 +6,0.32,0.3,1.9,0.033,41,142,0.98912,3.29,0.42,12.8,7 +9.4,0.24,0.29,8.5,0.037,124,208,0.99395,2.9,0.38,11,3 +6.4,0.35,0.28,12.6,0.039,19,124,0.99539,3.2,0.43,10.6,6 +6.7,0.46,0.27,5.2,0.039,35,96,0.99129,3.16,0.44,12.4,7 +6.3,0.3,0.29,2.1,0.048,33,142,0.98956,3.22,0.46,12.9,7 +6,0.19,0.29,1.1,0.047,67,152,0.9916,3.54,0.59,11.1,7 +5.9,0.24,0.28,1.3,0.032,36,95,0.98889,3.08,0.64,12.9,7 +7.3,0.145,0.33,1.1,0.042,14,64,0.99012,3.1,0.37,11.8,7 +6.6,0.435,0.38,9.2,0.058,66,243,0.99833,3.23,0.54,9.1,6 +5.8,0.18,0.37,1.2,0.036,19,74,0.98853,3.09,0.49,12.7,7 
+5.8,0.18,0.37,1.1,0.036,31,96,0.98942,3.16,0.48,12,6 +5.6,0.32,0.32,8.3,0.043,32,105,0.99266,3.24,0.47,11.2,6 +6.6,0.16,0.35,1.8,0.042,26,105,0.98962,3.19,0.75,12.4,7 +5.1,0.21,0.28,1.4,0.047,48,148,0.99168,3.5,0.49,10.4,5 +7.5,0.29,0.36,15.7,0.05,29,124,0.9968,3.06,0.54,10.4,5 +6,0.26,0.33,4.35,0.04,15,80,0.98934,3.29,0.5,12.7,6 +5.7,0.26,0.3,1.8,0.039,30,105,0.98995,3.48,0.52,12.5,7 +7.1,0.17,0.31,1.6,0.037,15,103,0.991,3.14,0.5,12,6 +6.9,0.17,0.3,2,0.047,13,117,0.99152,3.16,0.51,11.6,6 +6.8,0.25,0.28,5,0.035,42,126,0.99048,3.12,0.38,12.6,7 +6.6,0.17,0.28,1.8,0.042,62,178,0.99204,3.15,0.42,10.2,5 +5.8,0.17,0.36,1.3,0.036,11,70,0.99202,3.43,0.68,10.4,7 +6.4,0.24,0.29,1,0.038,18,122,0.9906,3.3,0.42,11.5,5 +6.7,0.21,0.34,1.4,0.049,36,112,0.99091,3.02,0.5,11,6 +6.7,0.23,0.33,8.1,0.048,45,176,0.99472,3.11,0.52,10.1,6 +6.8,0.23,0.32,8.6,0.046,47,159,0.99452,3.08,0.52,10.5,6 +6.5,0.22,0.28,3.7,0.059,29,151,0.99177,3.23,0.41,12.1,7 +5.1,0.165,0.22,5.7,0.047,42,146,0.9934,3.18,0.55,9.9,6 +6.6,0.425,0.25,2.35,0.034,23,87,0.99082,3.05,0.41,11.4,6 +6.9,0.38,0.29,13.65,0.048,52,189,0.99784,3,0.6,9.5,6 +6.9,0.38,0.29,13.65,0.048,52,189,0.99784,3,0.6,9.5,6 +6.9,0.38,0.29,13.65,0.048,52,189,0.99784,3,0.6,9.5,6 +7.2,0.27,0.28,15.2,0.046,6,41,0.99665,3.17,0.39,10.9,6 +7.6,0.17,0.27,4.6,0.05,23,98,0.99422,3.08,0.47,9.5,6 +6.2,0.3,0.31,1.2,0.048,19,125,0.98999,3.32,0.54,12.6,6 +7.6,0.17,0.27,4.6,0.05,23,98,0.99422,3.08,0.47,9.5,6 +6.5,0.26,0.32,6.65,0.059,34,104,0.99254,3.18,0.42,11.1,5 +6.9,0.36,0.28,13.55,0.048,51,189,0.99782,3,0.6,9.5,7 +6.9,0.38,0.29,13.65,0.048,52,189,0.99784,3,0.6,9.5,6 +6.8,0.18,0.24,9.8,0.058,64,188,0.9952,3.13,0.51,10.6,6 +6.7,0.18,0.24,10.3,0.057,64,185,0.99519,3.12,0.5,10.6,6 +6.6,0.16,0.21,6.7,0.055,43,157,0.99384,3.15,0.52,10.8,6 +7.2,0.27,0.28,15.2,0.046,6,41,0.99665,3.17,0.39,10.9,6 +6.4,0.17,0.27,9.9,0.047,26,101,0.99596,3.34,0.5,9.9,6 +7.2,0.22,0.28,7.2,0.06,41,132,0.9935,3.08,0.59,11.3,6 +6,0.22,0.28,1.1,0.034,47,90,0.98862,3.22,0.38,12.6,6 +6.7,0.36,0.28,8.3,0.034,29,81,0.99151,2.96,0.39,12.5,6 +6.5,0.43,0.28,11.25,0.032,31,87,0.9922,3.02,0.38,12.4,6 +5.9,0.2,0.28,12.8,0.038,29,132,0.99426,3.31,0.57,11.8,7 +5.3,0.32,0.23,9.65,0.026,26,119,0.99168,3.18,0.53,12.2,6 +6.8,0.2,0.28,12.6,0.048,54,136,0.99556,3.19,0.37,10.7,6 +6,0.22,0.33,12.2,0.033,25,97,0.99356,3.17,0.42,11.3,7 +6.7,0.36,0.28,8.3,0.034,29,81,0.99151,2.96,0.39,12.5,6 +6.5,0.43,0.28,11.25,0.032,31,87,0.9922,3.02,0.38,12.4,6 +7.1,0.18,0.49,1.3,0.033,12,72,0.99072,3.05,0.53,11.3,7 +6.4,0.17,0.27,9.9,0.047,26,101,0.99596,3.34,0.5,9.9,6 +7.2,0.22,0.28,7.2,0.06,41,132,0.9935,3.08,0.59,11.3,6 +6,0.22,0.28,1.1,0.034,47,90,0.98862,3.22,0.38,12.6,6 +6,0.2,0.26,1.1,0.033,38,67,0.98954,3.14,0.38,11.5,6 +7.6,0.2,0.26,4.8,0.033,26,76,0.99076,2.98,0.49,12.3,7 +6.2,0.3,0.21,1.1,0.032,31,111,0.9889,2.97,0.42,12.2,6 +6,0.29,0.25,1.4,0.033,30,114,0.98794,3.08,0.43,13.2,6 +6.6,0.18,0.28,1.7,0.041,53,161,0.99207,3.13,0.45,10.2,6 +7,0.22,0.28,10.6,0.039,32,117,0.99355,3.05,0.55,11.5,7 +6,0.29,0.25,1.4,0.033,30,114,0.98794,3.08,0.43,13.2,6 +6.2,0.3,0.21,1.1,0.032,31,111,0.9889,2.97,0.42,12.2,6 +5.6,0.15,0.26,5.55,0.051,51,139,0.99336,3.47,0.5,11,6 +6.9,0.28,0.24,2.1,0.034,49,121,0.98882,2.98,0.43,13.2,7 +5.9,0.19,0.21,1.7,0.045,57,135,0.99341,3.32,0.44,9.5,5 +7.8,0.22,0.26,9,0.047,38,132,0.997,3.25,0.53,10.2,6 +6.6,0.18,0.28,1.7,0.041,53,161,0.99207,3.13,0.45,10.2,6 +7,0.4,0.25,1.8,0.05,51,189,0.99174,3,0.55,11.4,6 +6.1,0.28,0.27,4.7,0.03,56,140,0.99042,3.16,0.42,12.5,8 +7.6,0.36,0.49,11.3,0.046,87,221,0.9984,3.01,0.43,9.2,5 
+6.5,0.28,0.34,3.6,0.04,29,121,0.99111,3.28,0.48,12.1,7 +6.9,0.19,0.35,6.9,0.045,51,125,0.9933,3.1,0.44,10.7,7 +6.5,0.28,0.34,3.6,0.04,29,121,0.99111,3.28,0.48,12.1,7 +6.4,0.22,0.32,4.9,0.046,50,156,0.99316,3.38,0.55,11.2,6 +6.8,0.23,0.3,6.95,0.044,42,179,0.9946,3.25,0.56,10.6,6 +6.4,0.32,0.31,1.9,0.037,34,126,0.99,3.06,0.45,11.8,6 +6.1,0.28,0.27,4.7,0.03,56,140,0.99042,3.16,0.42,12.5,8 +7.6,0.36,0.49,11.3,0.046,87,221,0.9984,3.01,0.43,9.2,5 +8.8,0.39,0.35,1.8,0.096,22,80,0.99016,2.95,0.54,12.6,6 +6.6,0.24,0.3,11.3,0.026,11,77,0.99381,3.13,0.55,12.8,7 +6.9,0.29,0.3,8.2,0.026,35,112,0.99144,3,0.37,12.3,6 +6.9,0.28,0.3,8.3,0.026,37,113,0.99139,2.99,0.38,12.3,8 +6.7,0.38,0.26,9.55,0.036,35,91,0.9919,2.98,0.37,12.4,6 +8,0.28,0.3,8.4,0.03,35,115,0.99192,2.93,0.42,12.3,6 +6.5,0.25,0.45,7.8,0.048,52,188,0.99576,3.2,0.53,9.1,5 +6.6,0.26,0.46,7.8,0.047,48,186,0.9958,3.2,0.54,9.1,5 +7.4,0.29,0.28,10.2,0.032,43,138,0.9951,3.1,0.47,10.6,6 +6.3,0.19,0.29,5.5,0.042,44,189,0.99304,3.19,0.47,10.3,6 +6.1,0.33,0.32,7.8,0.052,52,183,0.99657,3.39,0.65,9.5,5 +5.6,0.32,0.33,7.4,0.037,25,95,0.99268,3.25,0.49,11.1,6 +7.7,0.46,0.18,3.3,0.054,18,143,0.99392,3.12,0.51,10.8,6 +8.8,0.19,0.3,5,0.028,34,120,0.99242,2.94,0.47,11.2,5 +7.7,0.46,0.18,3.3,0.054,18,143,0.99392,3.12,0.51,10.8,6 +8.8,0.27,0.25,5,0.024,52,99,0.9925,2.87,0.49,11.4,5 +5.8,0.18,0.28,1.3,0.034,9,94,0.99092,3.21,0.52,11.2,6 +5.8,0.15,0.32,1.2,0.037,14,119,0.99137,3.19,0.5,10.2,6 +5.6,0.32,0.33,7.4,0.037,25,95,0.99268,3.25,0.49,11.1,6 +6.1,0.33,0.32,7.8,0.052,52,183,0.99657,3.39,0.65,9.5,5 +7.1,0.32,0.3,9.9,0.041,63,192,0.99642,3.12,0.49,10.2,6 +6.2,0.23,0.35,0.7,0.051,24,111,0.9916,3.37,0.43,11,3 +8.9,0.3,0.35,4.6,0.032,32,148,0.99458,3.15,0.45,11.5,7 +6,0.14,0.17,5.6,0.036,37,127,0.99373,3.05,0.57,9.8,6 +6.8,0.24,0.29,9.5,0.042,56,157,0.99586,3.11,0.51,10.1,6 +6.7,0.21,0.48,14.8,0.05,31,195,0.99942,2.95,0.75,8.8,6 +8.9,0.3,0.35,4.6,0.032,32,148,0.99458,3.15,0.45,11.5,7 +6.1,0.3,0.3,2.1,0.031,50,163,0.9895,3.39,0.43,12.7,7 +7.2,0.37,0.4,11.6,0.032,34,214,0.9963,3.1,0.51,9.8,6 +6.7,0.64,0.3,1.2,0.03,18,76,0.9892,3.16,0.6,12.9,4 +7.2,0.37,0.4,11.6,0.032,34,214,0.9963,3.1,0.51,9.8,6 +6.1,0.3,0.3,2.1,0.031,50,163,0.9895,3.39,0.43,12.7,7 +7.6,0.28,0.49,20.15,0.06,30,145,1.00196,3.01,0.44,8.5,5 +6.3,0.29,0.28,4.7,0.059,28,81,0.99036,3.24,0.56,12.7,8 +6.2,0.28,0.28,4.3,0.026,22,105,0.989,2.98,0.64,13.1,8 +7.1,0.18,0.39,14.5,0.051,48,156,0.99947,3.35,0.78,9.1,5 +6.4,0.32,0.27,4.9,0.034,18,122,0.9916,3.36,0.71,12.5,6 +7.1,0.17,0.4,14.55,0.047,47,156,0.99945,3.34,0.78,9.1,6 +7.1,0.17,0.4,14.55,0.047,47,156,0.99945,3.34,0.78,9.1,6 +5.8,0.24,0.26,10.05,0.039,63,162,0.99375,3.33,0.5,11.2,6 +6.4,0.32,0.27,4.9,0.034,18,122,0.9916,3.36,0.71,12.5,6 +7.1,0.18,0.39,14.5,0.051,48,156,0.99947,3.35,0.78,9.1,5 +7.1,0.17,0.4,14.55,0.047,47,156,0.99945,3.34,0.78,9.1,6 +7.1,0.18,0.39,15.25,0.047,45,158,0.99946,3.34,0.77,9.1,6 +7.8,0.29,0.29,3.15,0.044,41,117,0.99153,3.24,0.35,11.5,5 +6.2,0.255,0.27,1.3,0.037,30,86,0.98834,3.05,0.59,12.9,7 +8.2,0.34,0.29,5.2,0.076,19,92,0.99138,2.95,0.39,12.5,6 +6.5,0.24,0.28,1.1,0.034,26,83,0.98928,3.25,0.33,12.3,6 +6.9,0.24,0.23,7.1,0.041,20,97,0.99246,3.1,0.85,11.4,6 +6.7,0.4,0.22,8.8,0.052,24,113,0.99576,3.22,0.45,9.4,5 +6.7,0.3,0.44,18.5,0.057,65,224,0.99956,3.11,0.53,9.1,5 +6.7,0.4,0.22,8.8,0.052,24,113,0.99576,3.22,0.45,9.4,5 +6.8,0.17,0.32,1.4,0.04,35,106,0.99026,3.16,0.66,12,5 +7.1,0.25,0.28,1.2,0.04,31,111,0.99174,3.18,0.53,11.1,5 +5.9,0.27,0.27,5,0.035,14,97,0.99058,3.1,0.33,11.8,7 
+6,0.16,0.22,1.6,0.042,36,106,0.9905,3.24,0.32,11.4,6 +6.7,0.3,0.44,18.75,0.057,65,224,0.99956,3.11,0.53,9.1,5 +6.6,0.15,0.32,6,0.033,59,128,0.99192,3.19,0.71,12.1,8 +7.3,0.34,0.3,9.4,0.057,34,178,0.99554,3.15,0.44,10.4,6 +6,0.17,0.29,9.7,0.044,33,98,0.99536,3.12,0.36,9.2,6 +6.7,0.47,0.29,4.75,0.034,29,134,0.99056,3.29,0.46,13,7 +6.6,0.15,0.32,6,0.033,59,128,0.99192,3.19,0.71,12.1,8 +6.6,0.21,0.29,5.35,0.029,43,106,0.99112,2.93,0.43,11.5,7 +6.6,0.21,0.29,5.35,0.029,43,106,0.99112,2.93,0.43,11.5,7 +8,0.24,0.48,6.8,0.047,13,134,0.99616,3.23,0.7,10,5 +5.6,0.34,0.3,6.9,0.038,23,89,0.99266,3.25,0.49,11.1,6 +5.8,0.54,0,1.4,0.033,40,107,0.98918,3.26,0.35,12.4,5 +7.3,0.23,0.24,0.9,0.031,29,86,0.98926,2.9,0.38,12.2,6 +6,0.39,0.13,1.2,0.042,60,172,0.99114,3.06,0.52,10.6,5 +6.1,0.105,0.31,1.3,0.037,55,145,0.9912,3.41,0.41,11.1,7 +5.8,0.32,0.2,2.6,0.027,17,123,0.98936,3.36,0.78,13.9,7 +7.6,0.22,0.28,12,0.056,68,143,0.9983,2.99,0.3,9.2,6 +6.8,0.19,0.4,9.85,0.055,41,103,0.99532,2.98,0.56,10.5,6 +6.7,0.24,0.3,3.85,0.042,105,179,0.99189,3.04,0.59,11.3,8 +6.8,0.17,0.34,2,0.04,38,111,0.99,3.24,0.45,12.9,6 +6.2,0.3,0.31,1.6,0.035,40,106,0.98914,3.26,0.39,12.9,7 +6.9,0.29,0.41,7.8,0.046,52,171,0.99537,3.12,0.51,9.6,5 +6.8,0.19,0.34,1.9,0.04,41,108,0.99,3.25,0.45,12.9,6 +6.8,0.17,0.34,2,0.04,38,111,0.99,3.24,0.45,12.9,6 +6.6,0.24,0.27,10.3,0.047,54,219,0.99742,3.04,0.45,8.8,5 +6.6,0.16,0.36,1.1,0.031,27,93,0.98884,3.23,0.34,13.2,8 +7.6,0.22,0.28,12,0.056,68,143,0.9983,2.99,0.3,9.2,6 +6.7,0.24,0.3,3.85,0.042,105,179,0.99189,3.04,0.59,11.3,8 +6.8,0.19,0.4,9.85,0.055,41,103,0.99532,2.98,0.56,10.5,6 +6.7,0.16,0.36,2,0.045,24,131,0.99284,3.3,0.59,10.5,6 +6.5,0.3,0.27,4,0.038,37,97,0.99026,3.2,0.6,12.6,8 +6.5,0.22,0.19,1.1,0.064,36,191,0.99297,3.05,0.5,9.5,6 +6.2,0.36,0.45,10.4,0.06,22,184,0.99711,3.31,0.56,9.8,6 +6.2,0.37,0.24,6.1,0.032,19,86,0.98934,3.04,0.26,13.4,8 +7.6,0.31,0.24,1.8,0.037,39,150,0.9913,3.05,0.44,11.8,7 +6.2,0.36,0.45,10.4,0.06,22,184,0.99711,3.31,0.56,9.8,6 +5.9,0.32,0.28,4.7,0.039,34,94,0.98964,3.22,0.57,13.1,7 +6.5,0.3,0.27,4,0.038,37,97,0.99026,3.2,0.6,12.6,8 +5.8,0.22,0.3,1.1,0.047,36,131,0.992,3.26,0.45,10.4,5 +5.4,0.45,0.27,6.4,0.033,20,102,0.98944,3.22,0.27,13.4,8 +6.1,0.36,0.26,8.15,0.035,14,88,0.99031,3.06,0.27,13,7 +6.2,0.37,0.24,6.1,0.032,19,86,0.98934,3.04,0.26,13.4,8 +7.5,0.21,0.32,4.8,0.056,39,113,0.99393,3.11,0.52,10.2,7 +6.9,0.28,0.33,1.2,0.039,16,98,0.9904,3.07,0.39,11.7,6 +6.5,0.22,0.19,1.1,0.064,36,191,0.99297,3.05,0.5,9.5,6 +7.8,0.2,0.2,1.4,0.036,25,83,0.99088,3.03,0.46,11.7,6 +6.7,0.28,0.31,7.4,0.041,7,81,0.99254,3.04,0.47,11.4,8 +7.6,0.31,0.24,1.8,0.037,39,150,0.9913,3.05,0.44,11.8,7 +8,0.2,0.44,1,0.057,24,111,0.99158,3.09,0.32,11.2,6 +6,0.28,0.27,15.5,0.036,31,134,0.99408,3.19,0.44,13,7 +6,0.28,0.27,15.5,0.036,31,134,0.99408,3.19,0.44,13,7 +6.7,0.24,0.36,8.4,0.042,42,123,0.99473,3.34,0.52,10.9,6 +6.3,0.22,0.28,2.4,0.042,38,102,0.98998,3.14,0.37,11.6,7 +6,0.24,0.28,3.95,0.038,61,134,0.99146,3.3,0.54,11.3,7 +7.7,0.43,1,19.95,0.032,42,164,0.99742,3.29,0.5,12,6 +6.4,0.3,0.36,2,0.052,18,141,0.99273,3.38,0.53,10.5,6 +6.1,0.33,0.3,3,0.036,30,124,0.98922,3.31,0.4,13.1,7 +6,0.28,0.27,15.5,0.036,31,134,0.99408,3.19,0.44,13,7 +6.7,0.24,0.36,8.4,0.042,42,123,0.99473,3.34,0.52,10.9,6 +6.7,0.29,0.45,14.3,0.054,30,181,0.99869,3.14,0.57,9.1,5 +6.9,0.33,0.31,4.2,0.04,21,93,0.9896,3.18,0.48,13.4,7 +6.5,0.16,0.34,1.4,0.029,29,133,0.99108,3.33,0.64,11.5,7 +6,0.2,0.32,3,0.031,26,118,0.99134,3.38,0.68,11.2,7 +7.5,0.33,0.28,4.9,0.042,21,155,0.99385,3.36,0.57,10.9,6 
+7.1,0.36,0.28,2.4,0.036,35,115,0.98936,3.19,0.44,13.5,7 +6.7,0.29,0.45,14.3,0.054,30,181,0.99869,3.14,0.57,9.1,5 +6.4,0.26,0.25,10.7,0.046,66,179,0.99606,3.17,0.55,9.9,6 +7,0.22,0.24,11,0.041,75,167,0.99508,2.98,0.56,10.5,6 +6.5,0.19,0.28,1.4,0.046,22,90,0.99038,3.18,0.51,11.7,7 +6.3,0.21,0.31,1.2,0.043,30,117,0.99158,3.49,0.68,11,6 +7.9,0.35,0.28,12.9,0.032,13,63,0.9932,2.99,0.43,13,6 +7.7,0.38,0.23,10.8,0.03,28,95,0.99164,2.93,0.41,13.6,6 +6.8,0.19,0.33,1.3,0.031,22,87,0.98987,3.08,0.62,12.3,7 +7.2,0.33,0.34,2,0.044,61,171,0.98947,3.25,0.53,13.3,7 +6.6,0.29,0.29,1.8,0.036,38,102,0.98819,3.08,0.42,13.7,7 +7.5,0.2,0.41,1.2,0.05,26,131,0.99133,3.19,0.52,11.1,5 +6.9,0.33,0.62,7.5,0.038,46,132,0.99143,3.23,0.43,13.4,7 +6,0.23,0.15,9.7,0.048,101,207,0.99571,3.05,0.3,9.1,5 +5.9,0.23,0.24,3.8,0.038,61,152,0.99139,3.31,0.5,11.3,7 +6.6,0.32,0.41,7.2,0.048,55,178,0.99537,3.2,0.46,9.4,5 +6,0.23,0.15,9.7,0.048,101,207,0.99571,3.05,0.3,9.1,5 +5.3,0.36,0.27,6.3,0.028,40,132,0.99186,3.37,0.4,11.6,6 +5.3,0.36,0.27,6.3,0.028,40,132,0.99186,3.37,0.4,11.6,6 +8.9,0.27,0.28,0.8,0.024,29,128,0.98984,3.01,0.35,12.4,6 +7.6,0.23,0.29,8.6,0.053,65,146,0.9963,3.11,0.32,9.8,6 +6.9,0.75,0.13,6.3,0.036,19,50,0.99312,3.09,0.25,11.1,4 +7.1,0.35,0.27,3.1,0.034,28,134,0.9897,3.26,0.38,13.1,7 +7.2,0.31,0.35,7.2,0.046,45,178,0.9955,3.14,0.53,9.7,5 +6.4,0.28,0.44,7.1,0.048,49,179,0.99528,3.15,0.48,9.2,5 +7.2,0.23,0.46,6.4,0.036,17,85,0.99279,3.1,0.78,11.7,6 +6.6,0.22,0.3,14.7,0.045,50,136,0.99704,3.14,0.37,10.6,6 +7.2,0.31,0.35,7.2,0.046,45,178,0.9955,3.14,0.53,9.7,5 +6.4,0.28,0.44,7.1,0.048,49,179,0.99528,3.15,0.48,9.2,5 +7.2,0.24,0.28,1.9,0.032,30,92,0.9914,3.1,0.39,10.9,6 +6.2,0.27,0.47,1.2,0.146,28,105,0.99224,3.23,0.51,10.1,5 +6.5,0.28,0.25,4.8,0.029,54,128,0.99074,3.17,0.44,12.2,7 +7.2,0.27,0.31,1.2,0.031,27,80,0.98892,3.03,0.33,12.7,6 +7.8,0.28,0.25,3.4,0.024,27,99,0.98959,2.98,0.37,13,6 +8.1,0.26,0.27,4.3,0.03,43,123,0.99212,3.16,0.33,11.2,6 +6.6,0.23,0.37,8.5,0.036,46,153,0.99576,3.2,0.48,9.4,6 +6,0.33,0.2,1.8,0.031,49,159,0.9919,3.41,0.53,11,6 +6,0.33,0.2,1.8,0.031,49,159,0.9919,3.41,0.53,11,6 +7.3,0.2,0.29,19.5,0.039,69,237,1.00037,3.1,0.48,9.2,6 +6.6,0.23,0.37,8.5,0.036,46,153,0.99576,3.2,0.48,9.4,6 +7.3,0.2,0.29,19.9,0.039,69,237,1.00037,3.1,0.48,9.2,6 +6.2,0.47,0.19,8.3,0.029,24,142,0.992,3.22,0.45,12.3,6 +6,0.33,0.2,1.8,0.031,49,159,0.9919,3.41,0.53,11,6 +7.2,0.14,0.32,1.1,0.022,48,116,0.99218,3.04,0.67,10,6 +5.7,0.22,0.22,16.65,0.044,39,110,0.99855,3.24,0.48,9,6 +5.7,0.22,0.22,16.65,0.044,39,110,0.99855,3.24,0.48,9,6 +5.7,0.22,0.22,16.65,0.044,39,110,0.99855,3.24,0.48,9,6 +8.1,0.2,0.28,0.9,0.023,49,87,0.99062,2.92,0.36,11.1,6 +5.8,0.14,0.15,6.1,0.042,27,123,0.99362,3.06,0.6,9.9,6 +4.8,0.21,0.21,10.2,0.037,17,112,0.99324,3.66,0.48,12.2,7 +8.1,0.2,0.28,0.9,0.023,49,87,0.99062,2.92,0.36,11.1,6 +5.7,0.22,0.22,16.65,0.044,39,110,0.99855,3.24,0.48,9,6 +7.5,0.34,0.24,3.85,0.031,5,34,0.99098,3.01,0.36,11.8,4 +6.6,0.64,0.28,4.4,0.032,19,78,0.99036,3.11,0.62,12.9,6 +7,0.48,0.12,4.5,0.05,23,86,0.99398,2.86,0.35,9,5 +7.6,0.37,0.34,3.2,0.028,42,162,0.9903,3.01,0.33,12.4,6 +7,0.48,0.12,4.5,0.05,23,86,0.99398,2.86,0.35,9,5 +6.6,0.64,0.28,4.4,0.032,19,78,0.99036,3.11,0.62,12.9,6 +8,0.25,0.27,9.7,0.036,15,85,0.99406,2.99,0.36,11.2,6 +7.6,0.38,0.28,4.2,0.029,7,112,0.9906,3,0.41,12.6,6 +6.9,0.26,0.27,4.2,0.031,20,80,0.99089,3.12,0.39,11.5,6 +7.8,0.15,0.34,1.1,0.035,31,93,0.99096,3.07,0.72,11.3,7 +8,0.25,0.27,9.7,0.036,15,85,0.99406,2.99,0.36,11.2,6 +6.9,0.26,0.27,4.2,0.031,20,80,0.99089,3.12,0.39,11.5,6 
+5.9,0.655,0,5.6,0.033,8,31,0.9936,3.32,0.51,10.5,4 +7.6,0.38,0.28,4.2,0.029,7,112,0.9906,3,0.41,12.6,6 +7.8,0.31,0.4,1.6,0.027,20,87,0.9911,3.15,0.48,11.9,6 +8.1,0.17,0.21,1.6,0.036,24,119,0.99396,3.18,0.52,10.1,6 +6.8,0.18,0.28,1.1,0.027,32,112,0.99089,3.15,0.45,11,7 +7.4,0.28,0.36,14.6,0.048,35,161,0.9968,3.14,0.56,10.6,5 +7.3,0.23,0.27,2.6,0.035,39,120,0.99138,3.04,0.59,11.3,7 +6.7,0.22,0.22,1.2,0.038,5,124,0.99098,3.1,0.37,11.2,4 +7.4,0.25,0.28,7.25,0.028,14,78,0.99238,2.94,0.37,11.5,7 +7.5,0.3,0.21,6.55,0.026,33,143,0.99244,2.92,0.35,11.1,5 +7.2,0.26,0.24,7,0.023,19,130,0.99176,3.14,0.49,12.8,7 +6.3,0.32,0.32,1.5,0.037,12,76,0.98993,3.3,0.46,12.3,6 +7.7,0.24,0.3,1.4,0.041,15,102,0.9929,3.26,0.53,10.4,6 +7.4,0.25,0.28,7.25,0.028,14,78,0.99238,2.94,0.37,11.5,7 +7,0.24,0.35,1,0.032,42,104,0.98988,3.16,0.37,11.7,7 +5.8,0.28,0.28,4.2,0.044,52,158,0.992,3.35,0.44,10.7,7 +6.8,0.19,0.71,17.5,0.042,21,114,0.99784,2.85,0.5,9.5,6 +6.8,0.19,0.71,17.5,0.042,21,114,0.99784,2.85,0.5,9.5,6 +6.8,0.19,0.71,17.5,0.042,21,114,0.99784,2.85,0.5,9.5,6 +6.6,0.19,0.35,1.5,0.037,37,107,0.99006,3.18,0.68,12,7 +6.4,0.28,0.36,1.3,0.053,28,186,0.99211,3.31,0.45,10.8,5 +5.6,0.28,0.27,3.9,0.043,52,158,0.99202,3.35,0.44,10.7,7 +5.6,0.28,0.28,4.2,0.044,52,158,0.992,3.35,0.44,10.7,7 +6.8,0.19,0.32,7.6,0.049,37,107,0.99332,3.12,0.44,10.7,7 +7.2,0.16,0.29,1,0.031,40,123,0.98958,3.12,0.4,12.1,7 +6.6,0.17,0.28,1.1,0.034,55,108,0.98939,3,0.52,11.9,7 +6.6,0.19,0.28,11.8,0.042,54,137,0.99492,3.18,0.37,10.8,6 +5.8,0.2,0.24,1.4,0.033,65,169,0.99043,3.59,0.56,12.3,7 +6.6,0.39,0.38,9.7,0.053,49,226,0.99787,3.3,0.57,9.4,6 +6.8,0.12,0.3,12.9,0.049,32,88,0.99654,3.2,0.35,9.9,6 +6.6,0.295,0.24,1.6,0.039,29,140,0.99304,3.35,0.61,10.4,7 +6.6,0.26,0.24,7.2,0.038,28,137,0.9952,3.35,0.6,10.4,6 +7,0.32,0.27,7.1,0.027,37,122,0.99165,3.15,0.6,12.6,7 +7.4,0.36,0.23,1.9,0.017,31,69,0.9892,2.93,0.36,12.5,6 +6.7,0.35,0.48,8.8,0.056,35,167,0.99628,3.04,0.47,9.4,5 +6.4,0.38,0.24,7.2,0.047,41,151,0.99604,3.11,0.6,9.2,5 +6.8,0.14,0.18,1.4,0.047,30,90,0.99164,3.27,0.54,11.2,6 +7,0.16,0.25,14.3,0.044,27,149,0.998,2.91,0.46,9.2,6 +7,0.16,0.25,14.3,0.044,27,149,0.998,2.91,0.46,9.2,6 +6.7,0.35,0.48,8.8,0.056,35,167,0.99628,3.04,0.47,9.4,5 +6.8,0.14,0.18,1.4,0.047,30,90,0.99164,3.27,0.54,11.2,6 +6.8,0.16,0.18,1.8,0.046,31,114,0.99226,3.27,0.55,10.8,6 +7,0.16,0.25,14.3,0.044,27,149,0.998,2.91,0.46,9.2,6 +6.4,0.38,0.24,7.2,0.047,41,151,0.99604,3.11,0.6,9.2,5 +7.2,0.24,0.3,1.2,0.037,11,95,0.98914,2.96,0.36,12.5,6 +7.7,0.32,0.61,11.8,0.041,66,188,0.99794,3,0.54,9.3,5 +7,0.29,0.33,0.9,0.041,20,117,0.99048,3.21,0.5,11.4,5 +7.1,0.27,0.24,12.6,0.044,48,118,0.99726,3.04,0.56,10,7 +6.8,0.45,0.28,26.05,0.031,27,122,1.00295,3.06,0.42,10.6,6 +6.3,0.2,0.26,4.7,0.04,108,168,0.99278,3.07,0.75,10.7,7 +7.1,0.27,0.24,12.6,0.044,48,118,0.99726,3.04,0.56,10,7 +7.2,0.24,0.3,1.2,0.037,11,95,0.98914,2.96,0.36,12.5,6 +6.8,0.45,0.28,26.05,0.031,27,122,1.00295,3.06,0.42,10.6,6 +6.6,0.36,0.28,6.1,0.029,12,93,0.99054,3.19,0.27,12.8,7 +7.7,0.32,0.61,11.8,0.041,66,188,0.99794,3,0.54,9.3,5 +7,0.29,0.33,0.9,0.041,20,117,0.99048,3.21,0.5,11.4,5 +6.4,0.37,0.2,5.6,0.117,61,183,0.99459,3.24,0.43,9.5,5 +6.4,0.38,0.2,5.3,0.117,57,181,0.99459,3.24,0.43,9.5,6 +6.4,0.36,0.2,5.7,0.118,61,172,0.9946,3.24,0.43,9.5,6 +6.6,0.3,0.25,8,0.036,21,124,0.99362,3.06,0.38,10.8,6 +6.6,0.3,0.25,8,0.036,21,124,0.99362,3.06,0.38,10.8,6 +6.5,0.21,0.51,17.6,0.045,34,125,0.99966,3.2,0.47,8.8,6 +6.6,0.3,0.25,8,0.036,21,124,0.99362,3.06,0.38,10.8,6 +7.6,0.31,0.27,8.8,0.021,57,156,0.99442,3.08,0.38,11,7 
+5.8,0.58,0,1.5,0.02,33,96,0.98918,3.29,0.38,12.4,6 +6.5,0.26,0.39,1.4,0.02,12,66,0.99089,3.25,0.75,11.3,7 +8.7,0.3,0.34,4.8,0.018,23,127,0.99474,3.12,0.49,11.2,7 +6.4,0.29,0.32,2.4,0.014,34,89,0.99008,3.24,0.66,12.5,7 +6.7,0.13,0.32,3.7,0.017,32,99,0.99348,3.12,0.44,10,6 +6.8,0.19,0.33,4.9,0.047,42,130,0.99283,3.12,0.56,11,6 +6,0.25,0.4,5.7,0.052,56,152,0.99398,3.16,0.88,10.5,6 +6,0.25,0.4,5.7,0.052,56,152,0.99398,3.16,0.88,10.5,6 +6.8,0.19,0.33,4.9,0.047,42,130,0.99283,3.12,0.56,11,6 +6.4,0.24,0.23,2,0.046,30,133,0.9908,3.12,0.54,11.4,7 +5.9,0.18,0.28,5.1,0.039,50,139,0.99165,3.16,0.44,11.3,6 +7.2,0.33,0.22,4.5,0.031,10,73,0.99076,2.97,0.52,12.2,7 +6.4,0.29,0.24,3.2,0.037,31,95,0.98942,2.9,0.66,12.6,7 +7.3,0.31,0.25,6.65,0.032,30,138,0.99244,2.9,0.37,11.1,5 +7,0.29,0.37,1.6,0.035,34,126,0.99058,3.26,0.47,12.3,6 +6.9,0.19,0.6,4,0.037,6,122,0.99255,2.92,0.59,10.4,4 +6.3,0.32,0.17,17.75,0.06,51,190,0.99916,3.13,0.48,8.8,6 +6.6,0.085,0.33,1.4,0.036,17,109,0.99306,3.27,0.61,9.5,6 +6.3,0.32,0.17,17.75,0.06,51,190,0.99916,3.13,0.48,8.8,6 +6.8,0.18,0.32,7.2,0.047,17,109,0.99498,3.42,0.44,10.4,6 +6.8,0.52,0.26,5.7,0.038,27,130,0.99,3.11,0.27,13,7 +7.1,0.28,0.28,8.5,0.03,25,191,0.99338,3.16,0.46,12.2,7 +5.7,0.15,0.47,11.4,0.035,49,128,0.99456,3.03,0.34,10.5,8 +5.8,0.275,0.3,5.4,0.043,41,149,0.9926,3.33,0.42,10.8,7 +5.4,0.53,0.16,2.7,0.036,34,128,0.98856,3.2,0.53,13.2,8 +5.8,0.32,0.28,4.3,0.032,46,115,0.98946,3.16,0.57,13,8 +6.7,0.22,0.39,1.2,0.049,26,152,0.99346,3.5,0.47,10,6 +6.1,0.6,0.12,1.8,0.05,11,76,0.99268,3.42,0.48,10.4,4 +6.5,0.26,0.31,1.3,0.034,59,145,0.98944,3.16,0.54,12.4,6 +5,0.29,0.54,5.7,0.035,54,155,0.98976,3.27,0.34,12.9,8 +5.4,0.53,0.16,2.7,0.036,34,128,0.98856,3.2,0.53,13.2,8 +6.8,0.21,0.26,11.7,0.038,61,152,0.99523,3.02,0.56,10.5,7 +5.8,0.32,0.28,4.3,0.032,46,115,0.98946,3.16,0.57,13,8 +6.5,0.27,0.26,11,0.03,2,82,0.99402,3.07,0.36,11.2,5 +5.9,0.37,0.32,1.6,0.029,41,102,0.98916,3.41,0.55,12.7,7 +6.2,0.21,0.18,11.6,0.044,61,155,0.99655,3.14,0.52,9.4,6 +6.8,0.3,0.29,6.2,0.025,29,95,0.99071,3.03,0.32,12.9,7 +7.3,0.41,0.29,1.8,0.032,26,74,0.98889,2.96,0.35,13,8 +5.4,0.3,0.3,1.2,0.029,25,93,0.98742,3.31,0.4,13.6,7 +6.6,0.34,0.2,1,0.053,26,112,0.99336,3.32,0.55,9.1,5 +5.6,0.25,0.19,2.4,0.049,42,166,0.992,3.25,0.43,10.4,6 +5.3,0.3,0.3,1.2,0.029,25,93,0.98742,3.31,0.4,13.6,7 +6.9,0.58,0.58,8.2,0.032,29,169,0.99275,3.28,0.44,12.2,6 +7.2,0.23,0.25,18.8,0.085,19,111,1.00044,3.1,0.51,8.7,5 +7.1,0.2,0.27,9.6,0.037,19,105,0.99444,3.04,0.37,10.5,7 +6.8,0.15,0.41,12.9,0.044,79.5,183,0.99742,3.24,0.78,10.2,6 +7,0.22,0.26,9.2,0.027,37,122,0.99228,3.06,0.34,12.5,8 +6.4,0.16,0.44,1.2,0.051,39,122,0.99058,3.11,0.75,11.3,7 +6.8,0.15,0.41,12.9,0.044,79.5,183,0.99742,3.24,0.78,10.2,6 +6.8,0.31,0.3,8,0.028,33,122,0.99164,3.13,0.63,12.6,7 +6.8,0.15,0.41,12.9,0.044,79.5,183,0.99742,3.24,0.78,10.2,6 +7.6,0.3,0.37,1.6,0.087,27,177,0.99438,3.09,0.5,9.8,5 +6,0.16,0.27,12,0.03,39,98,0.99402,3.15,0.34,10.8,5 +7.1,0.21,0.35,2.5,0.04,41,186,0.99128,3.32,0.56,12.5,6 +7,0.22,0.26,9.2,0.027,37,122,0.99228,3.06,0.34,12.5,8 +5.6,0.21,0.24,4.4,0.027,37,150,0.991,3.3,0.31,11.5,7 +7.4,0.22,0.26,8.8,0.027,23,112,0.9931,2.98,0.41,11.4,6 +7.1,0.2,0.27,9.6,0.037,19,105,0.99444,3.04,0.37,10.5,7 +6.8,0.31,0.3,8,0.028,33,122,0.99164,3.13,0.63,12.6,7 +7.2,0.23,0.25,18.8,0.085,19,111,1.00044,3.1,0.51,8.7,5 +6.4,0.15,0.4,1.3,0.053,61,146,0.99112,3.17,0.68,11,6 +6.4,0.16,0.44,1.2,0.051,39,122,0.99058,3.11,0.75,11.3,7 +6.8,0.15,0.41,12.9,0.044,79.5,182,0.99742,3.24,0.78,10.2,6 +6.3,0.22,0.34,1.2,0.036,32,96,0.98961,3.06,0.74,11.6,6 
+7.6,0.3,0.37,1.6,0.087,27,177,0.99438,3.09,0.5,9.8,5 +7,0.3,0.27,1.5,0.076,24,145,0.99344,3.1,0.52,10.1,5 +6.6,0.26,0.22,18.15,0.05,23,139,0.99904,3.06,0.5,9.2,5 +7.5,0.24,0.31,13.1,0.05,26,180,0.99884,3.05,0.53,9.1,6 +7.5,0.24,0.31,13.1,0.05,26,180,0.99884,3.05,0.53,9.1,6 +7.5,0.24,0.31,13.1,0.05,26,180,0.99884,3.05,0.53,9.1,6 +7.5,0.24,0.31,13.1,0.05,26,180,0.99884,3.05,0.53,9.1,6 +6.6,0.15,0.34,1,0.037,45,79,0.98949,2.96,0.5,11.7,6 +6.7,0.34,0.43,1.6,0.041,29,114,0.99014,3.23,0.44,12.6,6 +7.7,0.35,0.46,11.8,0.088,61,183,0.99786,2.86,0.47,9,5 +6.7,0.31,0.09,1.4,0.039,53,141,0.99206,3.12,0.44,10.1,5 +4.7,0.67,0.09,1,0.02,5,9,0.98722,3.3,0.34,13.6,5 +7.5,0.24,0.31,13.1,0.05,26,180,0.99884,3.05,0.53,9.1,6 +6.3,0.2,0.18,10.6,0.045,57,159,0.99666,3.09,0.54,9.2,5 +6.6,0.28,0.23,10.4,0.049,45,190,0.99754,3.12,0.51,8.8,5 +8.5,0.18,0.3,1.1,0.028,34,95,0.99272,2.83,0.36,10,4 +6.5,0.35,0.38,7.4,0.036,20,196,0.99712,3.47,0.48,9.1,6 +6.8,0.22,0.26,1.2,0.041,29,182,0.99104,3.04,0.35,11.2,5 +6.3,0.18,0.24,3.4,0.053,20,119,0.99373,3.11,0.52,9.2,6 +6.6,0.26,0.22,18.15,0.05,23,139,0.99904,3.06,0.5,9.2,5 +6.6,0.3,0.45,8,0.038,54,200,0.9956,3.18,0.48,9.5,5 +6.3,0.34,0.27,2.5,0.024,40,152,0.99095,3.35,0.6,11.9,7 +7.7,0.3,0.23,2,0.068,28,138,0.99382,3.11,0.62,9.8,5 +7.7,0.31,0.23,2,0.069,29,134,0.99382,3.11,0.62,9.8,5 +5.7,0.265,0.28,6.9,0.036,46,150,0.99299,3.36,0.44,10.8,7 +5.4,0.255,0.33,1.2,0.051,29,122,0.99048,3.37,0.66,11.3,6 +6.6,0.26,0.28,9.4,0.028,13,121,0.99254,3.17,0.34,12.1,6 +4.8,0.17,0.28,2.9,0.03,22,111,0.9902,3.38,0.34,11.3,7 +5.7,0.265,0.28,6.9,0.036,46,150,0.99299,3.36,0.44,10.8,7 +6.2,0.2,0.33,5.4,0.028,21,75,0.99012,3.36,0.41,13.5,7 +7.5,0.28,0.41,1.3,0.044,11,126,0.99293,3.28,0.45,10.3,5 +6.2,0.22,0.2,20.8,0.035,58,184,1.00022,3.11,0.53,9,6 +7,0.34,0.26,10.3,0.041,51,166,0.99382,3.08,0.35,11.6,6 +7.5,0.28,0.41,1.3,0.044,11,126,0.99293,3.28,0.45,10.3,5 +6.5,0.19,0.34,1.6,0.029,39,116,0.98954,3.21,0.68,12.5,6 +6,0.21,0.29,13.1,0.042,28,125,0.99936,3.39,0.45,8.6,5 +6.1,0.22,0.46,1.8,0.16,34,74,0.9884,3.19,0.33,13.4,6 +6.5,0.32,0.48,8,0.026,18,88,0.99144,3.22,0.79,12.7,4 +7.1,0.21,0.72,1.6,0.167,65,120,0.99324,2.97,0.51,9.2,5 +5.6,0.26,0.18,1.4,0.034,18,135,0.99174,3.32,0.35,10.2,6 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +7.4,0.27,0.28,1.8,0.04,45,121,0.99043,3.02,0.4,11.9,5 +6.8,0.22,0.3,10.6,0.07,67,194,0.99654,2.89,0.42,9,6 +6.2,0.24,0.25,12.5,0.055,47,134,0.99758,3.3,0.51,9,5 +6.3,0.28,0.29,6.8,0.051,40,143,0.99374,3.43,0.59,11,6 +7,0.15,0.28,14.7,0.051,29,149,0.99792,2.96,0.39,9,7 +5.5,0.17,0.23,2.9,0.039,10,108,0.99243,3.28,0.5,10,5 +6.5,0.26,0.34,1.4,0.04,25,184,0.99216,3.29,0.46,10.7,5 +6.6,0.27,0.33,1.4,0.042,24,183,0.99215,3.29,0.46,10.7,5 +5.4,0.46,0.15,2.1,0.026,29,130,0.98953,3.39,0.77,13.4,8 +7.8,0.19,0.32,7.4,0.015,47,124,0.99278,2.99,0.39,11,6 +5.5,0.17,0.23,2.9,0.039,10,108,0.99243,3.28,0.5,10,5 +6.5,0.26,0.34,1.4,0.04,25,184,0.99216,3.29,0.46,10.7,5 +6.6,0.27,0.33,1.4,0.042,24,183,0.99215,3.29,0.46,10.7,5 +7.8,0.19,0.32,7.4,0.015,47,124,0.99278,2.99,0.39,11,6 +7.8,0.2,0.32,5,0.016,31,101,0.99186,2.99,0.39,11,6 +6.1,0.17,0.28,2.5,0.028,22,98,0.99072,3.16,0.37,11.1,7 +7.4,0.2,0.35,6.1,0.025,10,40,0.99244,2.79,0.52,10.9,5 
+6.7,0.39,0.24,2.7,0.017,22,80,0.99084,3.03,0.37,11.5,5 +5.4,0.46,0.15,2.1,0.026,29,130,0.98953,3.39,0.77,13.4,8 +6.9,0.4,0.17,12.9,0.033,59,186,0.99754,3.08,0.49,9.4,5 +6.9,0.4,0.17,12.9,0.033,59,186,0.99754,3.08,0.49,9.4,5 +6.9,0.4,0.17,12.9,0.033,59,186,0.99754,3.08,0.49,9.4,5 +6.3,0.24,0.29,13.7,0.035,53,134,0.99567,3.17,0.38,10.6,6 +6.9,0.4,0.17,12.9,0.033,59,186,0.99754,3.08,0.49,9.4,5 +7.4,0.27,0.31,2.4,0.014,15,143,0.99094,3.03,0.65,12,4 +6.1,0.27,0.28,9.8,0.042,61,125,0.99532,3.14,0.42,10.2,6 +6.3,0.24,0.29,13.7,0.035,53,134,0.99567,3.17,0.38,10.6,6 +5,0.61,0.12,1.3,0.009,65,100,0.9874,3.26,0.37,13.5,5 +6.7,0.42,0.39,12.1,0.04,61,248,0.99794,3.31,0.58,9.7,5 +6.5,0.33,0.28,6.1,0.018,41,103,0.99122,3.24,0.32,12.2,6 +6.9,0.33,0.31,7.7,0.04,29,135,0.99226,3.11,0.57,12.3,5 +6.5,0.33,0.28,6.1,0.018,41,103,0.99122,3.24,0.32,12.2,6 +6.3,0.15,0.3,1.4,0.022,38,100,0.99099,3.42,0.57,11.4,7 +6.5,0.32,0.45,7.7,0.022,31,97,0.99134,3.2,0.7,12.7,7 +6.7,0.42,0.39,12.1,0.04,61,248,0.99794,3.31,0.58,9.7,5 +7.4,0.25,0.29,6.8,0.02,31,113,0.99338,3.13,0.29,10.8,6 +7.6,0.27,0.3,9.2,0.018,23,96,0.9938,3.08,0.29,11,6 +6.4,0.27,0.45,8.3,0.05,52,196,0.9955,3.18,0.48,9.5,5 +6.5,0.25,0.27,17.4,0.064,29,140,0.99776,3.2,0.49,10.1,6 +5.6,0.19,0.31,2.7,0.027,11,100,0.98964,3.46,0.4,13.2,7 +7.4,0.29,0.48,12.8,0.037,61.5,182,0.99808,3.02,0.34,8.8,5 +6.4,0.34,0.44,8.2,0.043,54,201,0.99551,3.18,0.48,9.5,5 +6.6,0.27,0.52,8.1,0.044,53,202,0.99548,3.18,0.48,9.5,5 +6.6,0.26,0.52,8.2,0.047,52,191,0.99541,3.16,0.47,9.5,6 +6.4,0.27,0.45,8.3,0.05,52,196,0.9955,3.18,0.48,9.5,5 +6.5,0.26,0.5,8,0.051,46,197,0.99536,3.18,0.47,9.5,5 +6.8,0.25,0.3,11.8,0.043,53,133,0.99524,3.03,0.58,10.4,6 +6.3,0.32,0.26,12,0.049,63,170,0.9961,3.14,0.55,9.9,6 +5.5,0.24,0.45,1.7,0.046,22,113,0.99224,3.22,0.48,10,5 +6.5,0.25,0.27,17.4,0.064,29,140,0.99776,3.2,0.49,10.1,6 +6.6,0.13,0.29,13.9,0.056,33,95,0.99702,3.17,0.39,9.4,6 +7,0.39,0.21,10.7,0.098,13,91,0.99657,3.03,0.47,9.3,5 +7.9,0.21,0.39,2,0.057,21,138,0.99176,3.05,0.52,10.9,5 +7,0.3,0.28,2.2,0.042,21,177,0.99166,3.2,0.57,11.4,5 +8.1,0.2,0.3,1.3,0.036,7,49,0.99242,2.99,0.73,10.3,5 +8.3,0.18,0.3,1.1,0.033,20,57,0.99109,3.02,0.51,11,6 +7.9,0.21,0.39,2,0.057,21,138,0.99176,3.05,0.52,10.9,5 +7.2,0.17,0.34,6.4,0.042,16,111,0.99278,2.99,0.4,10.8,6 +8.1,0.2,0.3,1.3,0.036,7,49,0.99242,2.99,0.73,10.3,5 +8.3,0.18,0.3,1.1,0.033,20,57,0.99109,3.02,0.51,11,6 +7,0.39,0.21,10.7,0.098,13,91,0.99657,3.03,0.47,9.3,5 +6.8,0.21,0.62,6.4,0.041,7,113,0.99358,2.96,0.59,10.2,5 +6.9,0.21,0.62,6.3,0.042,7,109,0.99358,2.96,0.59,10.2,6 +7.2,0.17,0.34,6.4,0.042,16,111,0.99278,2.99,0.4,10.8,6 +6.8,0.26,0.34,15.1,0.06,42,162,0.99705,3.24,0.52,10.5,3 +7.2,0.28,0.38,2,0.052,23,156,0.9912,3.13,0.52,11.1,5 +7.9,0.21,0.39,2,0.057,21,138,0.99176,3.05,0.52,10.9,5 +7,0.3,0.28,2.2,0.042,21,177,0.99166,3.2,0.57,11.4,5 +7.4,0.34,0.28,12.1,0.049,31,149,0.99677,3.22,0.49,10.3,5 +6.3,0.43,0.32,8.8,0.042,18,106,0.99172,3.28,0.33,12.9,7 +6.8,0.41,0.3,8.8,0.045,28,131,0.9953,3.12,0.59,9.9,5 +6.3,0.4,0.24,5.1,0.036,43,131,0.99186,3.24,0.44,11.3,6 +5.1,0.35,0.26,6.8,0.034,36,120,0.99188,3.38,0.4,11.5,6 +5.1,0.35,0.26,6.8,0.034,36,120,0.99188,3.38,0.4,11.5,6 +6.3,0.3,0.2,3.7,0.039,34,132,0.99158,3,0.38,10.7,5 +6.9,0.28,0.28,12.2,0.042,52,139,0.99522,3.03,0.56,10.4,6 +7,0.33,0.28,5.7,0.033,39,204,0.99176,3.17,0.64,12.5,6 +6.7,0.26,0.49,8.1,0.052,48,197,0.99558,3.19,0.48,9.5,5 +7.3,0.24,0.3,2.5,0.042,31,104,0.9911,3.05,0.56,11.3,7 +6.7,0.46,0.21,4,0.034,12,88,0.99016,3.26,0.54,13,6 +5.1,0.35,0.26,6.8,0.034,36,120,0.99188,3.38,0.4,11.5,6 
+5.1,0.23,0.18,1,0.053,13,99,0.98956,3.22,0.39,11.5,5 +6.3,0.4,0.24,5.1,0.036,43,131,0.99186,3.24,0.44,11.3,6 +7.1,0.44,0.23,5.8,0.035,24,100,0.99062,3.15,0.57,13.2,7 +4.8,0.26,0.23,10.6,0.034,23,111,0.99274,3.46,0.28,11.5,7 +6.8,0.31,0.19,3.5,0.086,30,130,0.993,2.83,0.44,9.6,5 +6.8,0.31,0.19,3.5,0.086,30,130,0.993,2.83,0.44,9.6,5 +7,0.15,0.29,16.4,0.058,45,110,0.9978,3.15,0.37,9.7,6 +6.5,0.41,0.22,4.8,0.052,49,142,0.9946,3.14,0.62,9.2,5 +6.2,0.31,0.23,3.3,0.052,34,113,0.99429,3.16,0.48,8.4,5 +8,0.27,0.33,1.2,0.05,41,103,0.99002,3,0.45,12.4,6 +8,0.27,0.33,1.2,0.05,41,103,0.99002,3,0.45,12.4,6 +6.5,0.41,0.22,4.8,0.052,49,142,0.9946,3.14,0.62,9.2,5 +6.2,0.31,0.23,3.3,0.052,34,113,0.99429,3.16,0.48,8.4,5 +6.7,0.37,0.25,2.5,0.028,24,84,0.9909,3.14,0.36,11.7,6 +6.6,0.21,0.5,8.7,0.036,41,191,0.99294,2.96,0.56,11,6 +7.5,0.26,0.31,1.6,0.032,36,109,0.99044,2.97,0.43,11.9,6 +7.5,0.34,0.28,4,0.028,46,100,0.98958,3.2,0.5,13.2,7 +6.7,0.37,0.25,2.5,0.028,24,84,0.9909,3.14,0.36,11.7,6 +6.4,0.32,0.23,16.2,0.055,36,176,0.9986,3.26,0.54,9.1,5 +6.7,0.24,0.32,9,0.023,20,109,0.99262,3.34,0.35,12.6,6 +6.4,0.32,0.23,16.2,0.055,36,176,0.9986,3.26,0.54,9.1,5 +7.1,0.39,0.79,1.4,0.194,23,90,0.99212,3.17,0.46,10.5,6 +8.2,0.31,0.43,7,0.047,18,87,0.99628,3.23,0.64,10.6,5 +6.7,0.24,0.32,9,0.023,20,109,0.99262,3.34,0.35,12.6,6 +5.9,0.17,0.29,3.1,0.03,32,123,0.98913,3.41,0.33,13.7,7 +5.9,0.2,0.23,1.5,0.037,38,93,0.99021,3.36,0.49,12,6 +6.6,0.32,0.26,4.6,0.031,26,120,0.99198,3.4,0.73,12.5,7 +5.9,0.12,0.27,4.8,0.03,40,110,0.99226,3.55,0.68,12.1,6 +5.9,0.18,0.29,4.6,0.032,68,137,0.99159,3.21,0.38,11.3,6 +5.9,0.2,0.23,1.5,0.037,38,93,0.99021,3.36,0.49,12,6 +5.4,0.17,0.27,2.7,0.049,28,104,0.99224,3.46,0.55,10.3,6 +6.1,0.21,0.3,6.3,0.039,47,136,0.99068,3.27,0.31,12.7,6 +7.3,0.25,0.26,7.2,0.048,52,207,0.99587,3.12,0.37,9.2,5 +7.3,0.25,0.26,7.2,0.048,52,207,0.99587,3.12,0.37,9.2,5 +6.2,0.22,0.3,12.4,0.054,108,152,0.99728,3.1,0.47,9.5,6 +6.5,0.27,0.19,6.6,0.045,98,175,0.99364,3.16,0.34,10.1,6 +6.5,0.27,0.19,6.6,0.045,98,175,0.99364,3.16,0.34,10.1,6 +6.6,0.39,0.22,4,0.038,17,98,0.99018,3.25,0.53,13,7 +6,0.31,0.38,4.8,0.04,41,101,0.98968,3.24,0.56,13.1,6 +8.4,0.23,0.32,1.3,0.048,59,113,0.99178,3.1,0.55,11,6 +7.3,0.25,0.26,7.2,0.048,52,207,0.99587,3.12,0.37,9.2,5 +6,0.22,0.25,11.1,0.056,112,177,0.9961,3.08,0.36,9.4,6 +6.2,0.22,0.3,12.4,0.054,108,152,0.99728,3.1,0.47,9.5,6 +6.1,0.23,0.27,9.8,0.055,74,134,0.99534,3.16,0.4,10.2,6 +6.5,0.27,0.19,6.6,0.045,98,175,0.99364,3.16,0.34,10.1,6 +7.3,0.36,0.54,13.3,0.054,63,193,0.99864,3.06,0.49,8.6,4 +7.6,0.37,0.51,11.7,0.094,58,181,0.99776,2.91,0.51,9,5 +6.7,0.26,0.51,8,0.062,50,194,0.99545,3.13,0.5,9.6,5 +7.4,0.22,0.27,1.6,0.057,45,98,0.99299,3.29,0.44,9.9,7 +6.1,0.22,0.28,16.55,0.059,54,135,0.99665,3.2,0.38,10.5,5 +7.1,0.28,0.31,1.5,0.053,20,98,0.99069,3.15,0.5,11.4,5 +6.5,0.35,0.31,10.2,0.069,58,170,0.99692,3.18,0.49,9.4,5 +6.8,0.73,0.2,6.6,0.054,25,65,0.99324,3.12,0.28,11.1,4 +6,0.28,0.24,17.8,0.047,42,111,0.99896,3.1,0.45,8.9,6 +6,0.28,0.24,17.8,0.047,42,111,0.99896,3.1,0.45,8.9,6 +7.1,0.2,0.37,1.5,0.049,28,129,0.99226,3.15,0.52,10.8,5 +6.8,0.33,0.31,7.4,0.045,34,143,0.99226,3.06,0.55,12.2,6 +6,0.28,0.24,17.8,0.047,42,111,0.99896,3.1,0.45,8.9,6 +7.2,0.24,0.36,2,0.029,21,63,0.99076,3.13,0.63,12.5,6 +6.8,0.33,0.31,7.4,0.045,34,143,0.99226,3.06,0.55,12.2,6 +7.2,0.24,0.36,2,0.029,21,63,0.99076,3.13,0.63,12.5,6 +6,0.28,0.24,17.8,0.047,42,111,0.99896,3.1,0.45,8.9,6 +6.2,0.27,0.26,12.1,0.046,43,127,0.9951,3.16,0.37,10.8,6 +6.4,0.38,0.26,8.2,0.043,28,98,0.99234,2.99,0.31,11.4,6 
+7.1,0.2,0.37,1.5,0.049,28,129,0.99226,3.15,0.52,10.8,5 +6,0.21,0.3,8.7,0.036,47,127,0.99368,3.18,0.39,10.6,5 +7,0.34,0.1,3.5,0.044,17,63,0.9937,3.01,0.39,9.2,5 +5.9,0.435,0.16,6.4,0.031,21,134,0.99151,3.24,0.46,12.2,6 +7,0.25,0.33,2.1,0.021,17,76,0.99021,3.26,0.45,12.3,6 +6.7,0.26,0.29,7.7,0.038,40,179,0.99479,3.23,0.56,10.4,6 +7,0.24,0.3,12.3,0.035,72,172,0.9954,2.99,0.57,10.4,6 +8.5,0.23,0.34,1.3,0.035,54,110,0.99176,3.07,0.55,11,7 +6,0.21,0.3,8.7,0.036,47,127,0.99368,3.18,0.39,10.6,5 +7,0.34,0.1,3.5,0.044,17,63,0.9937,3.01,0.39,9.2,5 +4.8,0.65,0.12,1.1,0.013,4,10,0.99246,3.32,0.36,13.5,4 +6.1,0.22,0.38,2.8,0.144,12,65,0.9908,2.95,0.64,11.4,6 +5.8,0.27,0.26,3.5,0.071,26,69,0.98994,3.1,0.38,11.5,6 +5,0.455,0.18,1.9,0.036,33,106,0.98746,3.21,0.83,14,7 +6.5,0.33,0.3,3.8,0.036,34,88,0.99028,3.25,0.63,12.5,7 +6.5,0.33,0.3,3.8,0.036,34,88,0.99028,3.25,0.63,12.5,7 +6.7,0.31,0.3,2.4,0.038,30,83,0.98867,3.09,0.36,12.8,7 +6.2,0.39,0.24,4.8,0.037,45,138,0.99174,3.23,0.43,11.2,7 +6.2,0.39,0.24,4.8,0.037,45,138,0.99174,3.23,0.43,11.2,7 +7.1,0.37,0.3,6.2,0.04,49,139,0.99021,3.17,0.27,13.6,6 +7.2,0.23,0.82,1.3,0.149,70,109,0.99304,2.93,0.42,9.2,6 +6.5,0.33,0.3,3.8,0.036,34,88,0.99028,3.25,0.63,12.5,7 +7.2,0.25,0.32,1.5,0.054,24,105,0.99154,3.17,0.48,11.1,6 +6.2,0.39,0.24,4.8,0.037,45,138,0.99174,3.23,0.43,11.2,7 +4.7,0.455,0.18,1.9,0.036,33,106,0.98746,3.21,0.83,14,7 +7.1,0.37,0.3,6.2,0.04,49,139,0.99021,3.17,0.27,13.6,6 +6.2,0.28,0.51,7.9,0.056,49,206,0.9956,3.18,0.52,9.4,5 +6.4,0.35,0.28,1.6,0.037,31,113,0.98779,3.12,0.4,14.2,7 +6.6,0.31,0.28,1.4,0.035,28,107,0.98836,3,0.4,13.2,6 +7.4,0.25,0.37,2.6,0.05,24,132,0.99138,3.04,0.53,11.2,6 +7.3,0.36,0.34,14.8,0.057,46,173,0.99751,3.14,0.57,10.2,5 +6.7,0.31,0.3,2.4,0.038,30,83,0.98867,3.09,0.36,12.8,7 +8.6,0.31,0.3,0.9,0.045,16,109,0.99249,2.95,0.39,10.1,5 +8.6,0.31,0.3,0.9,0.045,16,109,0.99249,2.95,0.39,10.1,5 +8.6,0.22,0.33,1.2,0.031,38,95,0.99239,2.83,0.31,10.3,5 +6.9,0.14,0.29,9.9,0.056,30,91,0.99512,3.19,0.33,9.9,6 +6.5,0.22,0.31,3.9,0.046,17,106,0.99098,3.15,0.31,11.5,5 +6.6,0.32,0.47,15.6,0.063,27,173,0.99872,3.18,0.56,9,5 +6.6,0.32,0.47,15.6,0.063,27,173,0.99872,3.18,0.56,9,5 +6.1,0.28,0.26,1.5,0.03,25,101,0.98894,3.03,0.41,12.1,6 +6.2,0.3,0.28,1.6,0.036,28,106,0.988245,3.14,0.41,13.3,6 +6.9,0.22,0.28,7.8,0.05,43,116,0.99326,3.22,0.6,11.5,8 +8.7,0.31,0.21,5.6,0.039,28,67,0.99328,2.96,0.52,11,4 +7.3,0.27,0.3,1.3,0.04,26,84,0.99222,3.28,0.53,10.7,6 +7,0.46,0.2,16.7,0.046,50,184,0.99898,3.08,0.56,9.4,5 +5.7,0.23,0.25,7.95,0.042,16,108,0.99486,3.44,0.61,10.3,6 +6.5,0.36,0.36,6.7,0.185,51.5,151,0.99528,3.17,0.42,9.3,5 +8.2,0.18,0.38,1.1,0.04,41,92,0.99062,2.88,0.6,12,6 +6.2,0.27,0.32,6.3,0.048,47,159,0.99282,3.21,0.6,11,6 +6.9,0.4,0.37,8.9,0.053,36,148,0.996,3.16,0.5,9.3,5 +4.9,0.345,0.34,1,0.068,32,143,0.99138,3.24,0.4,10.1,5 +7.2,0.23,0.39,1.5,0.053,26,106,0.99166,3.18,0.47,11.1,6 +6.4,0.2,0.15,6.6,0.046,26,113,0.99408,2.99,0.58,9.9,6 +6.1,0.27,0.32,6.2,0.048,47,161,0.99281,3.22,0.6,11,6 +6.2,0.27,0.32,6.3,0.048,47,159,0.99282,3.21,0.6,11,6 +6,0.3,0.33,2.1,0.042,31,127,0.98964,3.32,0.42,12.5,6 +6.1,0.3,0.32,2.2,0.042,41,142,0.98952,3.31,0.44,12.7,7 +5.7,0.14,0.3,5.4,0.045,26,105,0.99469,3.32,0.45,9.3,5 +6.9,0.4,0.37,8.9,0.053,36,148,0.996,3.16,0.5,9.3,5 +4.9,0.345,0.34,1,0.068,32,143,0.99138,3.24,0.4,10.1,5 +6.3,0.33,0.2,17.9,0.066,36,161,0.9991,3.14,0.51,8.8,5 +7,0.16,0.3,2.6,0.043,34,90,0.99047,2.88,0.47,11.2,6 +8.4,0.22,0.3,1.3,0.038,45,122,0.99178,3.13,0.54,10.8,7 +6.3,0.33,0.2,17.9,0.066,36,161,0.9991,3.14,0.51,8.8,5 
+7,0.16,0.3,2.6,0.043,34,90,0.99047,2.88,0.47,11.2,6 +5.4,0.24,0.18,2.3,0.05,22,145,0.99207,3.24,0.46,10.3,5 +7.7,0.31,0.36,4.3,0.026,15,87,0.99152,3.11,0.48,12,5 +5.6,0.185,0.19,7.1,0.048,36,110,0.99438,3.26,0.41,9.5,6 +5.6,0.185,0.19,7.1,0.048,36,110,0.99438,3.26,0.41,9.5,6 +6.6,0.43,0.24,11.9,0.04,54,159,0.99622,3.14,0.54,9.8,6 +7.6,0.39,0.46,11.7,0.084,55,170,0.99773,2.91,0.51,9,5 +7.2,0.58,0.27,5.8,0.032,40,118,0.99088,3.17,0.53,13,7 +6,0.34,0.32,3.8,0.044,13,116,0.99108,3.39,0.44,11.8,7 +7.5,0.35,0.48,12.4,0.056,61,176.5,0.99803,2.97,0.52,8.8,5 +7.3,0.38,0.23,6.5,0.05,18,102,0.99304,3.1,0.55,11.2,4 +5.4,0.185,0.19,7.1,0.048,36,110,0.99438,3.26,0.41,9.5,6 +6.3,0.27,0.51,7.6,0.049,35,200,0.99548,3.16,0.54,9.4,4 +6.5,0.29,0.52,7.9,0.049,35,192,0.99551,3.16,0.51,9.5,6 +6.4,0.17,0.3,2.8,0.034,33,125,0.99152,3.03,0.49,10.4,6 +6.7,0.18,0.31,10.6,0.035,42,143,0.99572,3.08,0.49,9.8,7 +6.4,0.17,0.3,2.8,0.034,33,125,0.99152,3.03,0.49,10.4,6 +6.8,0.37,0.67,1.5,0.175,16,98,0.99244,3.06,0.56,10.3,6 +6.3,0.27,0.51,7.6,0.049,35,200,0.99548,3.16,0.54,9.4,4 +6.5,0.29,0.52,7.9,0.049,35,192,0.99551,3.16,0.51,9.5,6 +6.1,0.24,0.26,1.7,0.033,61,134,0.9903,3.19,0.81,11.9,7 +7,0.32,0.29,7.6,0.025,35,124,0.99162,3.15,0.65,12.8,7 +6.9,0.27,0.25,7.5,0.03,18,117,0.99116,3.09,0.38,13,6 +6.5,0.29,0.53,1.7,0.04,41,192,0.9922,3.26,0.59,10.4,7 +6.5,0.29,0.52,1.7,0.034,41,193,0.99223,3.25,0.59,10.4,6 +6.1,0.22,0.25,12.1,0.035,54,135,0.99481,3.21,0.4,10.7,5 +6.3,0.22,0.27,4.5,0.036,81,157,0.9928,3.05,0.76,10.7,7 +6.1,0.24,0.26,1.7,0.033,61,134,0.9903,3.19,0.81,11.9,7 +5.6,0.23,0.25,8,0.043,31,101,0.99429,3.19,0.42,10.4,6 +7,0.32,0.29,7.6,0.025,35,124,0.99162,3.15,0.65,12.8,7 +6.8,0.11,0.27,8.6,0.044,45,104,0.99454,3.2,0.37,9.9,6 +6.8,0.11,0.27,8.6,0.044,45,104,0.99454,3.2,0.37,9.9,6 +7.3,0.23,0.41,14.6,0.048,73,223,0.99863,3.16,0.71,9.4,6 +6.1,0.2,0.17,1.6,0.048,46,129,0.991,3.3,0.43,11.4,6 +6.8,0.11,0.27,8.6,0.044,45,104,0.99454,3.2,0.37,9.9,6 +7.3,0.23,0.41,14.6,0.048,73,223,0.99863,3.16,0.71,9.4,6 +6.9,0.2,0.41,1.1,0.06,36,104,0.99317,2.99,0.39,9.2,5 +6.7,0.19,0.32,3.7,0.041,26,76,0.99173,2.9,0.57,10.5,7 +6.7,0.28,0.34,8.9,0.048,32,111,0.99455,3.25,0.54,11,7 +6.7,0.28,0.34,8.9,0.048,32,111,0.99455,3.25,0.54,11,7 +8,0.37,0.31,4.7,0.038,3,127,0.99186,2.9,0.72,12.1,5 +6.7,0.28,0.34,8.9,0.048,32,111,0.99455,3.25,0.54,11,7 +6,0.26,0.29,3.1,0.041,37,144,0.98944,3.22,0.39,12.8,7 +6.4,0.24,0.49,5.8,0.053,25,120,0.9942,3.01,0.98,10.5,6 +6.4,0.24,0.49,5.8,0.053,25,120,0.9942,3.01,0.98,10.5,6 +6.4,0.24,0.49,5.8,0.053,25,120,0.9942,3.01,0.98,10.5,6 +6.4,0.25,0.57,1,0.062,21,122,0.99238,3,0.4,9.5,5 +6.1,0.25,0.48,15.8,0.052,25,94,0.99782,3.07,0.45,9.2,6 +6.8,0.14,0.35,1.5,0.047,40,117,0.99111,3.07,0.72,11.1,6 +6.5,0.38,0.26,5.2,0.042,33,112,0.99067,3.06,0.5,12.3,7 +6.8,0.14,0.35,1.5,0.047,40,117,0.99111,3.07,0.72,11.1,6 +5.4,0.15,0.32,2.5,0.037,10,51,0.98878,3.04,0.58,12.6,6 +6.4,0.25,0.57,1,0.062,21,122,0.99238,3,0.4,9.5,5 +6.1,0.25,0.48,15.8,0.052,25,94,0.99782,3.07,0.45,9.2,6 +6.8,0.22,0.32,5.9,0.054,40,152,0.9938,3.2,0.57,10.8,6 +7.2,0.21,0.29,3.1,0.044,39,122,0.99143,3,0.6,11.3,6 +6,0.26,0.29,3.1,0.041,37,144,0.98944,3.22,0.39,12.8,7 +6.4,0.24,0.49,5.8,0.053,25,120,0.9942,3.01,0.98,10.5,6 +6.5,0.46,0.24,11.5,0.051,56,171,0.99588,3.08,0.56,9.8,6 +6.5,0.18,0.48,18,0.054,56,183,1.00038,2.98,0.61,8.5,6 +6.2,0.32,0.12,4.8,0.054,6,97,0.99424,3.16,0.5,9.3,5 +7.2,0.4,0.24,8.5,0.055,45,151,0.99626,3.2,0.52,9.2,5 +5.9,0.23,0.24,1.6,0.037,32,115,0.99076,3.21,0.51,11.4,6 +6.4,0.18,0.48,18,0.054,56,183,1.00038,2.98,0.61,8.5,6 
+6.2,0.32,0.12,4.8,0.054,6,97,0.99424,3.16,0.5,9.3,5 +6.4,0.37,0.12,5.9,0.056,6,91,0.99536,3.06,0.46,8.4,4 +7,0.23,0.42,1.1,0.062,35,100,0.99318,3.04,0.4,9.2,5 +7.2,0.4,0.24,8.5,0.055,45,151,0.99626,3.2,0.52,9.2,5 +7.6,0.19,0.37,13.1,0.033,52,151,0.99726,3.18,0.79,10.4,6 +6,0.28,0.27,4.1,0.046,50,147,0.99126,3.27,0.56,11.6,6 +6.2,0.32,0.45,2.9,0.029,37,94,0.98998,3.25,0.6,12.4,6 +7.6,0.19,0.37,13.1,0.033,52,151,0.99726,3.18,0.79,10.4,6 +6.4,0.26,0.26,1.1,0.052,22,176,0.99304,3.09,0.54,9.3,5 +5.9,0.25,0.27,1.5,0.029,37,81,0.9892,3.2,0.46,12.2,6 +6.1,0.28,0.3,7.75,0.031,33,139,0.99296,3.22,0.46,11,6 +6.9,0.19,0.38,1.15,0.023,30,105,0.99047,3.11,0.38,11.4,5 +6.4,0.29,0.57,1,0.06,15,120,0.9924,3.06,0.41,9.5,5 +6.8,0.27,0.22,17.8,0.034,16,116,0.9989,3.07,0.53,9.2,5 +7.5,0.26,0.38,5.7,0.021,23,125,0.99338,3.13,0.62,11.1,6 +6.8,0.27,0.22,17.8,0.034,16,116,0.9989,3.07,0.53,9.2,5 +6.4,0.2,0.22,7.4,0.032,53,172,0.99404,3.24,0.58,11,6 +7.3,0.33,0.22,1.4,0.041,40,177,0.99287,3.14,0.48,9.9,5 +7.3,0.34,0.22,1.4,0.044,43,176,0.99286,3.14,0.46,9.9,5 +6.4,0.29,0.57,1,0.06,15,120,0.9924,3.06,0.41,9.5,5 +6.1,1.1,0.16,4.4,0.033,8,109,0.99058,3.35,0.47,12.4,4 +6.3,0.24,0.29,1.6,0.052,48,185,0.9934,3.21,0.5,9.4,5 +6.2,0.24,0.22,7.9,0.053,45,149,0.99545,3.23,0.52,9.3,5 +7.4,0.16,0.27,15.5,0.05,25,135,0.9984,2.9,0.43,8.7,7 +7.4,0.16,0.27,15.5,0.05,25,135,0.9984,2.9,0.43,8.7,7 +7.4,0.16,0.27,15.5,0.05,25,135,0.9984,2.9,0.43,8.7,7 +7.4,0.16,0.27,15.5,0.05,25,135,0.9984,2.9,0.43,8.7,7 +7.2,0.17,0.28,17.55,0.05,33,154,0.99971,2.94,0.43,9,7 +6.9,0.19,0.35,13.5,0.038,49,118,0.99546,3,0.63,10.7,6 +6.9,0.19,0.35,13.5,0.038,49,118,0.99546,3,0.63,10.7,6 +6.8,0.16,0.36,1.3,0.034,32,98,0.99058,3.02,0.58,11.3,6 +7.4,0.16,0.27,15.5,0.05,25,135,0.9984,2.9,0.43,8.7,7 +6.8,0.3,0.27,11.6,0.028,22,97,0.99314,2.96,0.38,11.7,6 +6.2,0.24,0.22,7.9,0.053,45,149,0.99545,3.23,0.52,9.3,5 +7.4,0.16,0.27,15.5,0.05,25,135,0.9984,2.9,0.43,8.7,7 +7.2,0.17,0.28,17.55,0.05,33,154,0.99971,2.94,0.43,9,7 +6.8,0.3,0.27,11.6,0.028,22,97,0.99314,2.96,0.38,11.7,6 +6.5,0.43,0.18,13.15,0.032,25,131,0.99565,3.23,0.51,10.7,5 +6.6,0.17,0.36,1.9,0.036,38,110,0.99056,3.05,0.54,11.4,6 +6.9,0.19,0.35,13.5,0.038,49,118,0.99546,3,0.63,10.7,6 +6.8,0.16,0.36,1.3,0.034,32,98,0.99058,3.02,0.58,11.3,6 +6.4,0.41,0.01,6.1,0.048,20,70,0.99362,3.19,0.42,10,5 +6.4,0.41,0.01,6.1,0.048,20,70,0.99362,3.19,0.42,10,5 +7.4,0.36,0.32,1.9,0.036,27,119,0.99196,3.15,0.49,11.2,6 +6.1,0.17,0.21,1.9,0.09,44,130,0.99255,3.07,0.41,9.7,5 +5.5,0.28,0.21,1.6,0.032,23,85,0.99027,3.42,0.42,12.5,5 +6.6,0.5,0.26,11.3,0.029,32,110,0.99302,3.27,0.78,12.9,8 +7.1,0.44,0.27,8.4,0.057,60,160,0.99257,3.16,0.36,11.8,6 +6.9,0.38,0.28,8.3,0.062,22,166,0.99506,3.16,0.72,10.6,5 +7.1,0.44,0.27,8.4,0.057,60,160,0.99257,3.16,0.36,11.8,6 +6.2,0.24,0.28,12.2,0.049,54,133,0.9952,3.19,0.37,10.7,6 +6.1,0.28,0.27,8,0.048,41,162,0.99498,3.21,0.51,9.9,5 +7.6,0.26,0.32,1.3,0.048,23,76,0.9903,2.96,0.46,12,6 +7.5,0.16,0.38,12.7,0.043,70.5,163,0.99706,3.15,0.82,10.4,7 +6.5,0.36,0.16,1.3,0.054,11,107,0.99398,3.19,0.39,8.5,5 +6.6,0.35,0.19,10.5,0.06,15,82,0.99588,3.13,0.38,9.9,4 +5.7,0.25,0.26,12.5,0.049,52.5,120,0.99691,3.08,0.45,9.4,6 +7.4,0.37,0.26,9.6,0.05,33,134,0.99608,3.13,0.46,10.4,5 +5.7,0.25,0.21,1.5,0.044,21,108,0.99142,3.3,0.59,11,6 +5.8,0.23,0.21,1.5,0.044,21,110,0.99138,3.3,0.57,11,6 +5.4,0.265,0.28,7.8,0.052,27,91,0.99432,3.19,0.38,10.4,6 +5.7,0.25,0.27,10.8,0.05,58,116,0.99592,3.1,0.5,9.8,6 +5.7,0.25,0.26,12.5,0.049,52.5,106,0.99691,3.08,0.45,9.4,6 +5.9,0.23,0.28,8.6,0.046,37,142,0.99432,3.23,0.53,10.6,6 
+6.2,0.3,0.32,1.2,0.052,32,185,0.99266,3.28,0.44,10.1,5 +6.5,0.33,0.24,14.5,0.048,20,96,0.99456,3.06,0.3,11.5,8 +7.4,0.26,0.29,3.7,0.048,14,73,0.9915,3.06,0.45,11.4,6 +7,0.2,0.4,1.1,0.058,30,93,0.99322,3.03,0.38,9.2,6 +6.5,0.21,0.42,1.1,0.059,33,101,0.9927,3.12,0.38,9.7,6 +7.3,0.25,0.27,3.8,0.047,16,79,0.99173,3.07,0.46,11.3,6 +6.8,0.27,0.24,4.6,0.098,36,127,0.99412,3.15,0.49,9.6,6 +6.7,0.24,0.3,10.2,0.07,44,179,0.99666,2.86,0.46,8.9,6 +6.4,0.14,0.28,7.9,0.057,21,82,0.99425,3.26,0.36,10,6 +6.4,0.5,0.2,2.4,0.059,19,112,0.99314,3.18,0.4,9.2,6 +6.6,0.41,0.27,10.7,0.11,20,103,0.99672,3.08,0.41,9,6 +6.4,0.25,0.28,4.9,0.03,29,98,0.99024,3.09,0.58,12.8,7 +6.6,0.41,0.27,10.7,0.11,20,103,0.99672,3.08,0.41,9,6 +8,0.25,0.35,1.1,0.054,13,136,0.99366,3.08,0.55,9.5,5 +6.4,0.14,0.28,7.9,0.057,21,82,0.99425,3.26,0.36,10,6 +6.6,0.21,0.34,5.6,0.046,30,140,0.99299,3.22,0.38,11,5 +6.4,0.5,0.2,2.4,0.059,19,112,0.99314,3.18,0.4,9.2,6 +6.3,0.29,0.23,14.2,0.037,24,99,0.99528,3.08,0.38,10.6,6 +6.9,0.37,0.23,9.5,0.057,54,166,0.99568,3.23,0.42,10,5 +6.9,0.37,0.23,9.5,0.057,54,166,0.99568,3.23,0.42,10,5 +5.7,0.31,0.28,4.1,0.03,22,86,0.99062,3.31,0.38,11.7,7 +6.9,0.45,0.27,4.7,0.035,17,80,0.99058,3.12,0.36,12.5,7 +6.9,0.3,0.45,1.4,0.039,36,122,0.99059,3.07,0.47,11.1,7 +5.3,0.23,0.56,0.9,0.041,46,141,0.99119,3.16,0.62,9.7,5 +6.8,0.3,0.26,20.3,0.037,45,150,0.99727,3.04,0.38,12.3,6 +6.7,0.28,0.42,3.5,0.035,43,105,0.99021,3.18,0.38,12.2,6 +5,0.255,0.22,2.7,0.043,46,153,0.99238,3.75,0.76,11.3,6 +7.6,0.4,0.27,1.2,0.053,23,193,0.99164,3.22,0.38,11.6,5 +5.5,0.21,0.25,1.2,0.04,18,75,0.99006,3.31,0.56,11.3,6 +6,0.2,0.25,2,0.041,30,95,0.99078,3.27,0.56,11.1,6 +6.1,0.17,0.29,1.1,0.041,32,92,0.99036,3.26,0.57,11.2,6 +7.5,0.21,0.29,1.5,0.046,35,107,0.99123,3.15,0.45,11.3,6 +7.3,0.26,0.32,1.2,0.041,29,94,0.98978,3.07,0.45,12,6 +6.2,0.35,0.2,18.1,0.069,33,158,0.99908,3.15,0.5,8.8,6 +6.2,0.35,0.2,18.1,0.069,33,158,0.99908,3.15,0.5,8.8,6 +6.5,0.43,0.31,3.6,0.046,19,143,0.99022,3.15,0.34,12,8 +6.5,0.4,0.31,3.5,0.046,22,147,0.99024,3.15,0.31,12,7 +7.4,0.28,0.5,12.1,0.049,48,122,0.9973,3.01,0.44,9,5 +6.3,0.23,0.22,17.45,0.054,42,151,0.99853,3.12,0.6,9.3,6 +6.2,0.34,0.25,12.1,0.059,33,171,0.99769,3.14,0.56,8.7,6 +6.6,0.44,0.32,3,0.095,13,75,0.98954,3.1,0.63,12.8,6 +6,0.13,0.36,1.6,0.052,23,72,0.98974,3.1,0.5,11.5,7 +6.3,0.17,0.23,5.7,0.048,44,147,0.99382,3.08,0.54,10,5 +6.3,0.18,0.22,5.6,0.047,45,147,0.99383,3.09,0.54,10,5 +6.7,0.31,0.34,6.8,0.059,51,215,0.99538,3.33,0.56,10.3,5 +6.6,0.33,0.32,15.6,0.054,62,227,0.99734,3.25,0.56,10.4,5 +6.3,0.34,0.31,6,0.02,18,68,0.98981,3.22,0.29,13.4,7 +6.8,0.29,0.32,1.8,0.032,18,130,0.99095,3.05,0.62,11.2,6 +7.4,0.31,0.26,8.6,0.048,47,206,0.9964,3.26,0.36,9.1,5 +7.4,0.31,0.26,8.6,0.048,47,206,0.9964,3.26,0.36,9.1,5 +5.7,0.25,0.27,11.5,0.04,24,120,0.99411,3.33,0.31,10.8,6 +6.8,0.27,0.28,7.8,0.038,26,89,0.9915,3.24,0.34,12.5,6 +5.9,0.26,0.24,2.4,0.046,27,132,0.99234,3.63,0.73,11.3,5 +5.9,0.65,0.23,5,0.035,20,128,0.99016,3.46,0.48,12.8,6 +7.4,0.31,0.26,8.6,0.048,47,206,0.9964,3.26,0.36,9.1,5 +6.6,0.23,0.32,1.5,0.041,8,72,0.98949,3.22,0.39,12.7,6 +6.8,0.18,0.35,5.4,0.054,53,143,0.99287,3.1,0.54,11,7 +6.8,0.28,0.29,11.9,0.052,51,149,0.99544,3.02,0.58,10.4,6 +6.8,0.28,0.29,11.9,0.052,51,149,0.99544,3.02,0.58,10.4,6 +5.9,0.27,0.27,9,0.051,43,136,0.9941,3.25,0.53,10.7,6 +6.1,0.25,0.28,10,0.055,56,131,0.994,3.22,0.35,10.9,6 +6.8,0.28,0.29,11.9,0.052,51,149,0.99544,3.02,0.58,10.4,6 +6.8,0.26,0.29,11.9,0.052,54,160,0.99546,3.03,0.58,10.4,6 +7.1,0.13,0.29,15.5,0.064,56,115.5,0.99737,3.16,0.41,9.7,7 
+6.8,0.18,0.35,5.4,0.054,53,143,0.99287,3.1,0.54,11,7 +6.2,0.2,0.25,15,0.055,8,120,0.99767,3.19,0.53,9.6,6 +5.8,0.24,0.28,1.4,0.038,40,76,0.98711,3.1,0.29,13.9,7 +7.6,0.48,0.31,9.4,0.046,6,194,0.99714,3.07,0.61,9.4,5 +7.4,0.26,0.32,3.7,0.032,29,193,0.99134,3.1,0.67,12.5,6 +6.2,0.2,0.25,15,0.055,8,120,0.99767,3.19,0.53,9.6,6 +6.1,0.3,0.47,1.4,0.049,50,187,0.9927,3.19,0.45,9.5,5 +6.2,0.32,0.5,6.5,0.048,61,186,0.9948,3.19,0.45,9.6,5 +6.1,0.3,0.47,1.4,0.049,50,187,0.9927,3.19,0.45,9.5,5 +6.3,0.34,0.52,6.3,0.047,63,186,0.99481,3.18,0.44,9.6,5 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +7.2,0.26,0.38,1.5,0.061,12,120,0.99192,3.18,0.46,10.4,5 +7,0.31,0.35,1.6,0.063,13,119,0.99184,3.22,0.5,10.7,5 +6.6,0.22,0.35,1.4,0.05,23,83,0.99019,3.17,0.48,12,7 +5.8,0.23,0.31,3.5,0.044,35,158,0.98998,3.19,0.37,12.1,7 +6.3,0.17,0.32,1,0.04,39,118,0.98886,3.31,0.4,13.1,8 +6,0.19,0.26,1.4,0.039,30,104,0.98998,3.32,0.41,12.4,6 +6.7,0.21,0.34,1.5,0.035,45,123,0.98949,3.24,0.36,12.6,7 +7.4,0.16,0.3,13.7,0.056,33,168,0.99825,2.9,0.44,8.7,7 +6.6,0.22,0.37,1.6,0.04,31,101,0.99009,3.15,0.66,12,5 +6.8,0.34,0.27,5.2,0.06,14,169,0.99252,3.27,0.57,11.6,6 +7.1,0.34,0.86,1.4,0.174,36,99,0.99288,2.92,0.5,9.3,5 +6.3,0.24,0.22,11.9,0.05,65,179,0.99659,3.06,0.58,9.3,6 +6.9,0.35,0.39,2.4,0.048,25,157,0.99133,3.2,0.54,11.1,7 +6.8,0.24,0.33,3.2,0.049,68,161,0.99324,3.1,0.69,10.2,6 +6.4,0.25,0.33,1.7,0.037,35,113,0.99164,3.23,0.66,10.6,6 +5.8,0.19,0.33,4.2,0.038,49,133,0.99107,3.16,0.42,11.3,7 +6.9,0.24,0.4,15.4,0.052,81,198,0.9986,3.2,0.69,9.4,5 +6.5,0.31,0.61,13,0.053,31,123,0.99708,3.09,0.5,9.3,6 +6.6,0.25,0.32,5.6,0.039,15,68,0.99163,2.96,0.52,11.1,6 +7.5,0.38,0.56,9.7,0.055,15,170,0.99605,3.13,0.65,9.9,6 +6.2,0.3,0.3,2.5,0.041,29,82,0.99065,3.31,0.61,11.8,7 +6.4,0.33,0.28,4,0.04,24,81,0.9903,3.26,0.64,12.6,7 +6.9,0.24,0.4,15.4,0.052,81,198,0.9986,3.2,0.69,9.4,5 +7.6,0.27,0.32,1.2,0.043,23,72,0.99236,3.06,0.68,10.5,5 +5.9,0.24,0.34,2,0.037,40,108,0.98948,3.19,0.5,12.3,6 +5.3,0.33,0.3,1.2,0.048,25,119,0.99045,3.32,0.62,11.3,6 +6.4,0.21,0.21,5.1,0.097,21,105,0.9939,3.07,0.46,9.6,5 +7,0.22,0.3,1.4,0.04,14,63,0.98985,3.2,0.33,12,6 +7.8,0.27,0.35,1.2,0.05,36,140,0.99138,3.09,0.45,11.2,5 +6.7,0.2,0.24,6.5,0.044,28,100,0.99348,3.12,0.33,10.2,6 +8.1,0.27,0.33,1.3,0.045,26,100,0.99066,2.98,0.44,12.4,6 +6.7,0.2,0.24,6.5,0.044,28,100,0.99348,3.12,0.33,10.2,6 +7.1,0.45,0.24,2.7,0.04,24,87,0.98862,2.94,0.38,13.4,8 +5.8,0.22,0.29,1.3,0.036,25,68,0.98865,3.24,0.35,12.6,6 +6.3,0.3,0.48,7.4,0.053,34,149,0.99472,3.18,0.53,9.8,5 +7.9,0.36,0.53,12.9,0.049,63,139,0.99792,2.94,0.45,9.1,5 +8.1,0.27,0.33,1.3,0.045,26,100,0.99066,2.98,0.44,12.4,6 +8,0.24,0.33,1.2,0.044,28,101,0.99035,3.03,0.43,12.5,6 +6.7,0.41,0.27,2.6,0.033,25,85,0.99086,3.05,0.34,11.7,6 +6.7,0.24,0.31,2.3,0.044,37,113,0.99013,3.29,0.46,12.9,6 +6.2,0.3,0.32,1.3,0.054,27,183,0.99266,3.3,0.43,10.1,5 +6.9,0.26,0.38,10.5,0.044,33,139,0.99517,3.06,0.5,10.3,6 +6.7,0.41,0.27,2.6,0.033,25,85,0.99086,3.05,0.34,11.7,6 +5.9,0.32,0.2,14.4,0.05,29,144,0.99666,3.24,0.41,10.3,6 +6.1,0.25,0.3,1.2,0.036,42,107,0.991,3.34,0.56,10.8,7 +5.6,0.23,0.29,3.1,0.023,19,89,0.99068,3.25,0.51,11.2,6 +6.6,0.23,0.32,1.7,0.024,26,102,0.99084,3.29,0.6,11.8,6 +6,0.17,0.21,6,0.05,26,134,0.9939,3.08,0.54,9.8,6 
+7.1,0.38,0.42,11.8,0.041,32,193,0.99624,3.04,0.49,10,6 +6.6,0.31,0.37,6.2,0.052,13,164,0.99602,3.24,0.39,8.8,4 +6.5,0.38,0.53,1.4,0.142,5,69,0.9926,3.14,0.52,10.1,4 +7,0.44,0.24,12.1,0.056,68,210,0.99718,3.05,0.5,9.5,5 +7,0.44,0.24,12.1,0.056,68,210,0.99718,3.05,0.5,9.5,5 +7,0.44,0.24,12.1,0.056,68,210,0.99718,3.05,0.5,9.5,5 +6.1,0.38,0.14,3.9,0.06,27,113,0.99344,3.07,0.34,9.2,4 +8,0.33,0.32,4.6,0.041,31,180,0.99184,2.92,0.74,12.2,6 +7,0.44,0.24,12.1,0.056,68,210,0.99718,3.05,0.5,9.5,5 +6,0.19,0.29,1.2,0.046,29,92,0.99033,3.22,0.53,11.3,6 +6.3,0.28,0.34,8.1,0.038,44,129,0.99248,3.26,0.29,12.1,6 +6.1,0.38,0.14,3.9,0.06,27,113,0.99344,3.07,0.34,9.2,4 +5.3,0.43,0.11,1.1,0.029,6,51,0.99076,3.51,0.48,11.2,4 +5.4,0.22,0.35,6.5,0.029,26,87,0.99092,3.29,0.44,12.5,7 +6.2,0.345,0.27,10.1,0.056,38,187,0.99486,3.31,0.56,10.6,5 +5.6,0.255,0.57,10.7,0.056,66,171,0.99464,3.25,0.61,10.4,7 +5.2,0.2,0.27,3.2,0.047,16,93,0.99235,3.44,0.53,10.1,7 +6.2,0.29,0.23,12.4,0.048,33,201,0.99612,3.11,0.56,9.9,6 +6.3,0.26,0.25,5.2,0.046,11,133,0.99202,2.97,0.68,11,6 +6,0.22,0.23,5,0.045,10,122,0.99261,2.94,0.63,10,6 +7.5,0.35,0.37,2.5,0.066,29,89,0.98964,3.14,0.42,12.7,6 +6.6,0.39,0.28,9.2,0.036,10,92,0.99206,3.07,0.35,12.1,6 +6.3,0.23,0.33,6.9,0.052,23,118,0.9938,3.23,0.46,10.4,6 +6.3,0.22,0.3,2,0.05,23,120,0.99204,3.24,0.47,10.4,6 +6.4,0.29,0.18,15,0.04,21,116,0.99736,3.14,0.5,9.2,5 +6.4,0.29,0.18,15,0.04,21,116,0.99736,3.14,0.5,9.2,5 +7.5,0.23,0.3,1.2,0.03,27,80,0.99192,3.05,0.68,10.5,5 +6.4,0.29,0.18,15,0.04,21,116,0.99736,3.14,0.5,9.2,5 +5.7,0.28,0.36,1.8,0.041,38,90,0.99002,3.27,0.98,11.9,7 +6.5,0.26,0.24,10.8,0.042,47,130,0.996,3.08,0.4,10.1,6 +6.4,0.27,0.29,3.9,0.034,62,140,0.99237,3.1,0.59,11.1,6 +5.9,0.22,0.29,4.2,0.037,69,144,0.99214,3.13,0.74,10.8,7 +6.8,0.26,0.26,2,0.019,23.5,72,0.99041,3.16,0.47,11.8,6 +7.6,0.36,0.48,13.5,0.038,44,116,0.9982,3.04,0.48,9.2,5 +7.6,0.35,0.47,13.3,0.037,42,116,0.99822,3.04,0.5,9.2,5 +5.7,0.18,0.26,2.2,0.023,21,95,0.9893,3.07,0.54,12.3,6 +6.6,0.36,0.47,1.4,0.145,26,124,0.99274,3.09,0.56,10.1,6 +5.9,0.14,0.2,1.6,0.04,26,114,0.99105,3.25,0.45,11.4,6 +5.5,0.23,0.19,2.2,0.044,39,161,0.99209,3.19,0.43,10.4,6 +6.7,0.11,0.26,14.8,0.053,44,95,0.99676,3.2,0.35,9.8,6 +7,0.24,0.24,1.8,0.047,29,91,0.99251,3.3,0.43,9.9,6 +6.7,0.11,0.26,14.8,0.053,44,95,0.99676,3.2,0.35,9.8,6 +5.3,0.47,0.1,1.3,0.036,11,74,0.99082,3.48,0.54,11.2,4 +7.5,0.29,0.24,9.9,0.058,25,115,0.99567,3.15,0.46,10.9,5 +6,0.33,0.26,5.1,0.051,16,119,0.99416,3.15,0.41,9.2,5 +6,0.33,0.26,5.1,0.051,16,119,0.99416,3.15,0.41,9.2,5 +5.8,0.32,0.23,1.5,0.033,39,121,0.9887,2.96,0.35,12,5 +5.8,0.3,0.23,1.5,0.034,37,121,0.98871,2.96,0.34,12.1,6 +3.8,0.31,0.02,11.1,0.036,20,114,0.99248,3.75,0.44,12.4,6 +6.2,0.36,0.22,5.25,0.038,44,145,0.99184,3.22,0.4,11.2,6 +6,0.31,0.27,2.3,0.042,19,120,0.98952,3.32,0.41,12.7,7 +6.9,0.52,0.54,7.9,0.036,23,169,0.99267,3.26,0.47,12.2,6 +7,0.55,0.05,8,0.036,19,164,0.99269,3.26,0.46,12.2,6 +5.8,0.2,0.16,1.4,0.042,44,99,0.98912,3.23,0.37,12.2,6 +6.2,0.36,0.22,5.25,0.038,44,145,0.99184,3.22,0.4,11.2,6 +6,0.31,0.27,2.3,0.042,19,120,0.98952,3.32,0.41,12.7,7 +6,0.29,0.27,2.3,0.044,20,117,0.9895,3.31,0.41,12.7,7 +5.7,0.22,0.29,3.5,0.04,27,146,0.98999,3.17,0.36,12.1,6 +7.1,0.46,0.23,13.7,0.045,44,192,0.9981,3.11,0.53,9.4,5 +6.6,0.21,0.3,9.9,0.041,64,174,0.995,3.07,0.5,10.1,6 +6.9,0.42,0.2,15.4,0.043,57,201,0.99848,3.08,0.54,9.4,5 +5.7,0.22,0.2,16,0.044,41,113,0.99862,3.22,0.46,8.9,6 +5.7,0.22,0.2,16,0.044,41,113,0.99862,3.22,0.46,8.9,6 +5.7,0.22,0.2,16,0.044,41,113,0.99862,3.22,0.46,8.9,6 
+5.7,0.22,0.2,16,0.044,41,113,0.99862,3.22,0.46,8.9,6 +5.2,0.31,0.2,2.4,0.027,27,117,0.98886,3.56,0.45,13,7 +7.2,0.22,0.35,5.5,0.054,37,183,0.99474,3.08,0.5,10.3,5 +5.6,0.18,0.29,2.3,0.04,5,47,0.99126,3.07,0.45,10.1,4 +6.2,0.24,0.27,16.8,0.04,48,129,0.99691,3.23,0.38,10.5,6 +5.7,0.22,0.2,16,0.044,41,113,0.99862,3.22,0.46,8.9,6 +5.7,0.26,0.24,17.8,0.059,23,124,0.99773,3.3,0.5,10.1,5 +5.7,0.26,0.24,17.8,0.059,23,124,0.99773,3.3,0.5,10.1,5 +6,0.2,0.26,6.8,0.049,22,93,0.9928,3.15,0.42,11,6 +6,0.2,0.26,6.8,0.049,22,93,0.9928,3.15,0.42,11,6 +6,0.2,0.26,6.8,0.049,22,93,0.9928,3.15,0.42,11,6 +6,0.2,0.26,6.8,0.049,22,93,0.9928,3.15,0.42,11,6 +7.6,0.28,0.17,1.6,0.046,28,117,0.99288,3.08,0.43,10,5 +7,0.2,0.33,4.7,0.03,25,76,0.99202,2.88,0.54,10.5,6 +6.6,0.26,0.27,11.8,0.048,28,112,0.99606,2.87,0.49,9.7,6 +5.7,0.26,0.24,17.8,0.059,23,124,0.99773,3.3,0.5,10.1,5 +7.2,0.21,0.36,15.7,0.045,68,183,0.99922,3.25,0.76,9.4,5 +6.9,0.22,0.32,5.8,0.041,20,119,0.99296,3.17,0.55,11.2,6 +7.2,0.21,0.36,15.7,0.045,68,183,0.99922,3.25,0.76,9.4,5 +7.4,0.22,0.28,9,0.046,22,121,0.99468,3.1,0.55,10.8,5 +7.2,0.21,0.36,15.7,0.045,68,183,0.99922,3.25,0.76,9.4,5 +6.9,0.22,0.32,5.8,0.041,20,119,0.99296,3.17,0.55,11.2,6 +7,0.2,0.35,8.8,0.037,31,103,0.99388,3.13,0.49,11,6 +5.6,0.26,0,10.2,0.038,13,111,0.99315,3.44,0.46,12.4,6 +6.3,0.28,0.3,6.6,0.208,60,154,0.99478,3.1,0.4,9.4,6 +6.4,0.29,0.3,6.5,0.209,62,156,0.99478,3.1,0.4,9.4,5 +7.2,0.34,0.23,8.9,0.105,22,155,0.99692,3.01,0.58,9.5,5 +7.1,0.39,0.39,11.1,0.034,25,204,0.99616,3.05,0.52,10,6 +6.9,0.26,0.29,4.2,0.043,33,114,0.9902,3.16,0.31,12.5,6 +6.1,0.24,0.25,1.6,0.044,24,115,0.9921,3.39,0.59,10.9,6 +5.9,0.25,0.24,7.4,0.044,21,113,0.99462,3.38,0.58,10.5,6 +6.1,0.24,0.27,11.5,0.05,51,133,0.99476,3.22,0.37,10.8,6 +6.5,0.22,0.27,1.6,0.039,36,116,0.99178,3.38,0.57,11,7 +6.2,0.26,0.29,2,0.036,16,87,0.99081,3.33,0.61,11.8,6 +6.6,0.34,0.25,4.8,0.038,16,121,0.99198,3.36,0.71,12.6,6 +5.6,0.225,0.24,9.8,0.054,59,140,0.99545,3.17,0.39,10.2,6 +7.1,0.23,0.28,1.9,0.046,33,103,0.98997,3.12,0.31,12,5 +6.9,0.26,0.29,4.2,0.043,33,114,0.9902,3.16,0.31,12.5,6 +6.4,0.27,0.3,1.6,0.04,19,86,0.99089,3.32,0.65,11.5,6 +6.3,0.41,0.22,7.3,0.035,23,117,0.99172,3.2,0.39,11.94,7 +6.7,0.41,0.24,5.4,0.035,33,115,0.9901,3.12,0.44,12.8933333333333,7 +7.2,0.585,0.2,10.4,0.086,17,94,0.99681,3.13,0.4,9.4,5 +6.7,0.34,0.26,1.9,0.038,58,138,0.9893,3,0.47,12.2,7 +6.3,0.41,0.22,7.3,0.035,23,117,0.99172,3.2,0.39,11.94,7 +6.7,0.41,0.24,5.4,0.035,33,115,0.9901,3.12,0.44,12.8933333333333,7 +6.4,0.26,0.35,7.7,0.056,45,191,0.99527,3.16,0.5,9.5,5 +6.3,0.28,0.22,11.5,0.036,27,150,0.99445,3,0.33,10.6,6 +7.4,0.16,0.33,1.2,0.042,47,121,0.99198,3.04,0.68,10.5,7 +8.4,0.27,0.3,2.2,0.037,36,129,0.99085,2.89,0.3,11.4666666666667,6 +5.9,0.2,0.28,1,0.043,45,100,0.99033,3.4,0.41,11.4,6 +6.4,0.24,0.26,8.2,0.054,47,182,0.99538,3.12,0.5,9.5,5 +7.4,0.38,0.34,8.3,0.052,44,168,0.99627,3.11,0.52,9.2,5 +6.4,0.24,0.26,8.2,0.054,47,182,0.99538,3.12,0.5,9.5,5 +6.4,0.42,0.19,9.3,0.043,28,145,0.99433,3.23,0.53,10.98,5 +6.4,0.23,0.26,8.1,0.054,47,181,0.9954,3.12,0.49,9.4,5 +6.4,0.24,0.26,8.2,0.054,47,182,0.99538,3.12,0.5,9.5,5 +7.4,0.38,0.34,8.3,0.052,44,168,0.99627,3.11,0.52,9.2,5 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 
+7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +6.8,0.24,0.29,2,0.044,15,96,0.99232,3.23,0.64,10.4,8 +7.3,0.19,0.27,13.9,0.057,45,155,0.99807,2.94,0.41,8.8,8 +7.4,0.27,0.52,15.7,0.054,36,139,0.99788,3.04,0.62,10.0333333333333,6 +5.7,0.28,0.35,1.2,0.052,39,141,0.99108,3.44,0.69,11.3,6 +5.8,0.22,0.25,1.5,0.024,21,109,0.99234,3.37,0.58,10.4,6 +6.7,0.27,0.69,1.2,0.176,36,106,0.99288,2.96,0.43,9.2,6 +7.1,0.2,0.35,3.2,0.034,21,107,0.99195,3.11,0.54,11.1,6 +6.7,0.27,0.69,1.2,0.176,36,106,0.99288,2.96,0.43,9.2,6 +7.1,0.23,0.3,2.6,0.034,62,148,0.99121,3.03,0.56,11.3,7 +7.6,0.31,0.52,13.2,0.042,61,148,0.99839,2.98,0.47,9.1,6 +7.2,0.34,0.28,10.4,0.108,43,187,0.99738,2.96,0.57,9.4,5 +7,0.36,0.25,5.7,0.015,14,73,0.98963,2.82,0.59,13.2,6 +6.4,0.31,0.28,2.5,0.039,34,137,0.98946,3.22,0.38,12.7,6 +7.3,0.28,0.35,1.6,0.054,31,148,0.99178,3.18,0.47,10.7,5 +7.4,0.16,0.3,1.4,0.064,34,166,0.99136,3.11,0.42,11.4333333333333,6 +6.4,0.31,0.27,7.4,0.049,48,169,0.99323,3.27,0.45,11.1,6 +6.4,0.31,0.28,2.5,0.039,34,137,0.98946,3.22,0.38,12.7,6 +6.2,0.29,0.29,5.6,0.046,35,178,0.99313,3.25,0.51,10.5333333333333,5 +5.9,0.28,0.34,3.6,0.04,50,194,0.9912,3.31,0.52,11.6,6 +6.5,0.23,0.2,7.5,0.05,44,179,0.99504,3.18,0.48,9.53333333333333,5 +7.2,0.34,0.2,5.8,0.062,52,203,0.99461,3.17,0.44,9.8,6 +7.3,0.28,0.35,1.6,0.054,31,148,0.99178,3.18,0.47,10.7,5 +6.5,0.2,0.33,1.5,0.039,36,110,0.99008,3.22,0.65,12,6 +6.2,0.24,0.27,2.9,0.039,30,123,0.98959,3.12,0.37,12.8,6 +7.1,0.31,0.25,11.2,0.048,32,136,0.99663,3.14,0.4,9.5,5 +6.4,0.29,0.21,9.65,0.041,36,119,0.99334,2.99,0.34,10.9333333333333,6 +6.3,0.19,0.33,10.1,0.063,63,133,0.99561,2.86,0.41,9.1,5 +5.9,0.29,0.28,3.2,0.035,16,117,0.98959,3.26,0.42,12.6,6 +7.1,0.31,0.25,11.2,0.048,32,136,0.99663,3.14,0.4,9.5,5 +6.5,0.3,0.28,11.45,0.041,29,109,0.99418,2.98,0.3,10.9,6 +6.4,0.29,0.21,9.65,0.041,36,119,0.99334,2.99,0.34,10.9333333333333,6 +6.5,0.22,0.19,4.5,0.096,16,115,0.9937,3.02,0.44,9.6,5 +7,0.23,0.28,2.7,0.053,16,92,0.99372,3.18,0.56,9.3,5 +7.1,0.23,0.23,3.5,0.038,23,112,0.99157,3.05,0.37,11.3666666666667,6 +6.1,0.26,0.28,1.7,0.043,24,98,0.98918,3.14,0.44,12.5,6 +6.4,0.35,0.21,2.1,0.051,46,171,0.9932,3.16,0.5,9.5,5 +6,0.32,0.32,4.8,0.041,40,186,0.99235,3.22,0.54,11,6 +6.1,0.34,0.21,5,0.042,17,133,0.99373,3.02,0.53,9.4,5 +6.5,0.13,0.27,2.6,0.035,32,76,0.9914,3.21,0.76,11.3333333333333,6 +6.5,0.315,0.2,6.6,0.041,9,126,0.99494,2.94,0.51,8.8,5 +6.1,0.34,0.21,5,0.042,17,133,0.99373,3.02,0.53,9.4,5 +5.7,0.31,0.29,7.3,0.05,33,143,0.99332,3.31,0.5,11.0666666666667,6 +6.4,0.3,0.27,5,0.058,27,151,0.99198,3.22,0.49,12.2,6 +7,0.24,0.26,1.7,0.041,31,110,0.99142,3.2,0.53,11,6 +6.5,0.13,0.27,2.6,0.035,32,76,0.9914,3.21,0.76,11.3333333333333,6 +6.4,0.26,0.21,8.2,0.05,51,182,0.99542,3.23,0.48,9.5,5 +6.4,0.26,0.21,8.2,0.05,51,182,0.99542,3.23,0.48,9.5,5 +6,0.27,0.31,5,0.043,54,170,0.9924,3.28,0.52,11,6 +7.1,0.21,0.33,1.2,0.039,34,97,0.99112,3.11,0.75,11.2,6 +6.7,0.26,0.29,7.1,0.036,28,100,0.99534,3.08,0.36,9.3,6 +6.3,0.28,0.22,9.5,0.04,30,111,0.99338,3.05,0.31,10.8,4 +6.2,0.25,0.44,15.8,0.057,39,167,0.99804,3.14,0.51,9.2,5 +7.3,0.22,0.37,15.5,0.048,70,203,0.99899,3.25,0.77,9.4,5 +6.2,0.25,0.44,15.8,0.057,39,167,0.99804,3.14,0.51,9.2,5 +6.4,0.18,0.28,17.05,0.047,53,139,0.99724,3.25,0.35,10.5,6 +6.3,0.2,0.26,12.7,0.046,60,143,0.99526,3.26,0.35,10.8,6 +6.6,0.24,0.22,12.3,0.051,35,146,0.99676,3.1,0.67,9.4,5 +7.4,0.27,0.26,11.8,0.053,55,173,0.99699,3.11,0.6,9.8,5 +7.4,0.27,0.26,11.8,0.053,55,173,0.99699,3.11,0.6,9.8,5 +7.4,0.27,0.26,11.8,0.053,55,173,0.99699,3.11,0.6,9.8,5 
+6.6,0.24,0.22,12.3,0.051,35,146,0.99676,3.1,0.67,9.4,5 +7.4,0.27,0.26,11.8,0.053,55,173,0.99699,3.11,0.6,9.8,5 +7.1,0.38,0.29,13.6,0.041,30,137,0.99461,3.02,0.96,12.1,6 +6.8,0.43,0.26,5.2,0.043,40,176,0.99116,3.17,0.41,12.3,6 +5.2,0.22,0.46,6.2,0.066,41,187,0.99362,3.19,0.42,9.73333333333333,5 +5.9,0.29,0.16,7.9,0.044,48,197,0.99512,3.21,0.36,9.4,5 +5.9,0.29,0.16,7.9,0.044,48,197,0.99512,3.21,0.36,9.4,5 +6.3,0.29,0.29,3.3,0.037,32,140,0.9895,3.17,0.36,12.8,7 +6.3,0.19,0.32,2.8,0.046,18,80,0.99043,2.92,0.47,11.05,6 +5.7,0.29,0.16,7.9,0.044,48,197,0.99512,3.21,0.36,9.4,5 +6.3,0.29,0.29,3.3,0.037,32,140,0.9895,3.17,0.36,12.8,7 +5.7,0.24,0.47,6.3,0.069,35,182,0.99391,3.11,0.46,9.73333333333333,5 +5.8,0.3,0.38,4.9,0.039,22,86,0.98963,3.23,0.58,13.1,7 +7.1,0.27,0.27,10.4,0.041,26,114,0.99335,3.04,0.52,11.5,7 +5.8,0.3,0.38,4.9,0.039,22,86,0.98963,3.23,0.58,13.1,7 +7.1,0.27,0.27,10.4,0.041,26,114,0.99335,3.04,0.52,11.5,7 +6.3,0.305,0.22,16,0.061,26,141,0.99824,3.08,0.5,9.1,5 +5.7,0.24,0.47,6.3,0.069,35,182,0.99391,3.11,0.46,9.75,5 +6.2,0.22,0.28,2.2,0.04,24,125,0.9917,3.19,0.48,10.5,6 +6.6,0.22,0.23,17.3,0.047,37,118,0.99906,3.08,0.46,8.8,6 +6.6,0.22,0.23,17.3,0.047,37,118,0.99906,3.08,0.46,8.8,6 +6.6,0.22,0.23,17.3,0.047,37,118,0.99906,3.08,0.46,8.8,6 +6.6,0.22,0.23,17.3,0.047,37,118,0.99906,3.08,0.46,8.8,6 +6.2,0.22,0.28,2.2,0.04,24,125,0.9917,3.19,0.48,10.5,6 +6.2,0.22,0.28,2.2,0.04,24,125,0.9917,3.19,0.48,10.5,6 +6.6,0.22,0.23,17.3,0.047,37,118,0.99906,3.08,0.46,8.8,6 +6.1,0.22,0.5,6.6,0.045,30,122,0.99415,3.22,0.49,9.9,6 +6.2,0.21,0.52,6.5,0.047,28,123,0.99418,3.22,0.49,9.9,6 +6.3,0.32,0.26,12.3,0.044,24,205,0.99611,3.11,0.58,9.9,5 +6.9,0.44,0.27,5,0.038,33,166,0.99124,3.2,0.42,12.2,6 +6.1,0.31,0.34,2.8,0.042,59.5,162,0.99179,3.27,0.47,10.8,6 +8.1,0.36,0.59,13.6,0.051,60,134,0.99886,2.96,0.39,8.7,5 +6.6,0.38,0.28,2.8,0.043,17,67,0.98924,3.21,0.47,13.2,6 +6.7,0.24,0.26,5.4,0.03,15,94,0.99045,3.15,0.38,12.7,6 +6.9,0.56,0.26,10.9,0.06,55,193,0.9969,3.21,0.44,9.4,5 +7.2,0.24,0.24,1.7,0.045,18,161,0.99196,3.25,0.53,11.2,6 +6.5,0.29,0.3,9.15,0.051,25,166,0.99339,3.24,0.56,11.3333333333333,6 +6.7,0.28,0.28,4.5,0.051,14,92,0.99224,3.36,0.58,11.9,6 +6.5,0.29,0.3,9.15,0.051,25,166,0.99339,3.24,0.56,11.35,6 +6.1,0.21,0.19,1.4,0.046,51,131,0.99184,3.22,0.39,10.5,5 +7.2,0.47,0.16,5.9,0.048,14,125,0.99428,3.09,0.49,9.8,5 +6.7,0.34,0.31,16.4,0.051,20,146,0.99834,3.06,0.54,9.1,5 +6.6,0.27,0.25,1.2,0.033,36,111,0.98918,3.16,0.37,12.4,6 +6.7,0.34,0.31,16.4,0.051,20,146,0.99834,3.06,0.54,9.1,5 +7.2,0.47,0.16,5.9,0.048,14,125,0.99428,3.09,0.49,9.8,5 +5,0.35,0.25,7.8,0.031,24,116,0.99241,3.39,0.4,11.3,6 +5,0.35,0.25,7.8,0.031,24,116,0.99241,3.39,0.4,11.3,6 +4.4,0.46,0.1,2.8,0.024,31,111,0.98816,3.48,0.34,13.1,6 +6.6,0.38,0.29,2.9,0.035,15,101,0.98916,3.04,0.37,12.5,6 +7.3,0.3,0.25,2.5,0.045,32,122,0.99329,3.18,0.54,10.3,5 +6.4,0.28,0.22,12.8,0.039,51,150,0.99535,3.23,0.43,10.7,6 +6.9,0.29,0.25,12.2,0.04,29,136,0.99552,3.05,0.65,10.4,6 +6.3,0.3,0.19,7.7,0.049,47,184,0.99514,3.22,0.48,9.5,5 +6.3,0.39,0.22,2.8,0.048,53,173,0.99304,3.24,0.45,9.8,5 +6.6,0.38,0.29,2.9,0.035,15,101,0.98916,3.04,0.37,12.5,6 +6.6,0.18,0.26,17.3,0.051,17,149,0.9984,3,0.43,9.4,6 +6,0.28,0.29,19.3,0.051,36,174,0.99911,3.14,0.5,9,5 +6,0.28,0.29,19.3,0.051,36,174,0.99911,3.14,0.5,9,5 +6,0.28,0.29,19.3,0.051,36,174,0.99911,3.14,0.5,9,5 +6.6,0.35,0.26,2.7,0.045,19,129,0.98952,3.24,0.48,13,7 +5.9,0.22,0.18,6.4,0.041,28,120,0.99403,3.27,0.5,9.9,5 +6.6,0.18,0.26,17.3,0.051,17,149,0.9984,3,0.43,9.4,6 
+7.7,0.28,0.24,2.4,0.044,29,157,0.99312,3.27,0.56,10.6,6 +7.1,0.42,0.2,2.8,0.038,28,109,0.98968,3.23,0.47,13.4,6 +6.7,0.32,0.32,1.7,0.031,31,114,0.98946,3.12,0.35,12.5,6 +6.6,0.26,0.56,15.4,0.053,32,141,0.9981,3.11,0.49,9.3,5 +6.6,0.26,0.56,15.4,0.053,32,141,0.9981,3.11,0.49,9.3,5 +6.2,0.32,0.24,4.1,0.051,34,149,0.99306,3.36,0.52,11,5 +6.3,0.25,0.27,6.6,0.054,40,158,0.99378,3.2,0.48,10.3,5 +6.2,0.21,0.24,1.2,0.051,31,95,0.99036,3.24,0.57,11.3,6 +6.4,0.23,0.27,2.1,0.042,35,100,0.99094,3.03,0.63,10.9,6 +4.7,0.145,0.29,1,0.042,35,90,0.9908,3.76,0.49,11.3,6 +6.2,0.2,0.28,1.1,0.039,24,78,0.9899,3.36,0.47,12.1,6 +7,0.28,0.28,1.4,0.039,12,83,0.99173,3.18,0.65,11.1,5 +7.1,0.36,0.2,1.6,0.271,24,140,0.99356,3.11,0.63,9.8,5 +5.7,0.25,0.22,9.8,0.049,50,125,0.99571,3.2,0.45,10.1,6 +5.7,0.22,0.33,1.9,0.036,37,110,0.98945,3.26,0.58,12.4,6 +6,0.2,0.38,1.3,0.034,37,104,0.98865,3.11,0.52,12.7,6 +6.4,0.32,0.26,7.9,0.05,53,180,0.99514,3.14,0.5,9.6,5 +6.4,0.32,0.26,7.9,0.05,53,180,0.99514,3.14,0.5,9.6,5 +6,0.555,0.26,4.5,0.053,17,126,0.9943,3.24,0.46,9.1,5 +5.9,0.22,0.45,22.6,0.12,55,122,0.99636,3.1,0.35,12.8,5 +6.4,0.32,0.26,7.9,0.05,53,180,0.99514,3.14,0.5,9.6,5 +6.2,0.3,0.33,3.5,0.037,37,155,0.98987,3.18,0.37,12.4,6 +5.8,0.28,0.18,1.2,0.058,7,108,0.99288,3.23,0.58,9.55,4 +5.8,0.555,0.26,4.5,0.053,17,126,0.9943,3.24,0.46,9.1,5 +6.7,0.31,0.33,2,0.033,12,74,0.99064,3.29,0.65,12.5,6 +6.4,0.15,0.25,7.8,0.05,13,68,0.99394,3.16,0.4,9.9,6 +6.4,0.13,0.28,0.9,0.045,32,87,0.99175,3.47,0.52,11.2,6 +6.7,0.48,0.49,2.9,0.03,28,122,0.98926,3.13,0.4,13,6 +6.7,0.48,0.49,2.9,0.03,28,122,0.98926,3.13,0.4,13,6 +5.8,0.3,0.33,3.5,0.033,25,116,0.99057,3.2,0.44,11.7,6 +6.1,0.28,0.23,4.2,0.038,13,95,0.98898,2.97,0.7,13.1,6 +6,0.19,0.37,9.7,0.032,17,50,0.9932,3.08,0.66,12,6 +6.8,0.31,0.25,10.5,0.043,30,165,0.9972,3.36,0.55,10.55,6 +7.5,0.24,0.29,1.1,0.046,34,84,0.9902,3.04,0.39,11.45,6 +6.8,0.23,0.39,16.1,0.053,71,194,0.9988,3.18,0.64,10.2,6 +7.5,0.24,0.29,1.1,0.046,34,84,0.9902,3.04,0.39,11.45,6 +6.3,0.29,0.3,8.1,0.212,60,150,0.9958,3.1,0.4,9.3,5 +6.8,0.2,0.25,6.2,0.052,22,106,0.9935,3.09,0.54,10.8,5 +5.2,0.38,0.26,7.7,0.053,20,103,0.9925,3.27,0.45,12.2,6 +7.8,0.27,0.33,2.4,0.053,36,175,0.992,3.2,0.55,11,6 +6.6,0.54,0.21,16.3,0.055,41,182,0.9986,3.35,0.54,10.4,6 +7.1,0.25,0.31,2.3,0.05,32,156,0.9914,3.14,0.51,11.4,6 +5.8,0.61,0.01,8.4,0.041,31,104,0.9909,3.26,0.72,14.05,7 +6.5,0.32,0.23,8.5,0.051,20,138,0.9943,3.03,0.42,10.7,5 +6.4,0.28,0.23,6,0.051,50,162,0.994,3.15,0.52,10.2,5 +6.6,0.19,0.28,1.1,0.044,38,100,0.9904,3.22,0.69,11.2,6 +5.1,0.305,0.13,1.75,0.036,17,73,0.99,3.4,0.51,12.3333333333333,5 +5.8,0.26,0.3,2.6,0.034,75,129,0.9902,3.2,0.38,11.5,4 +6.7,0.23,0.17,1.3,0.061,14,100,0.9925,3.07,0.55,9.5,5 +6.8,0.33,0.3,2.1,0.047,35,147,0.9886,3.24,0.56,13.4,6 +6.1,0.27,0.32,1.1,0.034,24,110,0.9898,3.36,0.4,12.5,6 +6.1,0.27,0.32,1.1,0.034,24,110,0.9898,3.36,0.4,12.5,6 +6.8,0.4,0.29,2.8,0.044,27,97,0.9904,3.12,0.42,11.2,6 +6.1,0.4,0.18,9,0.051,28.5,259,0.9964,3.19,0.5,8.8,5 +7.1,0.28,0.26,2.8,0.039,50,118,0.9908,3.06,0.59,11.2,6 +6.2,0.32,0.32,2.2,0.036,15,70,0.9899,3.16,0.48,12.7,6 +6.8,0.17,0.17,5.1,0.049,26,82,0.993,3,0.38,9.8,6 +9,0.2,0.33,3.5,0.049,10,40,0.9944,3.14,0.36,9.8,6 +5.8,0.13,0.22,12.7,0.058,24,183,0.9956,3.32,0.42,11.7,6 +5.8,0.31,0.31,7.5,0.052,55,230,0.9949,3.19,0.46,9.8,5 +6.3,0.36,0.2,2,0.048,48,191,0.9929,3.17,0.51,9.6,5 +9,0.2,0.33,3.5,0.049,10,40,0.9944,3.14,0.36,9.8,6 +6.7,0.18,0.25,14.3,0.048,79,149,0.9975,3.12,0.37,9.7,5 +6.6,0.16,0.25,9.8,0.049,59.5,137,0.995,3.16,0.38,10,6 
+5.8,0.13,0.22,12.7,0.058,24,183,0.9956,3.32,0.42,11.7,6 +5.8,0.27,0.22,12.7,0.058,42,206,0.9946,3.32,0.38,12.3,6 +6.8,0.17,0.17,5.1,0.049,26,82,0.993,3,0.38,9.8,6 +6.4,0.37,0.19,3.5,0.068,18,101,0.9934,3.03,0.38,9,6 +7.3,0.26,0.53,12.7,0.047,60.5,164.5,0.9984,3.06,0.45,9.1,6 +7.3,0.28,0.54,12.9,0.049,62,162.5,0.9984,3.06,0.45,9.1,6 +7.3,0.28,0.54,12.9,0.049,62,162.5,0.9984,3.06,0.45,9.1,6 +5.8,0.12,0.21,1.3,0.056,35,121,0.9908,3.32,0.33,11.4,6 +6.1,0.25,0.18,10.5,0.049,41,124,0.9963,3.14,0.35,10.5,5 +6.4,0.24,0.27,1.5,0.04,35,105,0.98914,3.13,0.3,12.4,6 +7.3,0.26,0.53,12.7,0.047,60.5,156,0.9984,3.06,0.45,9.1,6 +7.3,0.28,0.54,12.9,0.049,62,152,0.9984,3.06,0.45,9.1,6 +8.3,0.18,0.37,1.2,0.049,6,94,0.9937,3.18,0.52,10.1,5 +7.1,0.09,0.3,6.2,0.032,24,134,0.993,2.99,0.39,10.9,6 +8.3,0.14,0.36,8.8,0.026,13,60,0.9956,3.13,0.35,11.05,5 +5.8,0.28,0.3,3.9,0.026,36,105,0.98963,3.26,0.58,12.75,6 +6,0.23,0.34,1.3,0.025,23,111,0.98961,3.36,0.37,12.7,6 +6.9,0.28,0.37,9.1,0.037,16,76,0.9948,3.05,0.54,11.1,5 +6.9,0.28,0.37,9.1,0.037,16,76,0.9948,3.05,0.54,11.1,5 +5.8,0.28,0.3,3.9,0.026,36,105,0.98963,3.26,0.58,12.75,6 +6.3,0.25,0.53,1.8,0.021,41,101,0.989315,3.19,0.31,13,6 +6.5,0.2,0.31,2.1,0.033,32,95,0.989435,2.96,0.61,12,6 +5.9,0.29,0.32,1.4,0.022,17,47,0.9899,3.35,0.35,11.5,6 +6.4,0.46,0.22,14.7,0.047,51,183,0.998275,3.39,0.6,10.5,5 +6.9,0.28,0.37,9.1,0.037,16,76,0.9948,3.05,0.54,11.1,5 +6.8,0.23,0.33,1.9,0.047,20,101,0.9914,3.1,0.46,11.1,6 +7,0.23,0.32,1.8,0.048,25,113,0.9915,3.11,0.47,11.1,6 +6.4,0.55,0.26,9.6,0.027,20,104,0.9924,3.22,0.73,13.1,6 +5.7,0.28,0.3,3.9,0.026,36,105,0.98963,3.26,0.58,12.75,6 +6,0.23,0.34,1.3,0.025,23,111,0.98961,3.36,0.37,12.7,6 +6.8,0.45,0.3,11.8,0.094,23,97,0.997,3.09,0.44,9.6,5 +6.1,0.2,0.4,1.9,0.028,32,138,0.9914,3.26,0.72,11.7,5 +6.1,0.37,0.46,12,0.042,61,210,0.997,3.17,0.59,9.7,6 +5.9,0.21,0.23,7.9,0.033,22,130,0.9944,3.38,0.59,10.9,6 +6.9,0.22,0.32,9.3,0.04,22,110,0.9958,3.34,0.54,10.7,7 +5.4,0.27,0.22,4.6,0.022,29,107,0.98889,3.33,0.54,13.8,6 +6,0.26,0.26,2.2,0.035,10,72,0.989465,3.11,0.48,12.15,6 +5.6,0.18,0.3,10.2,0.028,28,131,0.9954,3.49,0.42,10.8,7 +5.6,0.26,0.27,10.6,0.03,27,119,0.9947,3.4,0.34,10.7,7 +7,0.23,0.35,1.4,0.036,31,113,0.9912,3.16,0.48,10.8,7 +5.8,0.28,0.66,9.1,0.039,26,159,0.9965,3.66,0.55,10.8,5 +8.6,0.36,0.26,11.1,0.03,43.5,171,0.9948,3.03,0.49,12,5 +5.8,0.28,0.66,9.1,0.039,26,159,0.9965,3.66,0.55,10.8,5 +6.4,0.3,0.27,4.4,0.055,17,135,0.9925,3.23,0.44,12.2,6 +6.2,0.2,0.32,2.8,0.05,17,126,0.9936,3.18,0.55,9.4,6 +5.8,0.29,0.15,1.1,0.029,12,83,0.9898,3.3,0.4,11.4,6 +5.7,0.22,0.28,1.3,0.027,26,101,0.98948,3.35,0.38,12.5,7 +5.6,0.22,0.32,1.2,0.024,29,97,0.98823,3.2,0.46,13.05,7 +6.8,0.32,0.23,3.3,0.026,31,99,0.9896,3.1,0.4,12.4,6 +6.2,0.2,0.26,9.7,0.03,39,102,0.9908,3.08,0.56,12.9,7 +6.1,0.35,0.24,2.3,0.034,25,133,0.9906,3.34,0.59,12,7 +5.9,0.3,0.29,1.1,0.036,23,56,0.9904,3.19,0.38,11.3,5 +6.3,0.15,0.34,11.4,0.05,25,96,0.99754,3.21,0.49,10,6 +4.8,0.13,0.32,1.2,0.042,40,98,0.9898,3.42,0.64,11.8,7 +6,0.2,0.26,14.7,0.045,53,125,0.998365,2.99,0.69,9.4,6 +5.7,0.2,0.24,13.8,0.047,44,112,0.99837,2.97,0.66,8.8,6 +6,0.27,0.26,1.3,0.038,32,138,0.99125,3.46,0.43,11.1,6 +6.3,0.37,0.51,6.3,0.048,35,146,0.9943,3.1,1.01,10.5,6 +6.4,0.23,0.37,7.9,0.05,60,150,0.99488,2.86,0.49,9.3,6 +5.9,0.34,0.25,2,0.042,12,110,0.99034,3.02,0.54,11.4,6 +5,0.33,0.23,11.8,0.03,23,158,0.99322,3.41,0.64,11.8,6 +5.4,0.29,0.38,1.2,0.029,31,132,0.98895,3.28,0.36,12.4,6 +8,0.33,0.35,10,0.035,22,108,0.99457,3.12,0.36,11.6,6 +6.4,0.3,0.33,5.2,0.05,30,137,0.99304,3.26,0.58,11.1,5 
+5.4,0.29,0.38,1.2,0.029,31,132,0.98895,3.28,0.36,12.4,6 +6.4,0.33,0.3,7.2,0.041,42,168,0.99331,3.22,0.49,11.1,6 +7,0.33,0.78,9.9,0.042,21,251,0.99435,3.01,0.55,11,6 +6.7,0.45,0.3,5.3,0.036,27,165,0.99122,3.12,0.46,12.2,6 +6.5,0.36,0.31,13.55,0.053,20,113,0.99544,3.2,0.56,11,6 +5.8,0.42,0.3,2.2,0.035,26,129,0.989,3.32,0.47,12.9,6 +7.1,0.39,0.3,9.9,0.037,29,124,0.99414,3.07,0.42,10.9,6 +6.7,0.53,0.29,4.3,0.069,20,114,0.99014,3.22,0.59,13.4,6 +6.7,0.66,0,13,0.033,32,75,0.99551,3.15,0.5,10.7,6 +6.5,0.36,0.31,13.55,0.053,20,113,0.99544,3.2,0.56,11,6 +6.5,0.16,0.33,1,0.027,23,75,0.9908,3.3,0.39,11.4,7 +8.3,0.22,0.34,1.1,0.043,20,116,0.9927,3,0.47,10.2,6 +6.9,0.23,0.35,6.9,0.03,45,116,0.99244,2.8,0.54,11,6 +6.4,0.17,0.34,13.4,0.044,45,139,0.99752,3.06,0.43,9.1,6 +5,0.33,0.18,4.6,0.032,40,124,0.99114,3.18,0.4,11,6 +6.8,0.38,0.29,9.9,0.037,40,146,0.99326,3.11,0.37,11.5,6 +6.5,0.29,0.32,3,0.036,38,93,0.9906,3.16,0.59,12,6 +6.9,0.29,0.32,5.8,0.04,16,112,0.993,3.04,0.58,11.2,5 +6.6,0.28,0.3,12.9,0.033,31,177,0.99479,3.12,0.39,11.2,6 +6.2,0.36,0.27,3.2,0.032,13,73,0.98942,2.9,0.69,12.6,7 +6,0.615,0.04,0.8,0.032,8,50,0.99036,3.14,0.4,11,4 +5.9,0.44,0.36,2.5,0.03,12,73,0.99201,3.22,0.48,10.8,6 +5.9,0.42,0.36,2.4,0.034,19,77,0.99184,3.25,0.48,10.9,5 +5.8,0.34,0.21,7.2,0.041,48,146,0.99441,3.16,0.49,9.8,5 +5.8,0.27,0.2,7.3,0.04,42,145,0.99442,3.15,0.48,9.8,5 +7.1,0.33,0.18,6.3,0.094,27,166,0.99474,2.9,0.49,9.5,5 +6.1,0.44,0.28,4.25,0.032,43,132,0.9916,3.26,0.47,11.2666666666667,7 +7.3,0.28,0.37,1.2,0.039,26,99,0.99198,3.01,0.62,10.8,5 +5.2,0.5,0.18,2,0.036,23,129,0.98949,3.36,0.77,13.4,7 +6.1,0.44,0.28,4.25,0.032,43,132,0.9916,3.26,0.47,11.3,7 +6.4,0.62,0.12,4.7,0.06,33,196,0.99556,3.22,0.48,8.9,5 +6.4,0.38,0.19,4.5,0.038,36,119,0.99151,3.07,0.42,11.2,6 +7.5,0.305,0.38,1.4,0.047,30,95,0.99158,3.22,0.52,11.5,7 +6.5,0.5,0.22,4.1,0.036,35,131,0.9902,3.26,0.55,13,7 +6.6,0.4,0.3,5.3,0.038,20,125,0.99204,3.36,0.73,12.6,6 +6.4,0.4,0.25,4.2,0.032,15,91,0.98988,3.26,0.52,13.1,6 +8.3,0.49,0.23,6.65,0.034,6,158,0.99344,3.05,0.48,11.2,5 +6.3,0.3,0.91,8.2,0.034,50,199,0.99394,3.39,0.49,11.7,6 +6.1,0.19,0.37,2.6,0.041,24,99,0.99153,3.18,0.5,10.9,6 +6.1,0.19,0.37,2.6,0.041,24,99,0.99153,3.18,0.5,10.9,6 +5.6,0.24,0.34,2,0.041,14,73,0.98981,3.04,0.45,11.6,7 +5.7,0.25,0.32,12.2,0.041,43,127,0.99524,3.23,0.53,10.4,7 +6.6,0.21,0.39,2.3,0.041,31,102,0.99221,3.22,0.58,10.9,7 +6.3,0.3,0.91,8.2,0.034,50,199,0.99394,3.39,0.49,11.7,6 +6.2,0.28,0.41,5,0.043,50,188,0.99318,3.23,0.64,10.8,6 +5.8,0.29,0.38,10.7,0.038,49,136,0.99366,3.11,0.59,11.2,6 +5.8,0.345,0.15,10.8,0.033,26,120,0.99494,3.25,0.49,10,6 +6.5,0.51,0.25,1.7,0.048,39,177,0.99212,3.28,0.57,10.5666666666667,5 +6,0.24,0.41,1.3,0.036,42,118,0.99018,3.04,0.64,11.7333333333333,6 +6.5,0.51,0.25,1.7,0.048,39,177,0.99212,3.28,0.57,10.6,5 +6.9,0.54,0.26,12.7,0.049,59,195,0.99596,3.26,0.54,10.5,6 +6,0.24,0.41,1.3,0.036,42,118,0.99018,3.04,0.64,11.75,6 +6.6,0.26,0.36,1.2,0.035,43,126,0.99094,3.01,0.63,11.4,6 +5.7,0.24,0.3,1.3,0.03,25,98,0.98968,3.37,0.43,12.4,7 +6.5,0.21,0.35,5.7,0.043,47,197,0.99392,3.24,0.5,10.1,6 +6.8,0.29,0.22,3.4,0.035,40,122,0.99024,3.09,0.47,12.3,6 +5,0.24,0.34,1.1,0.034,49,158,0.98774,3.32,0.32,13.1,7 +5.9,0.18,0.28,1,0.037,24,88,0.99094,3.29,0.55,10.65,7 +5.8,0.26,0.29,1,0.042,35,101,0.99044,3.36,0.48,11.4,7 +6.7,0.61,0.21,1.65,0.117,40,240,0.9938,3.11,0.57,9.3,5 +5.7,0.695,0.06,6.8,0.042,9,84,0.99432,3.44,0.44,10.2,5 +5.6,0.695,0.06,6.8,0.042,9,84,0.99432,3.44,0.44,10.2,5 +5.7,0.39,0.25,4.9,0.033,49,113,0.98966,3.26,0.58,13.1,7 
+6.1,0.38,0.47,1.4,0.051,59,210,0.99309,3.24,0.5,9.6,5 +6.3,0.36,0.28,2.5,0.035,18,73,0.98868,3.1,0.47,12.8,7 +6,0.29,0.41,10.8,0.048,55,149,0.9937,3.09,0.59,10.9666666666667,7 +6,0.29,0.41,10.8,0.048,55,149,0.9937,3.09,0.59,10.9666666666667,7 +6,0.29,0.41,10.8,0.048,55,149,0.9937,3.09,0.59,10.9666666666667,7 +6,0.29,0.41,10.8,0.048,55,149,0.9937,3.09,0.59,11,7 +7.1,0.43,0.25,2.8,0.036,43,132,0.98975,3.21,0.47,13.4,6 +6.6,0.25,0.25,1.3,0.04,28,85,0.98984,2.87,0.48,11.2,6 +6.6,0.33,0.41,2,0.027,14,79,0.99063,3.27,0.63,12.4,6 +8,0.23,0.41,1.1,0.048,35,150,0.99168,3.09,0.47,11.2,5 +7.3,0.17,0.36,8.2,0.028,44,111,0.99272,3.14,0.41,12.4,6 +6,0.17,0.33,6,0.036,30,111,0.99362,3.32,0.58,10.1333333333333,7 +6.1,0.16,0.34,6.1,0.034,31,114,0.99365,3.32,0.58,10.1333333333333,7 +7.3,0.17,0.36,8.2,0.028,44,111,0.99272,3.14,0.41,12.4,6 +6.4,0.31,0.53,8.8,0.057,36,221,0.99642,3.17,0.44,9.1,5 +6.1,0.16,0.34,6.1,0.034,31,114,0.99365,3.32,0.58,10.15,7 +6,0.17,0.33,6,0.036,30,111,0.99362,3.32,0.58,10.15,7 +5.9,0.44,0.33,1.2,0.049,12,117,0.99134,3.46,0.44,11.5,5 +6.6,0.285,0.49,11.4,0.035,57,137,0.99732,3.08,0.54,8.9,6 +4.9,0.335,0.14,1.3,0.036,69,168,0.99212,3.47,0.46,10.4666666666667,5 +4.9,0.335,0.14,1.3,0.036,69,168,0.99212,3.47,0.46,10.4666666666667,5 +6,0.28,0.52,6.2,0.028,37,104,0.99161,3.28,0.51,11.8,7 +5.8,0.35,0.29,3.2,0.034,41,151,0.9912,3.35,0.58,11.6333333333333,7 +5.7,0.21,0.37,4.5,0.04,58,140,0.99332,3.29,0.62,10.6,6 +6.5,0.25,0.32,9.9,0.045,41,128,0.99636,3.18,0.52,9.6,6 +6,0.28,0.52,6.2,0.028,37,104,0.99161,3.28,0.51,11.8,7 +6.6,0.285,0.49,11.4,0.035,57,137,0.99732,3.08,0.54,8.9,6 +4.7,0.335,0.14,1.3,0.036,69,168,0.99212,3.47,0.46,10.5,5 +6.8,0.63,0.04,1.3,0.058,25,133,0.99271,3.17,0.39,10.2,4 +5.6,0.27,0.37,0.9,0.025,11,49,0.98845,3.29,0.33,13.1,6 +6.8,0.32,0.33,0.7,0.027,15,66,0.9899,3.11,0.31,11.8,6 +6.5,0.33,0.32,1,0.041,39,120,0.99004,3.06,0.37,12.2,6 +6,0.24,0.34,1,0.036,52,184,0.99097,3.44,0.44,11.45,6 +7.2,0.26,0.32,10.4,0.062,23,114,0.9966,3.23,0.49,10.5,5 +6.8,0.63,0.04,1.3,0.058,25,133,0.99271,3.17,0.39,10.2,4 +6.7,0.16,0.32,12.5,0.035,18,156,0.99666,2.88,0.36,9,6 +6.7,0.16,0.32,12.5,0.035,18,156,0.99666,2.88,0.36,9,6 +6.7,0.16,0.32,12.5,0.035,18,156,0.99666,2.88,0.36,9,6 +6.7,0.16,0.32,12.5,0.035,18,156,0.99666,2.88,0.36,9,6 +6.9,0.19,0.31,19.25,0.043,38,167,0.99954,2.93,0.52,9.1,7 +6,0.36,0.32,1.1,0.053,26,173,0.99414,3.38,0.54,8.8,5 +6.7,0.16,0.32,12.5,0.035,18,156,0.99666,2.88,0.36,9,6 +6.9,0.19,0.31,19.25,0.043,38,167,0.99954,2.93,0.52,9.1,7 +6.7,0.35,0.32,9,0.032,29,113,0.99188,3.13,0.65,12.9,7 +6.1,0.15,0.4,1.2,0.03,19,84,0.98926,3.19,0.96,13,6 +6.7,0.35,0.32,9,0.032,29,113,0.99188,3.13,0.65,12.9,7 +7,0.27,0.74,1.3,0.173,34,121,0.99334,3.04,0.46,9.2,6 +6.8,0.3,0.33,12.8,0.041,60,168,0.99659,3.1,0.56,9.8,5 +6.8,0.3,0.33,12.8,0.041,60,168,0.99659,3.1,0.56,9.8,5 +6.4,0.69,0.09,7.6,0.044,34,144,0.9948,3.26,0.38,10.1,6 +6.4,0.69,0.09,7.6,0.044,34,144,0.9948,3.26,0.38,10.1,6 +5.9,0.12,0.28,1.4,0.037,36,83,0.99074,3.33,0.42,10.9,7 +6.3,0.36,0.5,8.3,0.053,51,202,0.9955,3.2,0.51,9.6,6 +5.7,0.27,0.16,9,0.053,32,111,0.99474,3.36,0.37,10.4,6 +6.1,0.22,0.4,1.85,0.031,25,111,0.98966,3.03,0.3,11.8,7 +5.6,0.205,0.16,12.55,0.051,31,115,0.99564,3.4,0.38,10.8,6 +7.2,0.33,0.28,1.4,0.034,26,109,0.99246,3.28,0.57,10.6,6 +5.9,0.21,0.31,1.8,0.033,45,142,0.98984,3.35,0.5,12.7,6 +5.4,0.33,0.31,4,0.03,27,108,0.99031,3.3,0.43,12.2,7 +5.4,0.205,0.16,12.55,0.051,31,115,0.99564,3.4,0.38,10.8,6 +5.7,0.27,0.16,9,0.053,32,111,0.99474,3.36,0.37,10.4,6 +6.4,0.28,0.28,3,0.04,19,98,0.99216,3.25,0.47,11.1,6 
+6.1,0.22,0.4,1.85,0.031,25,111,0.98966,3.03,0.3,11.8,7 +6.7,0.15,0.32,7.9,0.034,17,81,0.99512,3.29,0.31,10,6 +5.5,0.315,0.38,2.6,0.033,10,69,0.9909,3.12,0.59,10.8,6 +4.8,0.225,0.38,1.2,0.074,47,130,0.99132,3.31,0.4,10.3,6 +5.2,0.24,0.15,7.1,0.043,32,134,0.99378,3.24,0.48,9.9,6 +6.7,0.15,0.32,7.9,0.034,17,81,0.99512,3.29,0.31,10,6 +6.6,0.27,0.32,1.3,0.044,18,93,0.99044,3.11,0.56,12.25,5 +6.1,0.32,0.33,10.7,0.036,27,98,0.99521,3.34,0.52,10.2,6 +6,0.25,0.28,7.7,0.053,37,132,0.99489,3.06,0.5,9.4,6 +6.4,0.42,0.46,8.4,0.05,58,180,0.99495,3.18,0.46,9.7,6 +6.1,0.32,0.33,10.7,0.036,27,98,0.99521,3.34,0.52,10.2,6 +6.9,0.31,0.33,12.7,0.038,33,116,0.9954,3.04,0.65,10.4,6 +6.3,0.48,0.48,1.8,0.035,35,96,0.99121,3.49,0.74,12.2,6 +6,0.25,0.28,7.7,0.053,37,132,0.99489,3.06,0.5,9.4,6 +7.2,0.21,0.31,10.5,0.035,36,122,0.99478,3.12,0.4,10.6,6 +6.8,0.32,0.43,1.6,0.05,4,65,0.99346,3.27,0.47,10.7,5 +7.9,0.3,0.6,1.85,0.048,13,106,0.99331,3.24,0.49,11.85,5 +5.3,0.31,0.38,10.5,0.031,53,140,0.99321,3.34,0.46,11.7,6 +5.3,0.31,0.38,10.5,0.031,53,140,0.99321,3.34,0.46,11.7,6 +5.2,0.185,0.22,1,0.03,47,123,0.99218,3.55,0.44,10.15,6 +5.5,0.16,0.31,1.2,0.026,31,68,0.9898,3.33,0.44,11.6333333333333,6 +6,0.17,0.36,1.7,0.042,14,61,0.99144,3.22,0.54,10.8,6 +6,0.16,0.36,1.6,0.042,13,61,0.99143,3.22,0.54,10.8,6 +6.1,0.24,0.32,9,0.031,41,134,0.99234,3.25,0.26,12.3,7 +5.5,0.3,0.25,1.9,0.029,33,118,0.98972,3.36,0.66,12.5,6 +5.5,0.16,0.31,1.2,0.026,31,68,0.9898,3.33,0.44,11.65,6 +6,0.32,0.46,1.5,0.05,56,189,0.99308,3.24,0.49,9.6,5 +6.1,0.27,0.31,3.9,0.034,42,137,0.99218,3.24,0.46,10.9,6 +6,0.27,0.32,3.6,0.035,36,133,0.99215,3.23,0.46,10.8,6 +6,0.14,0.37,1.2,0.032,63,148,0.99185,3.32,0.44,11.2,5 +5,0.24,0.19,5,0.043,17,101,0.99438,3.67,0.57,10,5 +6.1,0.26,0.25,2.9,0.047,289,440,0.99314,3.44,0.64,10.5,3 +6.3,0.23,0.5,10.4,0.043,61,132,0.99542,2.86,0.46,9.1,6 +5.6,0.26,0.5,11.4,0.029,25,93,0.99428,3.23,0.49,10.5,6 +6.1,0.34,0.24,18.35,0.05,33,184,0.99943,3.12,0.61,9.3,5 +6.2,0.35,0.25,18.4,0.051,28,182,0.99946,3.13,0.62,9.3,6 +6,0.14,0.37,1.2,0.032,63,148,0.99185,3.32,0.44,11.2,5 +7.3,0.36,0.62,7.1,0.033,48,185,0.99472,3.14,0.62,10.6,6 +5.1,0.25,0.36,1.3,0.035,40,78,0.9891,3.23,0.64,12.1,7 +5.5,0.16,0.26,1.5,0.032,35,100,0.99076,3.43,0.77,12,6 +6.4,0.19,0.35,10.2,0.043,40,106,0.99632,3.16,0.5,9.7,6 +6.6,0.29,0.73,2.2,0.027,21,92,0.99,3.12,0.48,12.4,6 +6,0.38,0.26,3.5,0.035,38,111,0.98872,3.18,0.47,13.6,7 +6,0.38,0.26,3.5,0.035,38,111,0.98872,3.18,0.47,13.6,7 +6.5,0.2,0.35,3.9,0.04,27,140,0.99102,2.98,0.53,11.8,6 +6.6,0.17,0.26,7.4,0.052,45,128,0.99388,3.16,0.37,10,6 +6.6,0.17,0.26,7.4,0.052,45,128,0.99388,3.16,0.37,10,6 +6.2,0.15,0.27,11,0.035,46,116,0.99602,3.12,0.38,9.1,6 +5.9,0.48,0.3,1.5,0.037,19,78,0.99057,3.47,0.42,11.9,7 +5.3,0.4,0.25,3.9,0.031,45,130,0.99072,3.31,0.58,11.75,7 +5.9,0.26,0.29,5.4,0.046,34,116,0.99224,3.24,0.41,11.4,6 +5.2,0.3,0.34,1.5,0.038,18,96,0.98942,3.56,0.48,13,8 +6.4,0.32,0.25,5,0.055,28,138,0.99171,3.27,0.5,12.4,8 +6.6,0.19,0.25,1.2,0.052,34,181,0.99352,3.3,0.42,9.4,7 +6.8,0.27,0.3,13,0.047,69,160,0.99705,3.16,0.5,9.6,6 +6.8,0.27,0.3,13,0.047,69,160,0.99705,3.16,0.5,9.6,6 +6.8,0.27,0.3,13,0.047,69,160,0.99705,3.16,0.5,9.6,6 +6.8,0.27,0.3,13,0.047,69,160,0.99705,3.16,0.5,9.6,6 +6.4,0.28,0.45,8.6,0.057,47,223,0.99654,3.16,0.51,9.1,5 +5.2,0.21,0.31,1.7,0.048,17,61,0.98953,3.24,0.37,12,7 +7.1,0.24,0.34,1.2,0.045,6,132,0.99132,3.16,0.46,11.2,4 +5,0.27,0.4,1.2,0.076,42,124,0.99204,3.32,0.47,10.1,6 +5.8,0.27,0.4,1.2,0.076,47,130,0.99185,3.13,0.45,10.3,6 +5.9,0.27,0.32,2,0.034,31,102,0.98952,3.16,0.56,12.3,6 
+5.8,0.315,0.19,19.4,0.031,28,106,0.99704,2.97,0.4,10.55,6 +6,0.59,0,0.8,0.037,30,95,0.99032,3.1,0.4,10.9,4 +5.8,0.3,0.09,6.3,0.042,36,138,0.99382,3.15,0.48,9.7,5 +5.6,0.3,0.1,6.4,0.043,34,142,0.99382,3.14,0.48,9.8,5 +6.7,0.3,0.5,12.1,0.045,38,127,0.9974,3.04,0.53,8.9,6 +6.7,0.3,0.5,12.1,0.045,38,127,0.9974,3.04,0.53,8.9,6 +6.4,0.31,0.31,12.9,0.045,55,161,0.99546,3.02,0.59,10.2,5 +6.9,0.25,0.29,2.4,0.038,28,76,0.99088,3.01,0.36,11.7,7 +4.4,0.32,0.39,4.3,0.03,31,127,0.98904,3.46,0.36,12.8,8 +3.9,0.225,0.4,4.2,0.03,29,118,0.989,3.57,0.36,12.8,8 +6.4,0.31,0.31,12.9,0.045,55,161,0.99546,3.02,0.59,10.2,5 +5.5,0.62,0.33,1.7,0.037,24,118,0.98758,3.15,0.39,13.55,6 +6.2,0.3,0.42,2.2,0.036,28,128,0.9901,3.13,0.38,11.6,6 +6.7,0.3,0.5,12.1,0.045,38,127,0.9974,3.04,0.53,8.9,6 +4.7,0.785,0,3.4,0.036,23,134,0.98981,3.53,0.92,13.8,6 +6,0.31,0.32,7.4,0.175,47,159,0.9952,3.19,0.5,9.4,6 +6,0.32,0.3,7.3,0.174,46,159,0.99519,3.18,0.49,9.4,5 +6.4,0.105,0.29,1.1,0.035,44,140,0.99142,3.17,0.55,10.7,7 +6.4,0.105,0.29,1.1,0.035,44,140,0.99142,3.17,0.55,10.7,7 +5.7,0.33,0.32,1.4,0.043,28,93,0.9897,3.31,0.5,12.3,6 +5.9,0.32,0.19,14.5,0.042,37,115,0.99684,3.16,0.43,10.3,5 +6.2,0.26,0.2,8,0.047,35,111,0.99445,3.11,0.42,10.4,6 +6,0.2,0.33,1.1,0.039,45,126,0.99051,3.31,0.45,11.6,7 +6.4,0.105,0.29,1.1,0.035,44,140,0.99142,3.17,0.55,10.7,7 +5.8,0.28,0.34,2.2,0.037,24,125,0.98986,3.36,0.33,12.8,8 +6.4,0.31,0.5,5.8,0.038,42,111,0.99189,3.18,0.53,11.9,7 +6,0.35,0.46,0.9,0.033,9,65,0.98934,3.24,0.35,12.1,4 +5.1,0.26,0.34,6.4,0.034,26,99,0.99449,3.23,0.41,9.2,6 +6.6,0.28,0.09,10.9,0.051,37,131,0.99566,2.93,0.62,9.5,6 +6,0.17,0.3,7.3,0.039,39,104,0.99252,2.91,0.57,11,6 +7.3,0.35,0.67,8.3,0.053,10,100,0.9959,3.19,0.5,10.9,5 +6,0.26,0.24,1.3,0.053,66,150,0.9924,3.21,0.62,10.4,6 +5.4,0.375,0.4,3.3,0.054,29,147,0.99482,3.42,0.52,9.1,5 +7,0.17,0.42,1,0.075,19,71,0.99103,3.32,0.62,11.4,6 +5.1,0.26,0.33,1.1,0.027,46,113,0.98946,3.35,0.43,11.4,7 +5.8,0.36,0.5,1,0.127,63,178,0.99212,3.1,0.45,9.7,5 +5.7,0.4,0.35,5.1,0.026,17,113,0.99052,3.18,0.67,12.4,6 +6.2,0.76,0.01,3.2,0.041,18,120,0.99026,3.2,0.94,13.7,7 +6.1,0.41,0.2,12.6,0.032,54,136,0.99516,2.91,0.43,10.6,6 +5.8,0.385,0.25,3.7,0.031,38,122,0.99128,3.2,0.63,11.2,6 +6,0.27,0.4,1.7,0.021,18,82,0.9891,3.24,0.95,13.1333333333333,6 +5.7,0.4,0.35,5.1,0.026,17,113,0.99052,3.18,0.67,12.4,6 +5.8,0.36,0.5,1,0.127,63,178,0.99212,3.1,0.45,9.7,5 +7,0.24,0.47,1.3,0.043,18,131,0.99176,3.19,0.45,11,6 +6.8,0.23,0.48,1.5,0.036,35,165,0.99162,3.18,0.45,11.3,6 +6.5,0.28,0.34,4.6,0.054,22,130,0.99193,3.2,0.46,12,7 +6.4,0.23,0.35,10.3,0.042,54,140,0.9967,3.23,0.47,9.2,5 +6,0.34,0.29,6.1,0.046,29,134,0.99462,3.48,0.57,10.7,6 +6,0.34,0.29,6.1,0.046,29,134,0.99462,3.48,0.57,10.7,6 +6.7,0.22,0.33,1.2,0.036,36,86,0.99058,3.1,0.76,11.4,6 +6.4,0.23,0.35,10.3,0.042,54,140,0.9967,3.23,0.47,9.2,5 +6,0.32,0.33,9.9,0.032,22,90,0.99258,3.1,0.43,12.1,7 +5.8,0.29,0.27,1.6,0.062,17,140,0.99138,3.23,0.35,11.1,6 +5.8,0.38,0.26,1.1,0.058,20,140,0.99271,3.27,0.43,9.7,6 +5.9,0.32,0.26,1.5,0.057,17,141,0.9917,3.24,0.36,10.7,5 +5.6,0.33,0.28,1.2,0.031,33,97,0.99126,3.49,0.58,10.9,6 +5.9,0.37,0.3,1.5,0.033,35,95,0.98986,3.36,0.56,12,7 +5.6,0.295,0.26,1.1,0.035,40,102,0.99154,3.47,0.56,10.6,6 +6.7,0.5,0.36,11.5,0.096,18,92,0.99642,3.11,0.49,9.6,5 +6.5,0.28,0.38,7.8,0.031,54,216,0.99154,3.03,0.42,13.1,6 +5.3,0.275,0.24,7.4,0.038,28,114,0.99313,3.38,0.51,11,6 +5.2,0.405,0.15,1.45,0.038,10,44,0.99125,3.52,0.4,11.6,4 +6.8,0.34,0.36,8.9,0.029,44,128,0.99318,3.28,0.35,11.95,7 +5.7,0.22,0.25,1.1,0.05,97,175,0.99099,3.44,0.62,11.1,6 
+6.2,0.28,0.57,1,0.043,50,92,0.99004,3.17,0.36,11.5,6 +5.6,0.34,0.25,2.5,0.046,47,182,0.99093,3.21,0.4,11.3,5 +4.8,0.29,0.23,1.1,0.044,38,180,0.98924,3.28,0.34,11.9,6 +6.6,0.38,0.29,2.4,0.136,15,93,0.99336,3.18,0.6,9.5,5 +5.1,0.3,0.3,2.3,0.048,40,150,0.98944,3.29,0.46,12.2,6 +4.4,0.54,0.09,5.1,0.038,52,97,0.99022,3.41,0.4,12.2,7 +7,0.36,0.35,2.5,0.048,67,161,0.99146,3.05,0.56,11.1,6 +6.4,0.33,0.44,8.9,0.055,52,164,0.99488,3.1,0.48,9.6,5 +7,0.36,0.35,2.5,0.048,67,161,0.99146,3.05,0.56,11.1,6 +6.4,0.33,0.44,8.9,0.055,52,164,0.99488,3.1,0.48,9.6,5 +6.2,0.23,0.38,1.6,0.044,12,113,0.99176,3.3,0.73,11.4,5 +5.2,0.25,0.23,1.4,0.047,20,77,0.99001,3.32,0.62,11.4,5 +6.2,0.35,0.29,3.9,0.041,22,79,0.99005,3.1,0.59,12.0666666666667,6 +7.1,0.23,0.39,13.7,0.058,26,172,0.99755,2.9,0.46,9,6 +7.1,0.23,0.39,13.7,0.058,26,172,0.99755,2.9,0.46,9,6 +7.5,0.38,0.33,9.2,0.043,19,116,0.99444,3.08,0.42,11.4,6 +6.4,0.35,0.51,7.8,0.055,53,177,0.99502,3.12,0.45,9.6,5 +6,0.43,0.34,7.6,0.045,25,118,0.99222,3.03,0.37,11,6 +6,0.52,0.33,7.7,0.046,24,119,0.99224,3.04,0.38,11,6 +5.5,0.31,0.29,3,0.027,16,102,0.99067,3.23,0.56,11.2,6 +5.9,0.22,0.3,1.3,0.052,42,86,0.99069,3.31,0.47,11.55,6 +6.2,0.36,0.32,4,0.036,44,92,0.98936,3.2,0.5,13.3,7 +6,0.41,0.23,1.1,0.066,22,148,0.99266,3.3,0.47,9.63333333333333,5 +6.2,0.355,0.35,2,0.046,31,95,0.98822,3.06,0.46,13.6,6 +5.7,0.41,0.21,1.9,0.048,30,112,0.99138,3.29,0.55,11.2,6 +5.3,0.6,0.34,1.4,0.031,3,60,0.98854,3.27,0.38,13,6 +5.8,0.23,0.31,4.5,0.046,42,124,0.99324,3.31,0.64,10.8,6 +6.6,0.24,0.33,10.1,0.032,8,81,0.99626,3.19,0.51,9.8,6 +6.1,0.32,0.28,6.6,0.021,29,132,0.99188,3.15,0.36,11.45,7 +5,0.2,0.4,1.9,0.015,20,98,0.9897,3.37,0.55,12.05,6 +6,0.42,0.41,12.4,0.032,50,179,0.99622,3.14,0.6,9.7,5 +5.7,0.21,0.32,1.6,0.03,33,122,0.99044,3.33,0.52,11.9,6 +5.6,0.2,0.36,2.5,0.048,16,125,0.99282,3.49,0.49,10,6 +7.4,0.22,0.26,1.2,0.035,18,97,0.99245,3.12,0.41,9.7,6 +6.2,0.38,0.42,2.5,0.038,34,117,0.99132,3.36,0.59,11.6,7 +5.9,0.54,0,0.8,0.032,12,82,0.99286,3.25,0.36,8.8,5 +6.2,0.53,0.02,0.9,0.035,6,81,0.99234,3.24,0.35,9.5,4 +6.6,0.34,0.4,8.1,0.046,68,170,0.99494,3.15,0.5,9.53333333333333,6 +6.6,0.34,0.4,8.1,0.046,68,170,0.99494,3.15,0.5,9.53333333333333,6 +5,0.235,0.27,11.75,0.03,34,118,0.9954,3.07,0.5,9.4,6 +5.5,0.32,0.13,1.3,0.037,45,156,0.99184,3.26,0.38,10.7,5 +4.9,0.47,0.17,1.9,0.035,60,148,0.98964,3.27,0.35,11.5,6 +6.5,0.33,0.38,8.3,0.048,68,174,0.99492,3.14,0.5,9.6,5 +6.6,0.34,0.4,8.1,0.046,68,170,0.99494,3.15,0.5,9.55,6 +6.2,0.21,0.28,5.7,0.028,45,121,0.99168,3.21,1.08,12.15,7 +6.2,0.41,0.22,1.9,0.023,5,56,0.98928,3.04,0.79,13,7 +6.8,0.22,0.36,1.2,0.052,38,127,0.9933,3.04,0.54,9.2,5 +4.9,0.235,0.27,11.75,0.03,34,118,0.9954,3.07,0.5,9.4,6 +6.1,0.34,0.29,2.2,0.036,25,100,0.98938,3.06,0.44,11.8,6 +5.7,0.21,0.32,0.9,0.038,38,121,0.99074,3.24,0.46,10.6,6 +6.5,0.23,0.38,1.3,0.032,29,112,0.99298,3.29,0.54,9.7,5 +6.2,0.21,0.29,1.6,0.039,24,92,0.99114,3.27,0.5,11.2,6 +6.6,0.32,0.36,8,0.047,57,168,0.9949,3.15,0.46,9.6,5 +6.5,0.24,0.19,1.2,0.041,30,111,0.99254,2.99,0.46,9.4,6 +5.5,0.29,0.3,1.1,0.022,20,110,0.98869,3.34,0.38,12.8,7 +6,0.21,0.38,0.8,0.02,22,98,0.98941,3.26,0.32,11.8,6 diff --git a/examples/flower_classifier/MLproject b/examples/flower_classifier/MLproject new file mode 100644 index 0000000000000..58a0b4a067c79 --- /dev/null +++ b/examples/flower_classifier/MLproject @@ -0,0 +1,22 @@ +name: FlowerClassifier + +conda_env: conda.yaml + +entry_points: + # train Keras DL model + main: + parameters: + training_data: {type: string, default: "./flower_photos"} + epochs: {type: int, default: 
1}
+    image_width: {type: int, default: 224}
+    image_height: {type: int, default: 224}
+    batch_size: {type: int, default: 16}
+    test_ratio: {type: float, default: 0.2}
+    seed: {type: int, default: 97531}
+    command: "python train.py --training-data {training_data}
+                              --batch-size {batch_size}
+                              --epochs {epochs}
+                              --image-width {image_width}
+                              --image-height {image_height}
+                              --test-ratio {test_ratio}"
+
diff --git a/examples/flower_classifier/README.rst b/examples/flower_classifier/README.rst
new file mode 100644
index 0000000000000..be5b3dd2f6be2
--- /dev/null
+++ b/examples/flower_classifier/README.rst
@@ -0,0 +1,115 @@
+How To Train and Deploy an Image Classifier with MLflow and Keras
+-----------------------------------------------------------------
+
+In this example we demonstrate how to train and deploy image classification models with MLflow.
+We train a VGG16 deep learning model to classify flower species from photos using a `dataset
+<http://download.tensorflow.org/example_images/flower_photos.tgz>`_ available from `tensorflow.org
+<https://www.tensorflow.org>`_. Note that although we use Keras to train the model in this case,
+a similar approach can be applied to other deep learning frameworks such as ``PyTorch``.
+
+The MLflow model produced by running this example can be deployed to any endpoint supported by
+MLflow. All the necessary image preprocessing is packaged with the model, so the model can be
+applied to image data directly. All that is required to pass new data to the model is to encode
+the binary image data as a base64 string in a pandas DataFrame, the standard interface for MLflow
+python function models (see the encoding sketch after the file list below). The included Python
+scripts demonstrate how the model can be deployed to a REST API endpoint for real-time evaluation
+or to Spark for batch scoring.
+
+In order to include custom image pre-processing logic with the model, we define the model as a
+custom python function model wrapping the underlying Keras model. The wrapper provides the
+preprocessing necessary to convert input data into the multidimensional arrays expected by the
+Keras model. The preprocessing logic is stored with the model as a code dependency. Here is an
+example of the output model directory layout:
+
+.. code-block:: bash
+
+    tree model
+
+::
+
+    model
+    ├── MLmodel
+    ├── code
+    │   └── image_pyfunc.py
+    ├── data
+    │   └── image_model
+    │       ├── conf.yaml
+    │       └── keras_model
+    │           ├── MLmodel
+    │           ├── conda.yaml
+    │           └── model.h5
+    └── mlflow_env.yml
+
+
+The example contains the following files:
+
+  * MLproject
+      Contains the definition of this project, with a single entry point that trains the model.
+
+  * conda.yaml
+      Defines the project dependencies. NOTE: You might want to change the tensorflow package to
+      tensorflow-gpu if you have GPUs available.
+
+  * train.py
+      Main entry point of the project. Handles command line arguments and downloads the dataset
+      if it is missing.
+
+  * image_pyfunc.py
+      Implements the custom python function (pyfunc) flavor model that wraps the Keras model
+      together with its image preprocessing. Note that the same preprocessing code that is used
+      during model training is packaged with the output model and is used during scoring.
+
+  * score_images_rest.py
+      Scores an image or a directory of images using a model deployed to a REST endpoint.
+
+  * score_images_spark.py
+      Scores an image or a directory of images using a model loaded into Spark.
+
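+To construct the model input on the client side, read each image file's raw bytes and
+base64-encode them into a single ``image`` column of a pandas DataFrame. A minimal sketch
+(the image path is a placeholder) follows:
+
+.. code-block:: python
+
+    import base64
+    import pandas as pd
+
+    # Read the raw image bytes and base64-encode them into the "image" column,
+    # the one-column DataFrame interface this model expects.
+    with open("/path/to/image.jpg", "rb") as f:
+        data = pd.DataFrame({"image": [base64.encodebytes(f.read()).decode("utf-8")]})
+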
+Running this Example
+^^^^^^^^^^^^^^^^^^^^
+
+To train the model, run the example as a standard MLflow project:
+
+.. code-block:: bash
+
+    mlflow run examples/flower_classifier
+
+This will download the training dataset from ``tensorflow.org``, train a classifier using Keras,
+and log the results with MLflow.
+
+To test your model, run the included scoring scripts. For example, say your model was trained
+with run_id ``101``.
+
+- To test REST API scoring, do the following two steps:
+
+  1. Deploy the model as a local REST endpoint by running ``mlflow models serve``:
+
+     .. code-block:: bash
+
+        # deploy the model to a local REST api endpoint
+        mlflow models serve --model-uri runs:/101/model --port 54321
+
+  2. Apply the model to new data using the provided score_images_rest.py script. Note that
+     ``--model-uri`` here is the address the model server from step 1 is listening on:
+
+     .. code-block:: bash
+
+        # score the deployed model
+        python score_images_rest.py --model-uri http://127.0.0.1 --port 54321 --data-path /path/to/images/for/scoring
+
+- To test batch scoring in Spark, run score_images_spark.py to score the model in Spark like
+  this:
+
+  .. code-block:: bash
+
+     python score_images_spark.py --model-uri runs:/101/model --data-path /path/to/images/for/scoring
+
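+- Alternatively, you can load the model and score images directly in Python without deploying
+  it. A minimal sketch, assuming the same hypothetical run id ``101`` and a placeholder image
+  path:
+
+  .. code-block:: python
+
+     import base64
+     import pandas as pd
+     import mlflow.pyfunc
+
+     # Load the pyfunc model logged by this project and score one base64-encoded image.
+     model = mlflow.pyfunc.load_pyfunc("runs:/101/model")
+     with open("/path/to/image.jpg", "rb") as f:
+         data = pd.DataFrame({"image": [base64.encodebytes(f.read()).decode("utf-8")]})
+     print(model.predict(data))
+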
+ """ + + def __init__(self, graph, session, model, image_dims, domain): + self._graph = graph + self._session = session + self._model = model + self._image_dims = image_dims + self._domain = domain + probs_names = ["p({})".format(x) for x in domain] + self._column_names = ["predicted_label", "predicted_label_id"] + probs_names + + def predict(self, input): + """ + Generate predictions for the data. + + :param input: pandas.DataFrame with one column containing images to be scored. The image + column must contain base64 encoded binary content of the image files. The image + format must be supported by PIL (e.g. jpeg or png). + + :return: pandas.DataFrame containing predictions with the following schema: + Predicted class: string, + Predicted class index: int, + Probability(class==0): float, + ..., + Probability(class==N): float, + """ + + # decode image bytes from base64 encoding + def decode_img(x): + return pd.Series(base64.decodebytes(bytearray(x[0], encoding="utf8"))) + + images = input.apply(axis=1, func=decode_img) + probs = self._predict_images(images) + m, n = probs.shape + label_idx = np.argmax(probs, axis=1) + labels = np.array([self._domain[i] for i in label_idx], dtype=np.str).reshape(m, 1) + output_data = np.concatenate((labels, label_idx.reshape(m, 1), probs), axis=1) + res = pd.DataFrame(columns=self._column_names, data=output_data) + res.index = input.index + return res + + def _predict_images(self, images): + """ + Generate predictions for input images. + :param images: binary image data + :return: predicted probabilities for each class + """ + + def preprocess_f(z): + return decode_and_resize_image(z, self._image_dims[:2]) + + x = np.array( + images[images.columns[0]].apply(preprocess_f).tolist()) + with self._graph.as_default(): + with self._session.as_default(): + return self._model.predict(x) + + +def log_model(keras_model, artifact_path, image_dims, domain): + """ + Log a KerasImageClassifierPyfunc model as an MLflow artifact for the current run. + + :param keras_model: Keras model to be saved. + :param artifact_path: Run-relative artifact path this model is to be saved to. + :param image_dims: Image dimensions the Keras model expects. + :param domain: Labels for the classes this model can predict. + """ + + with TempDir() as tmp: + data_path = tmp.path("image_model") + os.mkdir(data_path) + conf = { + "image_dims": "/".join(map(str, image_dims)), + "domain": "/".join(map(str, domain)) + } + with open(os.path.join(data_path, "conf.yaml"), "w") as f: + yaml.safe_dump(conf, stream=f) + keras_path = os.path.join(data_path, "keras_model") + mlflow.keras.save_model(keras_model, path=keras_path) + conda_env = tmp.path("conda_env.yaml") + with open(conda_env, "w") as f: + f.write(conda_env_template.format(python_version=PYTHON_VERSION, + keras_version=keras.__version__, + tf_name=tf.__name__, # can have optional -gpu suffix + tf_version=tf.__version__, + pillow_version=PIL.__version__)) + + mlflow.pyfunc.log_model(artifact_path=artifact_path, + loader_module=__name__, + code_path=[__file__], + data_path=data_path, + conda_env=conda_env) + + +def _load_pyfunc(path): + """ + Load the KerasImageClassifierPyfunc model. 
+    """
+    with open(os.path.join(path, "conf.yaml"), "r") as f:
+        conf = yaml.safe_load(f)
+    keras_model_path = os.path.join(path, "keras_model")
+    domain = conf["domain"].split("/")
+    image_dims = np.array([int(x) for x in conf["image_dims"].split("/")], dtype=np.int32)
+    # NOTE: TensorFlow based models depend on global state (Graph and Session) given by the
+    # context. To make sure we score the model in the same session as we loaded it in, we create
+    # a new session and a new graph here and store them with the model.
+    with tf.Graph().as_default() as g:
+        with tf.Session().as_default() as sess:
+            keras.backend.set_session(sess)
+            keras_model = mlflow.keras.load_model(keras_model_path)
+    return KerasImageClassifierPyfunc(g, sess, keras_model, image_dims, domain=domain)
+
+
+conda_env_template = """
+name: flower_classifier
+channels:
+  - defaults
+  - anaconda
+dependencies:
+  - python=={python_version}
+  - keras=={keras_version}
+  - {tf_name}=={tf_version}
+  - pip:
+    - pillow=={pillow_version}
+"""
diff --git a/examples/flower_classifier/score_images_rest.py b/examples/flower_classifier/score_images_rest.py
new file mode 100644
index 0000000000000..0c6da2653ed65
--- /dev/null
+++ b/examples/flower_classifier/score_images_rest.py
@@ -0,0 +1,64 @@
+"""
+Example of scoring images with an MLflow model deployed to a REST API endpoint.
+
+The MLflow model to be scored is expected to be an instance of KerasImageClassifierPyfunc
+(e.g. produced by running this project) and deployed with MLflow prior to invoking this script.
+"""
+import os
+import base64
+import requests
+
+import click
+import pandas as pd
+
+from mlflow.utils import cli_args
+
+
+def score_model(path, uri, port):
+    """
+    Score images on the local path with the MLflow model deployed at the given uri and port.
+
+    :param path: Path to a single image file or a directory of images.
+    :param uri: URI the model is deployed at.
+    :param port: Port the model is deployed at.
+    :return: Server response.
+    """
+    if os.path.isdir(path):
+        filenames = [os.path.join(path, x) for x in os.listdir(path)
+                     if os.path.isfile(os.path.join(path, x))]
+    else:
+        filenames = [path]
+
+    def read_image(x):
+        with open(x, "rb") as f:
+            return f.read()
+
+    data = pd.DataFrame(data=[base64.encodebytes(read_image(x)) for x in filenames],
+                        columns=["image"]).to_json(orient="split")
+
+    response = requests.post(url='{uri}:{port}/invocations'.format(uri=uri, port=port),
+                             data=data,
+                             headers={"Content-Type": "application/json; format=pandas-split"})
+
+    if response.status_code != 200:
+        raise Exception("Status Code {status_code}. {text}".format(
+            status_code=response.status_code,
+            text=response.text
+        ))
+    return response
+
+
+@click.command(help="Score images.")
+@click.option("--port", type=click.INT, default=80, help="Port at which the model is deployed.")
+@cli_args.MODEL_URI
+@click.option("--data-path", "-d")
+@click.argument("uri")
+def run(uri, data_path, model_uri, port):
+    """
+    Score images with the MLflow model deployed at the given uri and port and print the response
+    to standard output.
+    """
+    # model_uri identifies the deployed model; the request itself goes to uri:port.
+    print(score_model(data_path, uri, port).text)
+
+
+if __name__ == '__main__':
+    run()
diff --git a/examples/flower_classifier/score_images_spark.py b/examples/flower_classifier/score_images_spark.py
new file mode 100644
index 0000000000000..2f24bd24d155d
--- /dev/null
+++ b/examples/flower_classifier/score_images_spark.py
@@ -0,0 +1,85 @@
+"""
+Example of scoring images in Spark with an MLflow model produced by running this project.
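+
+Example invocation (see README.rst): ``python score_images_spark.py --model-uri runs:/101/model
+--data-path /path/to/images/for/scoring``.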
+
+The MLflow model is loaded into Spark using ``mlflow.pyfunc.spark_udf``. The images are read as
+binary data, represented as a base64 encoded string column and passed to the model. The results
+are returned as a column with the predicted class label, class id and probabilities for each
+class encoded as an array of strings.
+"""
+import os
+import base64
+
+import click
+
+import pyspark
+
+import mlflow
+import mlflow.pyfunc
+from mlflow.utils import cli_args
+
+from pyspark.sql.types import *
+from pyspark.sql.types import Row
+
+import pandas as pd
+
+
+def read_image_bytes_base64(path):
+    with open(path, "rb") as f:
+        return str(base64.encodebytes(f.read()), encoding="utf8")
+
+
+def read_images(spark, filenames):
+    filenames_rdd = spark.sparkContext.parallelize(filenames)
+    schema = StructType(
+        [StructField("filename", StringType(), True), StructField("image", StringType(), True)])
+    return filenames_rdd.map(lambda x: Row(filename=x,
+                                           image=read_image_bytes_base64(x))).toDF(schema=schema)
+
+
+def score_model(spark, data_path, model_uri):
+    if os.path.isdir(data_path):
+        filenames = [os.path.abspath(os.path.join(data_path, x)) for x in os.listdir(data_path)
+                     if os.path.isfile(os.path.join(data_path, x))]
+    else:
+        filenames = [data_path]
+
+    image_classifier_udf = mlflow.pyfunc.spark_udf(spark=spark,
+                                                   model_uri=model_uri,
+                                                   result_type=ArrayType(StringType()))
+
+    image_df = read_images(spark, filenames)
+
+    raw_preds = image_df.withColumn("prediction", image_classifier_udf("image")).select(
+        ["filename", "prediction"]).toPandas()
+    # load the pyfunc model to get our domain
+    pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=model_uri)
+    preds = pd.DataFrame(raw_preds["filename"], index=raw_preds.index)
+    preds[pyfunc_model._column_names] = pd.DataFrame(raw_preds['prediction'].values.tolist(),
+                                                     columns=pyfunc_model._column_names,
+                                                     index=raw_preds.index)
+    return preds.to_json(orient='records')
+
+
+@click.command(help="Score images.")
+@cli_args.MODEL_URI
+@click.option("--data-path", "-d")
+def run(model_uri, data_path):
+    with pyspark.sql.SparkSession.builder \
+            .config(key="spark.python.worker.reuse", value=True) \
+            .config(key="spark.ui.enabled", value=False) \
+            .master("local-cluster[2, 1, 1024]") \
+            .getOrCreate() as spark:
+        # ignore spark log output
+        spark.sparkContext.setLogLevel("OFF")
+        print(score_model(spark, data_path, model_uri))
+
+
+if __name__ == '__main__':
+    run()
diff --git a/examples/flower_classifier/train.py b/examples/flower_classifier/train.py
new file mode 100644
index 0000000000000..6dcb5e0eb15b9
--- /dev/null
+++ b/examples/flower_classifier/train.py
@@ -0,0 +1,227 @@
+"""
+Example of image classification with MLflow using Keras to classify flowers from photos. The data
+is taken from ``http://download.tensorflow.org/example_images/flower_photos.tgz`` and may be
+downloaded while running this project if it is missing.
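+
+Run this example end to end with ``mlflow run examples/flower_classifier`` (see README.rst).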
+""" +import math +import os + +import click +import keras +from keras.utils import np_utils +from keras.models import Model +from keras.callbacks import Callback +from keras.applications import vgg16 +from keras.layers import Input, Dense, Flatten, Lambda +import numpy as np +from sklearn.model_selection import train_test_split +import tensorflow as tf + +import mlflow + +from image_pyfunc import decode_and_resize_image, log_model, KerasImageClassifierPyfunc + + +def download_input(): + import requests + url = 'http://download.tensorflow.org/example_images/flower_photos.tgz' + print("downloading '{}' into '{}'".format(url, os.path.abspath("flower_photos.tgz"))) + r = requests.get(url) + with open('flower_photos.tgz', 'wb') as f: + f.write(r.content) + import tarfile + print("decompressing flower_photos.tgz to '{}'".format(os.path.abspath("flower_photos"))) + with tarfile.open("flower_photos.tgz") as tar: + tar.extractall(path="./") + + +@click.command(help="Trains an Keras model on flower_photos dataset." + "The input is expected as a directory tree with pictures for each category in a" + " folder named by the category." + "The model and its metrics are logged with mlflow.") +@click.option("--epochs", type=click.INT, default=1, help="Maximum number of epochs to evaluate.") +@click.option("--batch-size", type=click.INT, default=1, + help="Batch size passed to the learning algo.") +@click.option("--image-width", type=click.INT, default=224, help="Input image width in pixels.") +@click.option("--image-height", type=click.INT, default=224, help="Input image height in pixels.") +@click.option("--seed", type=click.INT, default=97531, help="Seed for the random generator.") +@click.option("--training-data") +@click.option("--test-ratio", type=click.FLOAT, default=0.2) +def run(training_data, test_ratio, epochs, batch_size, image_width, image_height, seed): + image_files = [] + labels = [] + domain = {} + print("Training model with the following parameters:") + for param, value in locals().items(): + print(" ", param, "=", value) + + if training_data == "./flower_photos" and not os.path.exists(training_data): + print("Input data not found, attempting to download the data from the web.") + download_input() + + for (dirname, _, files) in os.walk(training_data): + for filename in files: + if filename.endswith("jpg"): + image_files.append(os.path.join(dirname, filename)) + clazz = os.path.basename(dirname) + if clazz not in domain: + domain[clazz] = len(domain) + labels.append(domain[clazz]) + + train(image_files, labels, domain, + epochs=epochs, + test_ratio=test_ratio, + batch_size=batch_size, + image_width=image_width, + image_height=image_height, + seed=seed) + + +class MLflowLogger(Callback): + """ + Keras callback for logging metrics and final model with MLflow. + + Metrics are logged after every epoch. The logger keeps track of the best model based on the + validation metric. At the end of the training, the best model is logged with MLflow. + """ + def __init__(self, model, x_train, y_train, x_valid, y_valid, + **kwargs): + self._model = model + self._best_val_loss = math.inf + self._train = (x_train, y_train) + self._valid = (x_valid, y_valid) + self._pyfunc_params = kwargs + self._best_weights = None + + def on_epoch_end(self, epoch, logs=None): + """ + Log Keras metrics with MLflow. Update the best model if the model improved on the validation + data. 
+        """
+        if not logs:
+            return
+        for name, value in logs.items():
+            if name.startswith("val_"):
+                name = "valid_" + name[4:]
+            else:
+                name = "train_" + name
+            mlflow.log_metric(name, value)
+        val_loss = logs["val_loss"]
+        if val_loss < self._best_val_loss:
+            # Save the "best" weights
+            self._best_val_loss = val_loss
+            self._best_weights = [x.copy() for x in self._model.get_weights()]
+
+    def on_train_end(self, *args, **kwargs):
+        """
+        Log the best model with MLflow and evaluate it on the train and validation data so that
+        the metrics stored with MLflow reflect the logged model.
+        """
+        self._model.set_weights(self._best_weights)
+        x, y = self._train
+        train_res = self._model.evaluate(x=x, y=y)
+        for name, value in zip(self._model.metrics_names, train_res):
+            mlflow.log_metric("train_{}".format(name), value)
+        x, y = self._valid
+        valid_res = self._model.evaluate(x=x, y=y)
+        for name, value in zip(self._model.metrics_names, valid_res):
+            mlflow.log_metric("valid_{}".format(name), value)
+        log_model(keras_model=self._model, **self._pyfunc_params)
+
+
+def _imagenet_preprocess_tf(x):
+    return (x / 127.5) - 1
+
+
+def _create_model(input_shape, classes):
+    image = Input(input_shape)
+    lambda_layer = Lambda(_imagenet_preprocess_tf)
+    preprocessed_image = lambda_layer(image)
+    model = vgg16.VGG16(classes=classes,
+                        input_tensor=preprocessed_image,
+                        weights=None,
+                        include_top=False)
+
+    x = Flatten(name='flatten')(model.output)
+    x = Dense(4096, activation='relu', name='fc1')(x)
+    x = Dense(4096, activation='relu', name='fc2')(x)
+    x = Dense(classes, activation='softmax', name='predictions')(x)
+    return Model(inputs=model.input, outputs=x)
+
+
+def train(image_files,
+          labels,
+          domain,
+          image_width=224,
+          image_height=224,
+          epochs=1,
+          batch_size=16,
+          test_ratio=0.2,
+          seed=None):
+    """
+    Train a VGG16 model on the provided image files. This will create a new MLflow run and log
+    all parameters, metrics and the resulting model with MLflow. The resulting model is an
+    instance of KerasImageClassifierPyfunc - a custom python function model that embeds all
+    necessary preprocessing together with the VGG16 Keras model. The resulting model can be
+    applied directly to base64 encoded image data.
+
+    :param image_height: Height of the input image in pixels.
+    :param image_width: Width of the input image in pixels.
+    :param image_files: List of image files to be used for training.
+    :param labels: List of labels for the image files.
+    :param domain: Dictionary representing the domain of the response.
+                   Provides mapping label-name -> label-id.
+    :param epochs: Number of epochs to train the model for.
+    :param batch_size: Batch size used during training.
+    :param test_ratio: Fraction of the dataset to be used for validation. This data will not be
+                       used during training.
+    :param seed: Random seed. Used e.g. when splitting the dataset into train / validation.
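+
+    A minimal invocation sketch (mirrors the call in ``run()`` above; the argument values are
+    placeholders):
+
+        train(image_files, labels, domain, epochs=10, batch_size=16)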
+ """ + assert len(set(labels)) == len(domain) + + input_shape = (image_width, image_height, 3) + + with mlflow.start_run() as run: + mlflow.log_param("epochs", str(epochs)) + mlflow.log_param("batch_size", str(batch_size)) + mlflow.log_param("validation_ratio", str(test_ratio)) + if seed: + mlflow.log_param("seed", str(seed)) + + def _read_image(filename): + with open(filename, "rb") as f: + return f.read() + + with tf.Graph().as_default() as g: + with tf.Session(graph=g).as_default(): + dims = input_shape[:2] + x = np.array([decode_and_resize_image(_read_image(x), dims) + for x in image_files]) + y = np_utils.to_categorical(np.array(labels), num_classes=len(domain)) + train_size = 1 - test_ratio + x_train, x_valid, y_train, y_valid = train_test_split(x, y, random_state=seed, + train_size=train_size) + model = _create_model(input_shape=input_shape, classes=len(domain)) + model.compile( + optimizer=keras.optimizers.SGD(decay=1e-5, nesterov=True, momentum=.9), + loss=keras.losses.categorical_crossentropy, + metrics=["accuracy"]) + sorted_domain = sorted(domain.keys(), key=lambda x: domain[x]) + model.fit( + x=x_train, + y=y_train, + validation_data=(x_valid, y_valid), + epochs=epochs, + batch_size=batch_size, + callbacks=[MLflowLogger(model=model, + x_train=x_train, + y_train=y_train, + x_valid=x_valid, + y_valid=y_valid, + artifact_path="model", + domain=sorted_domain, + image_dims=input_shape)]) + + +if __name__ == '__main__': + run() diff --git a/examples/h2o/MLproject b/examples/h2o/MLproject new file mode 100644 index 0000000000000..4b6c3d926699a --- /dev/null +++ b/examples/h2o/MLproject @@ -0,0 +1,6 @@ +name: h2o-example +conda_env: conda.yaml + +entry_points: + main: + command: "python random_forest.py" diff --git a/examples/h2o/conda.yaml b/examples/h2o/conda.yaml new file mode 100644 index 0000000000000..3a2d2569e6c84 --- /dev/null +++ b/examples/h2o/conda.yaml @@ -0,0 +1,11 @@ +name: h2o_example +channels: + - defaults + - anaconda +dependencies: + - python==3.6 + - numpy==1.14.2 + - pandas + - pip: + - h2o + - mlflow>=1.0 diff --git a/examples/h2o/random_forest.ipynb b/examples/h2o/random_forest.ipynb index dcab9b7d8f662..973fd7e918398 100644 --- a/examples/h2o/random_forest.ipynb +++ b/examples/h2o/random_forest.ipynb @@ -9,37 +9,44 @@ "name": "stdout", "output_type": "stream", "text": [ - "Checking whether there is an H2O instance running at http://localhost:54321. connected.\n", - "Warning: Your H2O cluster version is too old (4 months and 14 days)! Please download and install the latest version from http://h2o.ai/download/\n" + "Checking whether there is an H2O instance running at http://localhost:54321..... not found.\n", + "Attempting to start a local H2O server...\n", + " Java Version: openjdk version \"1.8.0_181\"; OpenJDK Runtime Environment (build 1.8.0_181-8u181-b13-2~deb9u1-b13); OpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)\n", + " Starting server from /opt/conda/lib/python2.7/site-packages/h2o/backend/bin/h2o.jar\n", + " Ice root: /tmp/tmpz8qTmm\n", + " JVM stdout: /tmp/tmpz8qTmm/h2o_unknownUser_started_from_python.out\n", + " JVM stderr: /tmp/tmpz8qTmm/h2o_unknownUser_started_from_python.err\n", + " Server is running at http://127.0.0.1:54321\n", + "Connecting to H2O server at http://127.0.0.1:54321... successful.\n" ] }, { "data": { "text/html": [ "
\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", @@ -47,26 +54,26 @@ "\n", "\n", "\n", - "
H2O cluster uptime:53 mins 41 secs
01 secs
H2O cluster timezone:Europe/Brussels
Etc/UTC
H2O data parsing timezone:UTC
H2O cluster version:3.18.0.2
3.22.1.1
H2O cluster version age:4 months and 14 days !!!
23 days
H2O cluster name:H2O_from_python_toon_2s41u5
H2O_from_python_unknownUser_ukj9f9
H2O cluster total nodes:1
H2O cluster free memory:3.204 Gb
3.042 Gb
H2O cluster total cores:8
7
H2O cluster allowed cores:8
7
H2O cluster status:locked, healthy
accepting new members, healthy
H2O connection url:http://localhost:54321
http://127.0.0.1:54321
H2O connection proxy:None
H2O internal security:
H2O API Extensions:XGBoost, Algos, AutoML, Core V3, Core V4
Python version:3.6.5 final
" + "2.7.15 final" ], "text/plain": [ "-------------------------- ----------------------------------------\n", - "H2O cluster uptime: 53 mins 41 secs\n", - "H2O cluster timezone: Europe/Brussels\n", + "H2O cluster uptime: 01 secs\n", + "H2O cluster timezone: Etc/UTC\n", "H2O data parsing timezone: UTC\n", - "H2O cluster version: 3.18.0.2\n", - "H2O cluster version age: 4 months and 14 days !!!\n", - "H2O cluster name: H2O_from_python_toon_2s41u5\n", + "H2O cluster version: 3.22.1.1\n", + "H2O cluster version age: 23 days\n", + "H2O cluster name: H2O_from_python_unknownUser_ukj9f9\n", "H2O cluster total nodes: 1\n", - "H2O cluster free memory: 3.204 Gb\n", - "H2O cluster total cores: 8\n", - "H2O cluster allowed cores: 8\n", - "H2O cluster status: locked, healthy\n", - "H2O connection url: http://localhost:54321\n", + "H2O cluster free memory: 3.042 Gb\n", + "H2O cluster total cores: 7\n", + "H2O cluster allowed cores: 7\n", + "H2O cluster status: accepting new members, healthy\n", + "H2O connection url: http://127.0.0.1:54321\n", "H2O connection proxy:\n", "H2O internal security: False\n", "H2O API Extensions: XGBoost, Algos, AutoML, Core V3, Core V4\n", - "Python version: 3.6.5 final\n", + "Python version: 2.7.15 final\n", "-------------------------- ----------------------------------------" ] }, @@ -101,7 +108,7 @@ "metadata": {}, "outputs": [], "source": [ - "def trainRandomForest(ntrees):\n", + "def train_random_forest(ntrees):\n", " with mlflow.start_run():\n", " rf = H2ORandomForestEstimator(ntrees=ntrees)\n", " train_cols = [n for n in wine.col_names if n != \"quality\"]\n", @@ -135,7 +142,7 @@ ], "source": [ "for ntrees in [10, 20, 50, 100, 200]:\n", - " trainRandomForest(ntrees)" + " train_random_forest(ntrees)" ] }, { @@ -155,7 +162,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 5, @@ -177,21 +184,21 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 2", "language": "python", - "name": "python3" + "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 3 + "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" + "pygments_lexer": "ipython2", + "version": "2.7.15" } }, "nbformat": 4, diff --git a/examples/h2o/random_forest.py b/examples/h2o/random_forest.py index f176efafa70f3..c32ff928c4316 100644 --- a/examples/h2o/random_forest.py +++ b/examples/h2o/random_forest.py @@ -29,4 +29,4 @@ def train_random_forest(ntrees): if __name__ == "__main__": for ntrees in [10, 20, 50, 100, 200]: - trainRandomForest(ntrees) + train_random_forest(ntrees) diff --git a/examples/hyperparam/MLproject b/examples/hyperparam/MLproject index 1bf1ae369820d..bb4e519c9c72b 100644 --- a/examples/hyperparam/MLproject +++ b/examples/hyperparam/MLproject @@ -27,14 +27,13 @@ entry_points: epochs: {type: int, default: 32} metric: {type: string, default: "rmse"} seed: {type: int, default: 97531} - training_experiment_id: {type: int, default: "-1"} command: "python search_random.py {training_data} --max-runs {max_runs} --max-p {max_p} --epochs {epochs} --metric {metric} - --seed {seed} - --training-experiment-id {training_experiment_id}" + --seed {seed}" + # Use GPyOpt to optimize hyperparams of the train entry_point. 
@@ -50,7 +49,6 @@ entry_points:
       gpy_acquisition: {type: string, default: "EI"}
       initial_design: {type: string, default: "random"}
       seed: {type: int, default: 97531}
-      training_experiment_id: {type: int, default: "-1"}
     command: "python search_gpyopt.py {training_data}
                                       --max-runs {max_runs}
@@ -61,8 +59,7 @@ entry_points:
                                       --gpy-model {gpy_model}
                                       --gpy-acquisition {gpy_acquisition}
                                       --initial-design {initial_design}
-                                      --seed {seed}
-                                      --training-experiment-id {training_experiment_id}"
+                                      --seed {seed}"

   # Use Hyperopt to optimize hyperparams of the train entry_point.
   hyperopt:
@@ -73,15 +70,17 @@ entry_points:
       metric: {type: string, default: "rmse"}
       algo: {type: string, default: "tpe.suggest"}
       seed: {type: int, default: 97531}
-      training_experiment_id: {type: int, default: "-1"}
     command: "python -O search_hyperopt.py {training_data}
                                            --max-runs {max_runs}
                                            --epochs {epochs}
                                            --metric {metric}
                                            --algo {algo}
-                                           --seed {seed}
-                                           --training-experiment-id {training_experiment_id}"
+                                           --seed {seed}"
+
+  main:
+    parameters:
+      training_data: {type: string, default: "../sklearn_elasticnet_wine/wine-quality.csv"}
+    command: "python search_random.py {training_data}"
diff --git a/examples/hyperparam/README.rst b/examples/hyperparam/README.rst
index 013572ef94a0b..1dc384a1cc1bf 100644
--- a/examples/hyperparam/README.rst
+++ b/examples/hyperparam/README.rst
@@ -25,48 +25,41 @@ examples/hyperparam/MLproject has 4 targets:
 * hyperopt
   use `Hyperopt <https://github.com/hyperopt/hyperopt>`_ to optimize hyperparameters.

-All the hyperparameter targets take an optional experiment ID for training runs. If provided,
-training runs are logged under this experiment ID. This organizes the runs so that it is
-easy to view individual training runs and the hyperparameter runs separately.
-
 Running this Example
 ^^^^^^^^^^^^^^^^^^^^

 You can run any of the targets as a standard MLflow run.

-.. code:: bash
+.. code-block:: bash

-    mlflow experiments create individual_runs
+    mlflow experiments create -n individual_runs

 Creates an experiment for individual runs and returns its experiment ID.

-.. code:: bash
+.. code-block:: bash

-    mlflow experiments create hyper_param_runs
+    mlflow experiments create -n hyper_param_runs

 Creates an experiment for hyperparam runs and returns its experiment ID.

-.. code:: bash
+.. code-block:: bash

-    mlflow run -e train --experiment-id example/hyperparam
+    mlflow run -e train --experiment-id <individual_runs_experiment_id> examples/hyperparam

 Runs the Keras deep learning training with default parameters and logs it under the given
 experiment ID.

-.. code:: bash
+.. code-block:: bash

-    mlflow run -e random --experiment-id <hyperparam_experiment_id> -P \
-      training_experiment_id=<individual_runs_experiment_id> example/hyperparam
+    mlflow run -e random --experiment-id <hyperparam_experiment_id> examples/hyperparam

-.. code:: bash
+.. code-block:: bash

-    mlflow run -e gpyopt --experiment-id <hyperparam_experiment_id> -P \
-      training_experiment_id=<individual_runs_experiment_id> example/hyperparam
+    mlflow run -e gpyopt --experiment-id <hyperparam_experiment_id> examples/hyperparam

-.. code:: bash
+.. code-block:: bash

-    mlflow run -e hyperopt --experiment-id <hyperparam_experiment_id> -P \
-      training_experiment_id=<individual_runs_experiment_id> example/hyperparam
+    mlflow run -e hyperopt --experiment-id <hyperparam_experiment_id> examples/hyperparam

 Runs the hyperparameter tuning with either random search, GPyOpt, or Hyperopt and logs the
 results under ``hyperparam_experiment_id``.
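+
+After a search finishes, the best child run can also be looked up programmatically. A minimal
+sketch (``<experiment_id>`` and ``<parent_run_id>`` are placeholders; this mirrors how the search
+scripts in this example select the best run):
+
+.. code-block:: python
+
+    from mlflow.tracking import MlflowClient
+
+    client = MlflowClient()
+    # training runs are nested under the search run via the mlflow.parentRunId tag
+    runs = client.search_runs(["<experiment_id>"],
+                              "tags.mlflow.parentRunId = '<parent_run_id>'")
+    best = min(runs, key=lambda r: r.data.metrics["val_rmse"])
+    print(best.info.run_id, best.data.metrics["val_rmse"])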
diff --git a/examples/hyperparam/conda.yaml b/examples/hyperparam/conda.yaml index ec060f42a01b7..4fad49dccb232 100644 --- a/examples/hyperparam/conda.yaml +++ b/examples/hyperparam/conda.yaml @@ -7,13 +7,12 @@ dependencies: - pandas=0.22.0 - scikit-learn=0.19.1 - matplotlib=2.2.2 + - tensorflow-mkl==1.13.1 + - keras==2.2.2 - pip: - - mlflow>=0.5.2 - - click==6.7 + - mlflow>=1.0 - Gpy==1.9.2 - GpyOpt==1.2.5 - - tensorflow==1.10.1 - - keras==2.2.2 - pyDOE==0.3.8 - hyperopt==0.1 diff --git a/examples/hyperparam/search_gpyopt.py b/examples/hyperparam/search_gpyopt.py index cfc42e6f708b9..8f8ffb93213c5 100644 --- a/examples/hyperparam/search_gpyopt.py +++ b/examples/hyperparam/search_gpyopt.py @@ -9,19 +9,23 @@ Several runs can be run in parallel. """ -import math import os -import shutil -import tempfile import click import GPyOpt +import matplotlib +from matplotlib import pyplot as plt +import numpy as np import mlflow import mlflow.sklearn import mlflow.tracking import mlflow.projects +from mlflow.tracking.client import MlflowClient +from mlflow.utils.file_utils import TempDir + +_inf = np.finfo(np.float64).max @click.command(help="Perform hyperparameter search with GPyOpt library." @@ -44,18 +48,14 @@ help="Optimizer algorhitm.") @click.option("--seed", type=click.INT, default=97531, help="Seed for the random generator") -@click.option("--training-experiment-id", type=click.INT, default=-1, - help="Maximum number of runs to evaluate. Inherit parent;s experiment if == -1.") @click.argument("training_data") def run(training_data, max_runs, batch_size, max_p, epochs, metric, gpy_model, gpy_acquisition, - initial_design, seed, training_experiment_id): + initial_design, seed): bounds = [ {'name': 'lr', 'type': 'continuous', 'domain': (1e-5, 1e-1)}, {'name': 'momentum', 'type': 'continuous', 'domain': (0.0, 1.0)}, ] # create random file to store run ids of the training tasks - tmp = tempfile.mkdtemp() - results_path = os.path.join(tmp, "results.txt") tracking_client = mlflow.tracking.MlflowClient() def new_eval(nepochs, @@ -89,33 +89,32 @@ def eval(params): :return: The metric value evaluated on the validation data. 
""" lr, momentum = params[0] - p = mlflow.projects.run( - uri=".", - entry_point="train", - parameters={ - "training_data": training_data, - "epochs": str(nepochs), - "learning_rate": str(lr), - "momentum": str(momentum), - "seed": str(seed)}, - experiment_id=experiment_id, - block=False - ) - - if p.wait(): + with mlflow.start_run(nested=True) as child_run: + p = mlflow.projects.run( + run_id=child_run.info.run_id, + uri=".", + entry_point="train", + parameters={ + "training_data": training_data, + "epochs": str(nepochs), + "learning_rate": str(lr), + "momentum": str(momentum), + "seed": str(seed)}, + experiment_id=experiment_id, + synchronous=False + ) + succeeded = p.wait() + if succeeded: training_run = tracking_client.get_run(p.run_id) - - def get_metric(metric_name): - return [m.value for m in training_run.data.metrics if m.key == metric_name][0] + metrics = training_run.data.metrics # cap the loss at the loss of the null model train_loss = min(null_valid_loss, - get_metric("train_{}".format(metric))) + metrics["train_{}".format(metric)]) valid_loss = min(null_valid_loss, - get_metric("val_{}".format(metric))) + metrics["val_{}".format(metric)]) test_loss = min(null_test_loss, - get_metric("test_{}".format(metric))) - + metrics["test_{}".format(metric)]) else: # run failed => return null loss tracking_client.set_terminated(p.run_id, "FAILED") @@ -123,14 +122,12 @@ def get_metric(metric_name): valid_loss = null_valid_loss test_loss = null_test_loss - mlflow.log_metric("train_{}".format(metric), valid_loss) - mlflow.log_metric("val_{}".format(metric), valid_loss) - mlflow.log_metric("test_{}".format(metric), test_loss) - with open(results_path, "a") as f: - f.write("{runId} {train} {val} {test}\n".format(runId=p.run_id, - train=train_loss, - val=valid_loss, - test=test_loss)) + mlflow.log_metrics({ + "train_{}".format(metric): train_loss, + "val_{}".format(metric): valid_loss, + "test_{}".format(metric): test_loss + }) + if return_all: return train_loss, valid_loss, test_loss else: @@ -139,18 +136,17 @@ def get_metric(metric_name): return eval with mlflow.start_run() as run: - experiment_id = run.info.experiment_id if training_experiment_id == -1 \ - else training_experiment_id + experiment_id = run.info.experiment_id # Evaluate null model first. # We use null model (predict everything to the mean) as a reasonable upper bound on loss. # We need an upper bound to handle the failed runs (e.g. return NaNs) because GPyOpt can not # handle Infs. - # Allways including a null model in our results is also a good ML practice. + # Always including a null model in our results is also a good ML practice. 
        train_null_loss, valid_null_loss, test_null_loss = new_eval(0,
                                                                    experiment_id,
-                                                                   math.inf,
-                                                                   math.inf,
-                                                                   math.inf,
+                                                                   _inf,
+                                                                   _inf,
+                                                                   _inf,
                                                                    True)(params=[[0, 0]])
        myProblem = GPyOpt.methods.BayesianOptimization(new_eval(epochs,
                                                                 experiment_id,
@@ -170,42 +166,39 @@ def get_metric(metric_name):
                                                        initial_design_numdata=max_runs >> 2,
                                                        exact_feval=False)
        myProblem.run_optimization(max_runs)
-       import matplotlib
        matplotlib.use('agg')
-       from matplotlib import pyplot as plt
        plt.switch_backend('agg')
-       acquisition_plot = os.path.join(tmp, "acquisition_plot.png")
-       convergence_plot = os.path.join(tmp, "convergence_plot.png")
-       myProblem.plot_acquisition(filename=acquisition_plot)
-       myProblem.plot_convergence(filename=convergence_plot)
-       if os.path.exists(convergence_plot):
-           mlflow.log_artifact(convergence_plot, "converegence_plot")
-       if os.path.exists(acquisition_plot):
-           mlflow.log_artifact(acquisition_plot, "acquisition_plot")
-       best_val_train = math.inf
-       best_val_valid = math.inf
-       best_val_test = math.inf
+       with TempDir() as tmp:
+           acquisition_plot = tmp.path("acquisition_plot.png")
+           convergence_plot = tmp.path("convergence_plot.png")
+           myProblem.plot_acquisition(filename=acquisition_plot)
+           myProblem.plot_convergence(filename=convergence_plot)
+           if os.path.exists(convergence_plot):
+               mlflow.log_artifact(convergence_plot, "convergence_plot")
+           if os.path.exists(acquisition_plot):
+               mlflow.log_artifact(acquisition_plot, "acquisition_plot")
+
+       # find the best run, log its metrics as the final metrics of this run.
+       client = MlflowClient()
+       runs = client.search_runs([experiment_id], "tags.mlflow.parentRunId = '{run_id}' ".format(
+           run_id=run.info.run_id
+       ))
+       best_val_train = _inf
+       best_val_valid = _inf
+       best_val_test = _inf
        best_run = None
-       # we do not have tags yet, for now store the list of executed runs as an artifact
-       mlflow.log_artifact(results_path, "training_runs")
-       with open(results_path) as f:
-           for line in f.readlines():
-               run_id, str_val, str_val2, str_val3 = line.split(" ")
-               val = float(str_val2)
-               if val < best_val_valid:
-                   best_val_train = float(str_val)
-                   best_val_valid = val
-                   best_val_test = float(str_val3)
-                   best_run = run_id
-       # record which run produced the best results, store it as a param for now
-       best_run_path = os.path.join(os.path.join(tmp, "best_run.txt"))
-       with open(best_run_path, "w") as f:
-           f.write("{run_id} {val}\n".format(run_id=best_run, val=best_val_valid))
-       mlflow.log_artifact(best_run_path, "best-run")
-       mlflow.log_metric("train_{}".format(metric), best_val_train)
-       mlflow.log_metric("val_{}".format(metric), best_val_valid)
-       mlflow.log_metric("test_{}".format(metric), best_val_test)
-       shutil.rmtree(tmp)
+       for r in runs:
+           if r.data.metrics["val_rmse"] < best_val_valid:
+               best_run = r
+               best_val_train = r.data.metrics["train_rmse"]
+               best_val_valid = r.data.metrics["val_rmse"]
+               best_val_test = r.data.metrics["test_rmse"]
+       mlflow.set_tag("best_run", best_run.info.run_id)
+       mlflow.log_metrics({
+           "train_{}".format(metric): best_val_train,
+           "val_{}".format(metric): best_val_valid,
+           "test_{}".format(metric): best_val_test
+       })


 if __name__ == '__main__':
diff --git a/examples/hyperparam/search_hyperopt.py b/examples/hyperparam/search_hyperopt.py
index b0ad2611a4763..63001b6429330 100644
--- a/examples/hyperparam/search_hyperopt.py
+++ b/examples/hyperparam/search_hyperopt.py
@@ -12,15 +12,14 @@
 """

 import click
-import math
-
-import os
-import shutil
-import tempfile
+import numpy as np

 from hyperopt import fmin, hp, tpe, rand

 import mlflow.projects
+from mlflow.tracking.client import MlflowClient
+
+_inf = np.finfo(np.float64).max


 @click.command(help="Perform hyperparameter search with Hyperopt library."
@@ -35,16 +34,12 @@
               help="Optimizer algorhitm.")
 @click.option("--seed", type=click.INT, default=97531,
               help="Seed for the random generator")
-@click.option("--training-experiment-id", type=click.INT, default=-1,
-              help="Maximum number of runs to evaluate. Inherit parent;s experiment if == -1.")
 @click.argument("training_data")
-def train(training_data, max_runs, epochs, metric, algo, seed, training_experiment_id):
+def train(training_data, max_runs, epochs, metric, algo, seed):
     """
     Run hyperparameter optimization.
     """
     # create random file to store run ids of the training tasks
-    tmp = tempfile.mkdtemp()
-    results_path = os.path.join(tmp, "results")
     tracking_client = mlflow.tracking.MlflowClient()

     def new_eval(nepochs,
@@ -79,31 +74,31 @@ def eval(params):
         """
         import mlflow.tracking
         lr, momentum = params
-        p = mlflow.projects.run(
-            uri=".",
-            entry_point="train",
-            parameters={
-                "training_data": training_data,
-                "epochs": str(nepochs),
-                "learning_rate": str(lr),
-                "momentum": str(momentum),
-                "seed": seed},
-            experiment_id=experiment_id
-        )
-
-        if p.wait():
+        with mlflow.start_run(nested=True) as child_run:
+            p = mlflow.projects.run(
+                uri=".",
+                entry_point="train",
+                run_id=child_run.info.run_id,
+                parameters={
+                    "training_data": training_data,
+                    "epochs": str(nepochs),
+                    "learning_rate": str(lr),
+                    "momentum": str(momentum),
+                    "seed": seed},
+                experiment_id=experiment_id,
+                use_conda=False  # We are already in the environment
+            )
+            succeeded = p.wait()
+        if succeeded:
             training_run = tracking_client.get_run(p.run_id)
-
-            def get_metric(metric_name):
-                return [m.value for m in training_run.data.metrics if m.key == metric_name][0]
-
+            metrics = training_run.data.metrics
             # cap the loss at the loss of the null model
             train_loss = min(null_train_loss,
-                             get_metric("train_{}".format(metric)))
+                             metrics["train_{}".format(metric)])
             valid_loss = min(null_valid_loss,
-                             get_metric("val_{}".format(metric)))
+                             metrics["val_{}".format(metric)])
             test_loss = min(null_test_loss,
-                            get_metric("test_{}".format(metric)))
+                            metrics["test_{}".format(metric)])
         else:
             # run failed => return null loss
             tracking_client.set_terminated(p.run_id, "FAILED")
@@ -111,15 +106,12 @@ def get_metric(metric_name):
             valid_loss = null_valid_loss
             test_loss = null_test_loss

-        mlflow.log_metric("train_{}".format(metric), train_loss)
-        mlflow.log_metric("val_{}".format(metric), valid_loss)
-        mlflow.log_metric("test_{}".format(metric), test_loss)
+        mlflow.log_metrics({
+            "train_{}".format(metric): train_loss,
+            "val_{}".format(metric): valid_loss,
+            "test_{}".format(metric): test_loss
+        })

-        with open(results_path, "a") as f:
-            f.write("{runId} {train} {val} {test}\n".format(runId=p.run_id,
-                                                            train=train_loss,
-                                                            val=valid_loss,
-                                                            test=test_loss))
         if return_all:
             return train_loss, valid_loss, test_loss
         else:
@@ -133,14 +125,13 @@ def get_metric(metric_name):
     ]

     with mlflow.start_run() as run:
-        experiment_id = run.info.experiment_id if training_experiment_id == -1 \
-            else training_experiment_id
+        experiment_id = run.info.experiment_id
         # Evaluate null model first.
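+        # The null model (predict everything to the mean) gives a finite upper bound on the
+        # loss; it stands in for Inf when a training run fails (see the matching comment in
+        # search_gpyopt.py).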
train_null_loss, valid_null_loss, test_null_loss = new_eval(0, experiment_id, - math.inf, - math.inf, - math.inf, + _inf, + _inf, + _inf, True)(params=[0, 0]) best = fmin(fn=new_eval(epochs, experiment_id, @@ -150,31 +141,28 @@ def get_metric(metric_name): space=space, algo=tpe.suggest if algo == "tpe.suggest" else rand.suggest, max_evals=max_runs) - print("best", best) - best_val_train = math.inf - best_val_valid = math.inf - best_val_test = math.inf + mlflow.set_tag("best params", str(best)) + # find the best run, log its metrics as the final metrics of this run. + client = MlflowClient() + runs = client.search_runs([experiment_id], "tags.mlflow.parentRunId = '{run_id}' ".format( + run_id=run.info.run_id + )) + best_val_train = _inf + best_val_valid = _inf + best_val_test = _inf best_run = None - # we do not have tags yet, for now store list of executed runs as an artifact - mlflow.log_artifact(results_path, "training_runs") - with open(results_path) as f: - for line in f.readlines(): - run_id, str_val, str_val2, str_val3 = line.split(" ") - val = float(str_val2) - if val < best_val_valid: - best_val_train = float(str_val) - best_val_valid = val - best_val_test = float(str_val3) - best_run = run_id - # record which run produced the best results, store it as a param for now - best_run_path = os.path.join(os.path.join(tmp, "best_run.txt")) - with open(best_run_path, "w") as f: - f.write("{run_id} {val}\n".format(run_id=best_run, val=best_val_valid)) - mlflow.log_artifact(best_run_path, "best-run") - mlflow.log_metric("train_{}".format(metric), best_val_train) - mlflow.log_metric("val_{}".format(metric), best_val_valid) - mlflow.log_metric("test_{}".format(metric), best_val_test) - shutil.rmtree(tmp) + for r in runs: + if r.data.metrics["val_rmse"] < best_val_valid: + best_run = r + best_val_train = r.data.metrics["train_rmse"] + best_val_valid = r.data.metrics["val_rmse"] + best_val_test = r.data.metrics["test_rmse"] + mlflow.set_tag("best_run", best_run.info.run_id) + mlflow.log_metrics({ + "train_{}".format(metric): best_val_train, + "val_{}".format(metric): best_val_valid, + "test_{}".format(metric): best_val_test + }) if __name__ == '__main__': diff --git a/examples/hyperparam/search_random.py b/examples/hyperparam/search_random.py index 31ccc19d905d3..10aefdbf73239 100644 --- a/examples/hyperparam/search_random.py +++ b/examples/hyperparam/search_random.py @@ -9,11 +9,6 @@ Several runs can be run in parallel. """ -import math - -import os -import shutil -import tempfile from concurrent.futures import ThreadPoolExecutor import click @@ -24,6 +19,9 @@ import mlflow.sklearn import mlflow.tracking import mlflow.projects +from mlflow.tracking.client import MlflowClient + +_inf = np.finfo(np.float64).max @click.command(help="Perform grid search over train (main entry point).") @@ -37,10 +35,8 @@ help="Metric to optimize on.") @click.option("--seed", type=click.INT, default=97531, help="Seed for the random generator") -@click.option("--training-experiment-id", type=click.INT, default=-1, - help="Maximum number of runs to evaluate. 
Inherit parent;s experiment if == -1.") @click.argument("training_data") -def run(training_data, max_runs, max_p, epochs, metric, seed, training_experiment_id): +def run(training_data, max_runs, max_p, epochs, metric, seed): train_metric = "train_{}".format(metric) val_metric = "val_{}".format(metric) test_metric = "test_{}".format(metric) @@ -49,87 +45,81 @@ def run(training_data, max_runs, max_p, epochs, metric, seed, training_experimen def new_eval(nepochs, experiment_id, - null_train_loss=math.inf, - null_val_loss=math.inf, - null_test_loss=math.inf): + null_train_loss=_inf, + null_val_loss=_inf, + null_test_loss=_inf): def eval(parms): lr, momentum = parms - p = mlflow.projects.run( - uri=".", - entry_point="train", - parameters={ - "training_data": training_data, - "epochs": str(nepochs), - "learning_rate": str(lr), - "momentum": str(momentum), - "seed": str(seed)}, - experiment_id=experiment_id, - block=False) - if p.wait(): + with mlflow.start_run(nested=True) as child_run: + p = mlflow.projects.run( + run_id=child_run.info.run_id, + uri=".", + entry_point="train", + parameters={ + "training_data": training_data, + "epochs": str(nepochs), + "learning_rate": str(lr), + "momentum": str(momentum), + "seed": str(seed)}, + experiment_id=experiment_id, + synchronous=False) + succeeded = p.wait() + if succeeded: training_run = tracking_client.get_run(p.run_id) - - def get_metric(metric_name): - return [m.value for m in training_run.data.metrics if m.key == metric_name][0] - + metrics = training_run.data.metrics # cap the loss at the loss of the null model - train_loss = min(null_train_loss, get_metric(train_metric)) - val_loss = min(null_val_loss, get_metric(val_metric)) - test_loss = min(null_test_loss, get_metric(test_metric)) + train_loss = min(null_train_loss, metrics[train_metric]) + val_loss = min(null_val_loss, metrics[val_metric]) + test_loss = min(null_test_loss, metrics[test_metric]) else: # run failed => return null loss tracking_client.set_terminated(p.run_id, "FAILED") train_loss = null_train_loss val_loss = null_val_loss test_loss = null_test_loss - mlflow.log_metric(train_metric, train_loss) - mlflow.log_metric(val_metric, val_loss) - mlflow.log_metric(test_metric, test_loss) + mlflow.log_metrics({ + "train_{}".format(metric): train_loss, + "val_{}".format(metric): val_loss, + "test_{}".format(metric): test_loss + }) return p.run_id, train_loss, val_loss, test_loss return eval with mlflow.start_run() as run: - experiment_id = run.info.experiment_id if training_experiment_id == -1 \ - else training_experiment_id + experiment_id = run.info.experiment_id _, null_train_loss, null_val_loss, null_test_loss = new_eval(0, experiment_id)((0, 0)) runs = [(np.random.uniform(1e-5, 1e-1), np.random.uniform(0, 1.0)) for _ in range(max_runs)] - best_train_loss = math.inf - best_val_loss = math.inf - best_test_loss = math.inf - best_run = None with ThreadPoolExecutor(max_workers=max_p) as executor: - result = executor.map(new_eval(epochs, - experiment_id, - null_train_loss, - null_val_loss, - null_test_loss), - runs) - tmp = tempfile.mkdtemp() - results_file_path = os.path.join(str(tmp), "results.txt") - with open(results_file_path, "w") as f: - for res in result: - run_id, train_loss, val_loss, test_loss = res - if val_loss < best_val_loss: - best_run = run_id - best_train_loss = train_loss - best_val_loss = val_loss - best_test_loss = test_loss - f.write("{run_id} {train} {val} {test}\n".format(run_id=run_id, - train=train_loss, - val=val_loss, - test=test_loss)) - 
mlflow.log_artifact(results_file_path, "training_runs.txt") - # record which run produced the best results, store it as an artifact - best_run_path = os.path.join(os.path.join(tmp, "best_run.txt")) - with open(best_run_path, "w") as f: - f.write("{run_id} {train} {val} {test}\n".format(run_id=best_run, - train=best_train_loss, - val=best_val_loss, - test=best_test_loss)) - mlflow.log_artifact(best_run_path, "best-run") - mlflow.log_metric(val_metric, best_val_loss) - mlflow.log_metric(test_metric, best_test_loss) - shutil.rmtree(tmp) + _ = executor.map(new_eval(epochs, + experiment_id, + null_train_loss, + null_val_loss, + null_test_loss), + runs) + + # find the best run, log its metrics as the final metrics of this run. + client = MlflowClient() + runs = client.search_runs([experiment_id], + "tags.mlflow.parentRunId = '{run_id}' ".format( + run_id=run.info.run_id + )) + best_val_train = _inf + best_val_valid = _inf + best_val_test = _inf + best_run = None + for r in runs: + if r.data.metrics["val_rmse"] < best_val_valid: + best_run = r + best_val_train = r.data.metrics["train_rmse"] + best_val_valid = r.data.metrics["val_rmse"] + best_val_test = r.data.metrics["test_rmse"] + mlflow.set_tag("best_run", best_run.info.run_id) + mlflow.log_metrics({ + "train_{}".format(metric): best_val_train, + "val_{}".format(metric): best_val_valid, + "test_{}".format(metric): best_val_test + }) if __name__ == '__main__': diff --git a/examples/hyperparam/train.py b/examples/hyperparam/train.py index ff0318eb4e369..6db07b77fbcc9 100644 --- a/examples/hyperparam/train.py +++ b/examples/hyperparam/train.py @@ -29,9 +29,9 @@ import mlflow.keras -def eval_and_log_metrics(prefix, actual, pred): +def eval_and_log_metrics(prefix, actual, pred, epoch): rmse = np.sqrt(mean_squared_error(actual, pred)) - mlflow.log_metric("{}_rmse".format(prefix), rmse) + mlflow.log_metric("{}_rmse".format(prefix), rmse, step=epoch) return rmse @@ -61,6 +61,7 @@ def __init__(self, test_x, test_y, loss="rmse"): self._best_train_loss = math.inf self._best_val_loss = math.inf self._best_model = None + self._next_step = 0 def __enter__(self): return self @@ -71,8 +72,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): """ if not self._best_model: raise Exception("Failed to build any model") - mlflow.log_metric(self.train_loss, self._best_train_loss) - mlflow.log_metric(self.val_loss, self._best_val_loss) + mlflow.log_metric(self.train_loss, self._best_train_loss, step=self._next_step) + mlflow.log_metric(self.val_loss, self._best_val_loss, step=self._next_step) mlflow.keras.log_model(self._best_model, "model") def on_epoch_end(self, epoch, logs=None): @@ -82,10 +83,13 @@ def on_epoch_end(self, epoch, logs=None): """ if not logs: return + self._next_step = epoch + 1 train_loss = logs["loss"] val_loss = logs["val_loss"] - mlflow.log_metric(self.train_loss, train_loss) - mlflow.log_metric(self.val_loss, val_loss) + mlflow.log_metrics({ + self.train_loss: train_loss, + self.val_loss: val_loss + }, step=epoch) if val_loss < self._best_val_loss: # The result improved in the validation set. 
@@ -94,8 +98,8 @@ def on_epoch_end(self, epoch, logs=None): self._best_val_loss = val_loss self._best_model = keras.models.clone_model(self.model) self._best_model.set_weights([x.copy() for x in self.model.get_weights()]) - preds = self._best_model.predict((self._test_x)) - eval_and_log_metrics("test", self._test_y, preds) + preds = self._best_model.predict(self._test_x) + eval_and_log_metrics("test", self._test_y, preds, epoch) @click.command(help="Trains an Keras model on wine-quality dataset." @@ -128,9 +132,10 @@ def run(training_data, epochs, batch_size, learning_rate, momentum, seed): with mlflow.start_run(): if epochs == 0: # score null model - eval_and_log_metrics("train", train_y, np.ones(len(train_y)) * np.mean(train_y)) - eval_and_log_metrics("val", valid_y, np.ones(len(valid_y)) * np.mean(valid_y)) - eval_and_log_metrics("test", test_y, np.ones(len(test_y)) * np.mean(test_y)) + eval_and_log_metrics("train", train_y, np.ones(len(train_y)) * np.mean(train_y), + epoch=-1) + eval_and_log_metrics("val", valid_y, np.ones(len(valid_y)) * np.mean(valid_y), epoch=-1) + eval_and_log_metrics("test", test_y, np.ones(len(test_y)) * np.mean(test_y), epoch=-1) else: with MLflowCheckpoint(test_x, test_y) as mlflow_logger: model = Sequential() diff --git a/examples/multistep_workflow/MLproject b/examples/multistep_workflow/MLproject index 2f9f39ef11aa1..b3190fba5fe33 100644 --- a/examples/multistep_workflow/MLproject +++ b/examples/multistep_workflow/MLproject @@ -9,7 +9,8 @@ entry_points: etl_data: parameters: ratings_csv: path - command: "python etl_data.py --ratings-csv {ratings_csv}" + max_row_limit: {type: int, default: 100000} + command: "python etl_data.py --ratings-csv {ratings_csv} --max-row-limit {max_row_limit}" als: parameters: @@ -30,4 +31,7 @@ entry_points: parameters: als_max_iter: {type: int, default: 10} keras_hidden_units: {type: int, default: 20} - command: "python main.py --als-max-iter {als_max_iter} --keras-hidden-units {keras_hidden_units}" + max_row_limit: {type: int, default: 100000} + command: "python main.py --als-max-iter {als_max_iter} --keras-hidden-units {keras_hidden_units} + --max-row-limit {max_row_limit}" + diff --git a/examples/multistep_workflow/README.rst b/examples/multistep_workflow/README.rst index 8eb22387488ec..e72bd7fe1eaf5 100644 --- a/examples/multistep_workflow/README.rst +++ b/examples/multistep_workflow/README.rst @@ -46,19 +46,19 @@ execute ``mlflow run`` from this directory. So, in order to find out if the Keras model does in fact improve upon the ALS model, you can simply run: -.. code:: +.. code-block:: bash - cd example/multistep + cd examples/multistep_workflow mlflow run . -This will download and transform the MovieLens dataset, train an ALS -model, and then train a Keras model -- you can compare the results by -using ``mlflow ui``! +This downloads and transforms the MovieLens dataset, trains an ALS +model, and then trains a Keras model -- you can compare the results by +using ``mlflow ui``. You can also try changing the number of ALS iterations or Keras hidden units: -.. code:: +.. code-block:: bash mlflow run . 
-P als_max_iter=20 -P keras_hidden_units=50 diff --git a/examples/multistep_workflow/conda.yaml b/examples/multistep_workflow/conda.yaml index 98e199b16863f..ea0d7ce2be6aa 100644 --- a/examples/multistep_workflow/conda.yaml +++ b/examples/multistep_workflow/conda.yaml @@ -4,8 +4,9 @@ channels: dependencies: - python=3.6 - pyspark - - keras - requests - click - pip: - - mlflow + - tensorflow==1.13.1 + - keras==2.2.4 + - mlflow>=1.0 diff --git a/examples/multistep_workflow/etl_data.py b/examples/multistep_workflow/etl_data.py index caf367b0ee333..3874b64dbb5c7 100644 --- a/examples/multistep_workflow/etl_data.py +++ b/examples/multistep_workflow/etl_data.py @@ -2,13 +2,10 @@ Converts the raw CSV form to a Parquet form with just the columns we want """ - from __future__ import print_function -import requests import tempfile import os -import zipfile import pyspark import mlflow import click @@ -17,11 +14,12 @@ @click.command(help="Given a CSV file (see load_raw_data), transforms it into Parquet " "in an mlflow artifact called 'ratings-parquet-dir'") @click.option("--ratings-csv") -def etl_data(ratings_csv): +@click.option("--max-row-limit", default=10000, + help="Limit the data size to run comfortably on a laptop.") +def etl_data(ratings_csv, max_row_limit): with mlflow.start_run() as mlrun: tmpdir = tempfile.mkdtemp() ratings_parquet_dir = os.path.join(tmpdir, 'ratings-parquet') - spark = pyspark.sql.SparkSession.builder.getOrCreate() print("Converting ratings CSV %s to Parquet %s" % (ratings_csv, ratings_parquet_dir)) ratings_df = spark.read \ @@ -30,8 +28,9 @@ def etl_data(ratings_csv): .csv(ratings_csv) \ .drop("timestamp") # Drop unused column ratings_df.show() + if max_row_limit != -1: + ratings_df = ratings_df.limit(max_row_limit) ratings_df.write.parquet(ratings_parquet_dir) - print("Uploading Parquet ratings: %s" % ratings_parquet_dir) mlflow.log_artifacts(ratings_parquet_dir, "ratings-parquet-dir") diff --git a/examples/multistep_workflow/main.py b/examples/multistep_workflow/main.py index 49c446646b196..7add40190eee1 100644 --- a/examples/multistep_workflow/main.py +++ b/examples/multistep_workflow/main.py @@ -7,22 +7,18 @@ import click import os -import tempfile + import mlflow -from mlflow.entities import RunStatus, Run +from mlflow.utils import mlflow_tags +from mlflow.entities import RunStatus from mlflow.utils.logging_utils import eprint import six from mlflow.tracking.fluent import _get_experiment_id -def _get_params(run): - """Converts [mlflow.entities.Param] to a dictionary of {k: v}.""" - return {param.key: param.value for param in run.data.params} - - -def _already_ran(entry_point_name, parameters, source_version, experiment_id=None): +def _already_ran(entry_point_name, parameters, git_commit, experiment_id=None): """Best-effort detection of if a run with the given entrypoint name, parameters, and experiment id already ran. The run must have completed successfully and have at least the parameters provided. 
@@ -31,14 +27,13 @@ def _already_ran(entry_point_name, parameters, git_commit, experiment_id=Non
     client = mlflow.tracking.MlflowClient()
     all_run_infos = reversed(client.list_run_infos(experiment_id))
     for run_info in all_run_infos:
-        if run_info.entry_point_name != entry_point_name:
+        full_run = client.get_run(run_info.run_id)
+        tags = full_run.data.tags
+        if tags.get(mlflow_tags.MLFLOW_PROJECT_ENTRY_POINT, None) != entry_point_name:
             continue
-
-        full_run = client.get_run(run_info.run_uuid)
-        run_params = _get_params(full_run)
         match_failed = False
         for param_key, param_value in six.iteritems(parameters):
-            run_value = run_params.get(param_key)
+            run_value = full_run.data.params.get(param_key)
             if run_value != param_value:
                 match_failed = True
                 break
@@ -47,21 +42,24 @@
         if run_info.status != RunStatus.FINISHED:
             eprint(("Run matched, but is not FINISHED, so skipping "
-                    "(run_id=%s, status=%s)") % (run_info.run_uuid, run_info.status))
+                    "(run_id=%s, status=%s)") % (run_info.run_id, run_info.status))
             continue
-        if run_info.source_version != source_version:
+
+        previous_version = tags.get(mlflow_tags.MLFLOW_GIT_COMMIT, None)
+        if git_commit != previous_version:
             eprint(("Run matched, but has a different source version, so skipping "
-                    "(found=%s, expected=%s)") % (run_info.source_version, source_version))
+                    "(found=%s, expected=%s)") % (previous_version, git_commit))
             continue
-        return client.get_run(run_info.run_uuid)
+        return client.get_run(run_info.run_id)
+    eprint("No matching run has been found.")
     return None


 # TODO(aaron): This is not great because it doesn't account for:
 # - changes in code
 # - changes in dependant steps
-def _get_or_run(entrypoint, parameters, source_version, use_cache=True):
-    existing_run = _already_ran(entrypoint, parameters, source_version)
+def _get_or_run(entrypoint, parameters, git_commit, use_cache=True):
+    existing_run = _already_ran(entrypoint, parameters, git_commit)
     if use_cache and existing_run:
         print("Found existing run for entrypoint=%s and parameters=%s" % (entrypoint, parameters))
         return existing_run
@@ -73,22 +71,26 @@ def _get_or_run(entrypoint, parameters, source_version, use_cache=True):
 @click.command()
 @click.option("--als-max-iter", default=10, type=int)
 @click.option("--keras-hidden-units", default=20, type=int)
-def workflow(als_max_iter, keras_hidden_units):
+@click.option("--max-row-limit", default=100000, type=int)
+def workflow(als_max_iter, keras_hidden_units, max_row_limit):
     # Note: The entrypoint names are defined in MLproject. The artifact directories
     # are documented by each step's .py file.
     with mlflow.start_run() as active_run:
         os.environ['SPARK_CONF_DIR'] = os.path.abspath('.')
-        source_version = active_run.info.source_version
-        load_raw_data_run = _get_or_run("load_raw_data", {}, source_version)
+        git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)
+        load_raw_data_run = _get_or_run("load_raw_data", {}, git_commit)
         ratings_csv_uri = os.path.join(load_raw_data_run.info.artifact_uri, "ratings-csv-dir")
-        etl_data_run = _get_or_run("etl_data", {"ratings_csv": ratings_csv_uri}, source_version)
+        etl_data_run = _get_or_run("etl_data",
+                                   {"ratings_csv": ratings_csv_uri,
+                                    "max_row_limit": max_row_limit},
+                                   git_commit)
         ratings_parquet_uri = os.path.join(etl_data_run.info.artifact_uri, "ratings-parquet-dir")

         # We specify a spark-defaults.conf to override the default driver memory. ALS requires
         # significant memory.
The driver memory property cannot be set by the application itself. als_run = _get_or_run("als", {"ratings_data": ratings_parquet_uri, "max_iter": str(als_max_iter)}, - source_version) + git_commit) als_model_uri = os.path.join(als_run.info.artifact_uri, "als-model") keras_params = { @@ -96,7 +98,7 @@ def workflow(als_max_iter, keras_hidden_units): "als_model_uri": als_model_uri, "hidden_units": keras_hidden_units, } - _get_or_run("train_keras", keras_params, source_version, use_cache=False) + _get_or_run("train_keras", keras_params, git_commit, use_cache=False) if __name__ == '__main__': diff --git a/examples/multistep_workflow/train_keras.py b/examples/multistep_workflow/train_keras.py index 5bbc891eb9617..d584688b4d9a6 100644 --- a/examples/multistep_workflow/train_keras.py +++ b/examples/multistep_workflow/train_keras.py @@ -4,12 +4,10 @@ will use to supplement our input and train using. """ import click -import tempfile import mlflow import mlflow.keras import mlflow.spark -import time from itertools import chain import pyspark @@ -17,11 +15,10 @@ from pyspark.sql.types import * import tensorflow as tf -import keras -from keras.models import Sequential -from keras.layers import Dense -from keras.callbacks import ModelCheckpoint, EarlyStopping -from keras.regularizers import l2 +import tensorflow.keras as keras +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Dense +from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping import numpy as np import pandas as pd @@ -99,7 +96,7 @@ def concat_arrays(*args): mlflow.log_metric("train_mse", train_mse) print('The model had a MSE on the test set of {0}'.format(test_mse)) - mlflow.keras.save_model(model, "keras-model") + mlflow.keras.log_model(model, "keras-model") if __name__ == '__main__': diff --git a/examples/pytorch/MLproject b/examples/pytorch/MLproject new file mode 100644 index 0000000000000..edf796d7523db --- /dev/null +++ b/examples/pytorch/MLproject @@ -0,0 +1,25 @@ +name: pytorch_tutorial + +conda_env: conda.yaml + +entry_points: + main: + parameters: + batch-size: {type: int, default: 64} + test-batch-size: {type: int, default: 1000} + epochs: {type: int, default: 10} + lr: {type: float, default: 0.01} + momentum: {type: float, default: 0.5} + enable-cuda: {type: string, default: 'True'} + seed: {type: int, default: 5} + log-interval: {type: int, default: 100} + command: | + python mnist_tensorboard_artifact.py \ + --batch-size {batch-size} \ + --test-batch-size {test-batch-size} \ + --epochs {epochs} \ + --lr {lr} \ + --momentum {momentum} \ + --enable-cuda {enable-cuda} \ + --seed {seed} \ + --log-interval {log-interval} diff --git a/examples/pytorch/conda.yaml b/examples/pytorch/conda.yaml new file mode 100644 index 0000000000000..f6e3e256e7dc9 --- /dev/null +++ b/examples/pytorch/conda.yaml @@ -0,0 +1,11 @@ +name: pytorch_example +channels: + - defaults + - pytorch +dependencies: + - python=3.6 + - pytorch + - torchvision + - pip: + - mlflow>=1.0 + - tensorboardX diff --git a/examples/pytorch/mnist_tensorboard_artifact.py b/examples/pytorch/mnist_tensorboard_artifact.py index 5f2a854e5ffcd..e36f812009285 100644 --- a/examples/pytorch/mnist_tensorboard_artifact.py +++ b/examples/pytorch/mnist_tensorboard_artifact.py @@ -34,14 +34,17 @@ help='learning rate (default: 0.01)') parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)') -parser.add_argument('--no-cuda', action='store_true', default=False, - help='disables CUDA training') 
+parser.add_argument('--enable-cuda', type=str, choices=['True', 'False'], default='True', + help='enables or disables CUDA training') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=100, metavar='N', help='how many batches to wait before logging training status') args = parser.parse_args() -args.cuda = not args.no_cuda and torch.cuda.is_available() + +enable_cuda_flag = True if args.enable_cuda == 'True' else False + +args.cuda = enable_cuda_flag and torch.cuda.is_available() torch.manual_seed(args.seed) if args.cuda: diff --git a/examples/r_wine/r-dependencies.txt b/examples/r_wine/r-dependencies.txt deleted file mode 100644 index a44283fba2b84..0000000000000 --- a/examples/r_wine/r-dependencies.txt +++ /dev/null @@ -1,351 +0,0 @@ -PackratFormat: 1.4 -PackratVersion: 0.4.9.3 -RVersion: 3.5.1 -Repos: CRAN=https://cran.rstudio.com/ - -Package: BH -Source: CRAN -Version: 1.66.0-1 -Hash: 4cc8883584b955ed01f38f68bc03af6d - -Package: Matrix -Source: CRAN -Version: 1.2-14 -Hash: 521aa8772a1941dfdb007bf532d19dde -Requires: lattice - -Package: R6 -Source: CRAN -Version: 2.2.2 -Hash: b2366cd9d2f3851a5704b4e192b985c2 - -Package: Rcpp -Source: CRAN -Version: 0.12.18 -Hash: c80398774358bc105b9f91f7f7519d7a - -Package: assertthat -Source: CRAN -Version: 0.2.0 -Hash: e8805df54c65ac96d50235c44a82615c - -Package: aws.s3 -Source: CRAN -Version: 0.3.12 -Hash: cf537df8c27f1fcf6228dcf504263579 -Requires: aws.signature, base64enc, digest, httr, xml2 - -Package: aws.signature -Source: CRAN -Version: 0.4.4 -Hash: c17d69744d6a4031f9f7abbf1758b54e -Requires: base64enc, digest - -Package: backports -Source: CRAN -Version: 1.1.2 -Hash: 5ae7b3466e529e4400951ca18c137e40 - -Package: base64enc -Source: CRAN -Version: 0.1-3 -Hash: c590d29e555926af053055e23ee79efb - -Package: carrier -Source: github -Version: 0.0.0.9000 -Hash: 734fd13bc5a7782b88e1df658d371171 -Requires: pryr, rlang -GithubRepo: carrier -GithubUsername: r-lib -GithubRef: master -GithubSha1: 2e1bd1410968d87b8d010531420873930b62be65 -RemoteHost: https://api.github.com -RemoteRepo: carrier -RemoteUsername: r-lib -RemoteSha: 2e1bd1410968d87b8d010531420873930b62be65 - -Package: cli -Source: CRAN -Version: 1.0.0 -Hash: f4239f89feb7ddc65821e4514e9734ae -Requires: assertthat, crayon - -Package: codetools -Source: CRAN -Version: 0.2-15 -Hash: a4268e48f71dc70ac2203aced7cdd8ea - -Package: crayon -Source: CRAN -Version: 1.3.4 -Hash: ff2840dd9b0d563fc80377a5a45510cd - -Package: curl -Source: CRAN -Version: 3.2 -Hash: e3318ec2d42d15a38485bab047d114ba - -Package: digest -Source: CRAN -Version: 0.6.15 -Hash: 1f202ebd812ac7192be3739245746a13 - -Package: evaluate -Source: CRAN -Version: 0.11 -Hash: 2d90e734f5c12a3473f4be8ef3c15ebb -Requires: stringr - -Package: fansi -Source: CRAN -Version: 0.3.0 -Hash: 8d0a24d8245abbfa2b030460734495ab - -Package: foreach -Source: CRAN -Version: 1.4.4 -Hash: 4df5ab2c8c35382dacab75d414da6748 -Requires: codetools, iterators - -Package: forge -Source: github -Version: 0.1.0 -Hash: 09806d1bf8fbdc5a726a4efe300bdf47 -Requires: rlang -GithubRepo: forge -GithubUsername: rstudio -GithubRef: master -GithubSha1: f84b834e06dc73bdad348480acc997e92f2540db -RemoteHost: https://api.github.com -RemoteRepo: forge -RemoteUsername: rstudio -RemoteSha: f84b834e06dc73bdad348480acc997e92f2540db - -Package: fs -Source: CRAN -Version: 1.2.6 -Hash: 9f891b01356dc384c299386f87176e7d -Requires: Rcpp - -Package: git2r -Source: CRAN -Version: 0.23.0 -Hash: 
f7190a7a4b5c5f0983b4b6ee29ed7bc3 - -Package: glmnet -Source: CRAN -Version: 2.0-16 -Hash: 99b8a0322dff077a3eb167b83681ca5a -Requires: Matrix, foreach - -Package: glue -Source: CRAN -Version: 1.3.0 -Hash: 1fbde6dec830370be696eee8ef31c9e4 - -Package: highr -Source: CRAN -Version: 0.7 -Hash: 20757f5c393ed0ecf96c9e8e6d8d514c - -Package: htmltools -Source: CRAN -Version: 0.3.6 -Hash: 87bd72cdfc46f686bbd46b180cb5f0b5 -Requires: Rcpp, digest - -Package: httpuv -Source: CRAN -Version: 1.4.5 -Hash: a21b769562c800e73d417208d7b242ab -Requires: BH, Rcpp, later, promises - -Package: httr -Source: CRAN -Version: 1.3.1 -Hash: 2d32e01e53d532c812052e27a1021441 -Requires: R6, curl, jsonlite, mime, openssl - -Package: iterators -Source: CRAN -Version: 1.0.10 -Hash: 9f8af91e4a3487a282d7dea67513e519 - -Package: jsonlite -Source: CRAN -Version: 1.5 -Hash: 9c51936d8dd00b2f1d4fe9d10499694c - -Package: knitr -Source: CRAN -Version: 1.20 -Hash: 9c6b215d1d02b97586c8232e94533e6a -Requires: evaluate, highr, markdown, stringr, yaml - -Package: later -Source: CRAN -Version: 0.7.3 -Hash: 087b02f5b287d252fc78a4489de3ff3b -Requires: BH, Rcpp, rlang - -Package: lattice -Source: CRAN -Version: 0.20-35 -Hash: 26b9d7f0d0cb4e1d1bbb97f867c82d89 - -Package: magrittr -Source: CRAN -Version: 1.5 -Hash: bdc4d48c3135e8f3b399536ddf160df4 - -Package: markdown -Source: CRAN -Version: 0.8 -Hash: 045d7c594d503b41f1c28946d076c8aa -Requires: mime - -Package: mime -Source: CRAN -Version: 0.5 -Hash: 463550cf44fb6f0a2359368f42eebe62 - -Package: mlflow -Source: github -Version: 0.1.0 -Hash: dd22f58641fca8714810a3a24a9c2b4d -Requires: aws.s3, carrier, forge, fs, git2r, httpuv, httr, jsonlite, - openssl, packrat, processx, purrr, reticulate, rlang, swagger, - withr, xml2, yaml -GithubRepo: mlflow -GithubUsername: rstudio -GithubRef: master -GithubSha1: 6afab6885b922da24ec195416a5c6e79d9ffbe5a -RemoteHost: https://api.github.com -RemoteRepo: mlflow -RemoteUsername: rstudio -RemoteSha: 6afab6885b922da24ec195416a5c6e79d9ffbe5a -GithubSubdir: R/mlflow - -Package: openssl -Source: CRAN -Version: 1.0.2 -Hash: 12a42cbd5aecdb887782247856ccbafd - -Package: packrat -Source: CRAN -Version: 0.4.9-3 -Hash: 03fb817297975f1da0d1b774b47620b3 - -Package: pillar -Source: CRAN -Version: 1.3.0 -Hash: 3e43f774fa6dfba877caca1aebbeaa6a -Requires: cli, crayon, fansi, rlang, utf8 - -Package: processx -Source: CRAN -Version: 3.2.0 -Hash: 906405f0bc681c9438826952417d5d5a -Requires: R6, assertthat, crayon, ps - -Package: promises -Source: CRAN -Version: 1.0.1 -Hash: 1e5d5469b95be6e1b6cd8c35e3bdea60 -Requires: R6, Rcpp, later, magrittr, rlang - -Package: pryr -Source: CRAN -Version: 0.1.4 -Hash: 2ce66f4150df91db2f6fe1d89a2a9f90 -Requires: Rcpp, codetools, stringr - -Package: ps -Source: CRAN -Version: 1.1.0 -Hash: 730119cd27ec2d3270bc572a532e3f81 - -Package: purrr -Source: CRAN -Version: 0.2.5 -Hash: 8b0c16db10c7e20b70cd37779a673a8b -Requires: magrittr, rlang, tibble - -Package: reticulate -Source: CRAN -Version: 1.10 -Hash: e285b294fba6730297085995613d352f -Requires: Matrix, Rcpp, jsonlite - -Package: rlang -Source: CRAN -Version: 0.2.2 -Hash: 49b735b7b1d14f561d21cce12c6703df - -Package: rmarkdown -Source: CRAN -Version: 1.10 -Hash: 02f1aac33000c63986c6b5585e36e7ae -Requires: base64enc, evaluate, htmltools, jsonlite, knitr, mime, - rprojroot, stringr, tinytex, yaml - -Package: rprojroot -Source: CRAN -Version: 1.3-2 -Hash: a25c3f70c166fb3fbabc410eb32b6366 -Requires: backports - -Package: stringi -Source: CRAN -Version: 1.2.4 -Hash: 03ab60ef7fa4627b38ad67c95ce6b04c - 
-Package: stringr
-Source: CRAN
-Version: 1.3.1
-Hash: 9f417a1d899ed1f080942ab36998e8b5
-Requires: glue, magrittr, stringi
-
-Package: swagger
-Source: CRAN
-Version: 3.9.2
-Hash: e2f255645c2ce1832f72284283368a42
-
-Package: tibble
-Source: CRAN
-Version: 1.4.2
-Hash: 83895360ce4f8d2ce92eee00526b5b0b
-Requires: cli, crayon, pillar, rlang
-
-Package: tinytex
-Source: CRAN
-Version: 0.7
-Hash: dd8bee60e088d200d7d0f7bde5be4375
-Requires: xfun
-
-Package: utf8
-Source: CRAN
-Version: 1.1.4
-Hash: f3f97ce59092abc8ed3fd098a59e236c
-
-Package: withr
-Source: CRAN
-Version: 2.1.2
-Hash: d534108bcd5f34ec73e9eb523751ba20
-
-Package: xfun
-Source: CRAN
-Version: 0.3
-Hash: 3b1cb5fdcfb96c88e3f9ab0239fbd048
-
-Package: xml2
-Source: CRAN
-Version: 1.2.0
-Hash: 1ee43aa8f57efa79384f290459c138b6
-Requires: Rcpp
-
-Package: yaml
-Source: CRAN
-Version: 2.2.0
-Hash: a5ad5616d83d89f8d84cbf3cf4034e13
diff --git a/examples/r_wine/train.R b/examples/r_wine/train.R
index 09ccfba4f8e9b..71125465a8061 100644
--- a/examples/r_wine/train.R
+++ b/examples/r_wine/train.R
@@ -4,6 +4,7 @@
 
 library(mlflow)
 library(glmnet)
+library(carrier)
 
 set.seed(40)
 
@@ -26,7 +27,7 @@ lambda <- mlflow_param("lambda", 0.5, "numeric")
 
 with(mlflow_start_run(), {
   model <- glmnet(train_x, train_y, alpha = alpha, lambda = lambda, family= "gaussian", standardize = FALSE)
-  predictor <- crate(~ glmnet::predict.glmnet(model, as.matrix(.x)), model)
+  predictor <- crate(~ glmnet::predict.glmnet(!!model, as.matrix(.x)), !!model)
   predicted <- predictor(test_x)
 
   rmse <- sqrt(mean((predicted - test_y) ^ 2))
diff --git a/examples/r_wine/train.Rmd b/examples/r_wine/train.Rmd
index e2adc8f326839..4921c938eb2b5 100644
--- a/examples/r_wine/train.Rmd
+++ b/examples/r_wine/train.Rmd
@@ -48,7 +48,7 @@ with(mlflow_start_run(), {
   rmse <- sqrt(mean((predicted - test_y) ^ 2))
   mae <- mean(abs(predicted - test_y))
-  r2 <- cor(predicted, test_y) ^ 2
+  r2 <- as.numeric(cor(predicted, test_y) ^ 2)
 
   message("Elasticnet model (alpha=", alpha, ", lambda=", lambda, "):")
   message("  RMSE: ", rmse)
diff --git a/examples/remote_store/remote_server.py b/examples/remote_store/remote_server.py
index 242a24cdda02f..cd6cb7ff996b8 100644
--- a/examples/remote_store/remote_server.py
+++ b/examples/remote_store/remote_server.py
@@ -17,14 +17,14 @@
     log_metric("foo", 6)
     log_metric("foo", 7)
     log_metric("random_int", random.randint(0, 100))
-    run_uuid = active_run().info.run_uuid
+    run_id = active_run().info.run_id
     # Get run metadata & data from the tracking server
    service = mlflow.tracking.MlflowClient()
-    run = service.get_run(run_uuid)
-    print("Metadata & data for run with UUID %s: %s" % (run_uuid, run))
+    run = service.get_run(run_id)
+    print("Metadata & data for run with UUID %s: %s" % (run_id, run))
     local_dir = tempfile.mkdtemp()
     message = "test artifact written during run %s within artifact URI %s\n" \
-              % (active_run().info.run_uuid, get_artifact_uri())
+              % (active_run().info.run_id, get_artifact_uri())
     try:
         file_path = os.path.join(local_dir, "some_output_file.txt")
         with open(file_path, "w") as handle:
diff --git a/examples/rest_api/README.rst b/examples/rest_api/README.rst
new file mode 100644
index 0000000000000..a73a4426d9cb9
--- /dev/null
+++ b/examples/rest_api/README.rst
@@ -0,0 +1,32 @@
+MLflow REST API Example
+-----------------------
+This simple example shows how you could use the MLflow REST API to create new
+runs inside an experiment and log parameters/metrics.
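+
+A minimal sketch of the same calls using the ``requests`` package (the endpoint
+paths and payload fields below simply mirror what the example script in this
+directory sends)::
+
+    import time
+    import requests
+
+    base_url = 'http://localhost:5000/api/2.0/preview/mlflow'
+    run = requests.post(base_url + '/runs/create',
+                        json={'experiment_id': 0,
+                              'start_time': int(time.time() * 1000)}).json()
+    run_id = run['run']['info']['run_uuid']
+    requests.post(base_url + '/runs/log-metric',
+                  json={'run_uuid': run_id, 'key': 'precision', 'value': 0.769})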
+
+To run this example code, do the following:
+
+Open a terminal, navigate to the ``/tmp`` directory, and start the MLflow tracking server::
+
+    mlflow server
+
+In another terminal window, navigate to the ``mlflow/examples/rest_api`` directory and run the
+example code with this command::
+
+    python mlflow_tracking_rest_api.py
+
+Program options::
+
+    usage: mlflow_tracking_rest_api.py [-h] [--hostname HOSTNAME] [--port PORT]
+                                       [--experiment-id EXPERIMENT_ID]
+
+    MLflow REST API Example
+
+    optional arguments:
+      -h, --help            show this help message and exit
+      --hostname HOSTNAME   MLflow server hostname/ip (default: localhost)
+      --port PORT           MLflow server port number (default: 5000)
+      --experiment-id EXPERIMENT_ID
+                            Experiment ID (default: 0)
+
+
diff --git a/examples/rest_api/mlflow_tracking_rest_api.py b/examples/rest_api/mlflow_tracking_rest_api.py
new file mode 100644
index 0000000000000..b939d9db8ec57
--- /dev/null
+++ b/examples/rest_api/mlflow_tracking_rest_api.py
@@ -0,0 +1,102 @@
+"""
+This simple example shows how you could use the MLflow REST API to create new
+runs inside an experiment and log parameters/metrics. Using the MLflow REST API
+instead of the client library can be useful for embedding tracking in an
+application where you don't want to depend on the whole mlflow package, or for
+making your own HTTP requests in another programming language (not Python).
+For more details on the MLflow REST API endpoints, check the following page:
+
+https://www.mlflow.org/docs/latest/rest-api.html
+"""
+
+import argparse
+import os
+import time
+import requests
+
+_DEFAULT_USER_ID = "unknown"
+
+
+class MLFlowTrackingRestApi:
+    def __init__(self, hostname, port, experiment_id):
+        self.base_url = 'http://' + hostname + ':' + str(port) + '/api/2.0/preview/mlflow'
+        self.experiment_id = experiment_id
+        self.run_id = self.create_run()
+
+    def create_run(self):
+        """Create a new run for tracking."""
+        url = self.base_url + '/runs/create'
+        # user_id is deprecated and will be removed from the API in a future release
+        payload = {'experiment_id': self.experiment_id,
+                   'start_time': int(time.time() * 1000),
+                   'user_id': _get_user_id()}
+        r = requests.post(url, json=payload)
+        run_id = None
+        if r.status_code == 200:
+            run_id = r.json()['run']['info']['run_uuid']
+        else:
+            print("Creating run failed!")
+        return run_id
+
+    def list_experiments(self):
+        """Get all experiments."""
+        url = self.base_url + '/experiments/list'
+        r = requests.get(url)
+        experiments = None
+        if r.status_code == 200:
+            experiments = r.json()['experiments']
+        return experiments
+
+    def log_param(self, param):
+        """Log a parameter dict for the given run."""
+        url = self.base_url + '/runs/log-parameter'
+        payload = {'run_uuid': self.run_id, 'key': param['key'], 'value': param['value']}
+        r = requests.post(url, json=payload)
+        return r.status_code
+
+    def log_metric(self, metric):
+        """Log a metric dict for the given run."""
+        url = self.base_url + '/runs/log-metric'
+        payload = {'run_uuid': self.run_id, 'key': metric['key'], 'value': metric['value']}
+        r = requests.post(url, json=payload)
+        return r.status_code
+
+
+def _get_user_id():
+    """Get the ID of the user for the current run."""
+    try:
+        import pwd
+        return pwd.getpwuid(os.getuid())[0]
+    except ImportError:
+        return _DEFAULT_USER_ID
+
+
+if __name__ == "__main__":
+    # Command-line arguments
+    parser = argparse.ArgumentParser(description='MLflow REST API Example')
+
+    parser.add_argument('--hostname', type=str, default='localhost', dest='hostname',
+                        help='MLflow server hostname/ip (default: localhost)')
+
+    parser.add_argument('--port', type=int, default=5000, dest='port',
+                        help='MLflow server port number (default: 5000)')
+
+    parser.add_argument('--experiment-id', type=int, default=0, dest='experiment_id',
+                        help='Experiment ID (default: 0)')
+
+    print("Running mlflow_tracking_rest_api.py")
+
+    args = parser.parse_args()
+
+    mlflow_rest = MLFlowTrackingRestApi(args.hostname, args.port, args.experiment_id)
+    # Parameter is a key/val pair (str types)
+    param = {'key': 'alpha', 'value': '0.1980'}
+    status_code = mlflow_rest.log_param(param)
+    if status_code == 200:
+        print("Successfully logged parameter: {} with value: {}".format(param['key'], param['value']))
+    else:
+        print("Logging parameter failed!")
+    # Metric is a key/val pair (key/val have str/float types)
+    metric = {'key': 'precision', 'value': 0.769}
+    status_code = mlflow_rest.log_metric(metric)
+    if status_code == 200:
+        print("Successfully logged metric: {} with value: {}".format(metric['key'], metric['value']))
+    else:
+        print("Logging metric failed!")
diff --git a/examples/sklearn_elasticnet_diabetes/README.md b/examples/sklearn_elasticnet_diabetes/README.md
new file mode 100644
index 0000000000000..98c3d5d511805
--- /dev/null
+++ b/examples/sklearn_elasticnet_diabetes/README.md
@@ -0,0 +1,3 @@
+# Scikit-learn ElasticNet Diabetes Example
+
+This example trains an ElasticNet regression model for predicting diabetes progression. The example uses [matplotlib](https://matplotlib.org/), which requires different Python dependencies for Linux and OSX. The [linux](linux) and [osx](osx) subdirectories include appropriate MLflow projects for each respective platform.
diff --git a/examples/sklearn_elasticnet_diabetes/linux/MLproject b/examples/sklearn_elasticnet_diabetes/linux/MLproject
new file mode 100644
index 0000000000000..ebff99a040b01
--- /dev/null
+++ b/examples/sklearn_elasticnet_diabetes/linux/MLproject
@@ -0,0 +1,10 @@
+name: tutorial
+
+conda_env: conda.yaml
+
+entry_points:
+  main:
+    parameters:
+      alpha: {type: float, default: 0.01}
+      l1_ratio: {type: float, default: 0.1}
+    command: "python train_diabetes.py {alpha} {l1_ratio}"
diff --git a/examples/sklearn_elasticnet_diabetes/linux/conda.yaml b/examples/sklearn_elasticnet_diabetes/linux/conda.yaml
new file mode 100644
index 0000000000000..a7fed147981c3
--- /dev/null
+++ b/examples/sklearn_elasticnet_diabetes/linux/conda.yaml
@@ -0,0 +1,12 @@
+name: tutorial
+channels:
+  - defaults
+dependencies:
+  - cloudpickle=0.6.1
+  - python=3.6
+  - numpy=1.14.3
+  - matplotlib=3.0.2
+  - pandas=0.22.0
+  - scikit-learn=0.19.1
+  - pip:
+    - mlflow
diff --git a/examples/sklearn_elasticnet_diabetes/train_diabetes.py b/examples/sklearn_elasticnet_diabetes/linux/train_diabetes.py
similarity index 98%
rename from examples/sklearn_elasticnet_diabetes/train_diabetes.py
rename to examples/sklearn_elasticnet_diabetes/linux/train_diabetes.py
index 420ac923ec535..13136859d08d5 100644
--- a/examples/sklearn_elasticnet_diabetes/train_diabetes.py
+++ b/examples/sklearn_elasticnet_diabetes/linux/train_diabetes.py
@@ -122,4 +122,4 @@ def eval_metrics(actual, pred):
     plt.close(fig)
 
     # Log artifacts (output files)
-    mlflow.log_artifact("ElasticNet-paths.png")
\ No newline at end of file
+    mlflow.log_artifact("ElasticNet-paths.png")
diff --git a/examples/sklearn_elasticnet_diabetes/osx/MLproject b/examples/sklearn_elasticnet_diabetes/osx/MLproject
new file mode 100644
index 0000000000000..9a022b973eccb
--- /dev/null
+++ b/examples/sklearn_elasticnet_diabetes/osx/MLproject
@@ -0,0 +1,10 @@
+name: tutorial
+
+conda_env: conda.yaml + +entry_points: + main: + parameters: + alpha: {type: float, default: 0.01} + l1_ratio: {type: float, default: 0.1} + command: "pythonw train_diabetes.py {alpha} {l1_ratio}" diff --git a/examples/sklearn_elasticnet_diabetes/osx/conda.yaml b/examples/sklearn_elasticnet_diabetes/osx/conda.yaml new file mode 100644 index 0000000000000..bf903239c0264 --- /dev/null +++ b/examples/sklearn_elasticnet_diabetes/osx/conda.yaml @@ -0,0 +1,13 @@ +name: tutorial +channels: + - defaults +dependencies: + - cloudpickle=0.6.1 + - python=3.6 + - numpy=1.14.3 + - matplotlib=3.0.2 + - pandas=0.22.0 + - scikit-learn=0.19.1 + - python.app + - pip: + - mlflow>=1.0 diff --git a/examples/sklearn_elasticnet_diabetes/osx/train_diabetes.py b/examples/sklearn_elasticnet_diabetes/osx/train_diabetes.py new file mode 100644 index 0000000000000..13136859d08d5 --- /dev/null +++ b/examples/sklearn_elasticnet_diabetes/osx/train_diabetes.py @@ -0,0 +1,125 @@ +# +# train_diabetes.py +# +# MLflow model using ElasticNet (sklearn) and Plots ElasticNet Descent Paths +# +# Uses the sklearn Diabetes dataset to predict diabetes progression using ElasticNet +# The predicted "progression" column is a quantitative measure of disease progression one year after baseline +# http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html +# Combines the above with the Lasso Coordinate Descent Path Plot +# http://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_coordinate_descent_path.html +# Original author: Alexandre Gramfort ; License: BSD 3 clause +# +# Usage: +# python train_diabetes.py 0.01 0.01 +# python train_diabetes.py 0.01 0.75 +# python train_diabetes.py 0.01 1.0 +# + +import os +import warnings +import sys + +import pandas as pd +import numpy as np +from itertools import cycle +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score +from sklearn.model_selection import train_test_split +from sklearn.linear_model import ElasticNet +from sklearn.linear_model import lasso_path, enet_path +from sklearn import datasets + +# Load Diabetes datasets +diabetes = datasets.load_diabetes() +X = diabetes.data +y = diabetes.target + +# Create pandas DataFrame for sklearn ElasticNet linear_model +Y = np.array([y]).transpose() +d = np.concatenate((X, Y), axis=1) +cols = diabetes.feature_names + ['progression'] +data = pd.DataFrame(d, columns=cols) + + +# Import mlflow +import mlflow +import mlflow.sklearn + + +# Evaluate metrics +def eval_metrics(actual, pred): + rmse = np.sqrt(mean_squared_error(actual, pred)) + mae = mean_absolute_error(actual, pred) + r2 = r2_score(actual, pred) + return rmse, mae, r2 + + + +if __name__ == "__main__": + warnings.filterwarnings("ignore") + np.random.seed(40) + + # Split the data into training and test sets. (0.75, 0.25) split. 
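+    # (train_test_split defaults to a 0.25 test fraction, and the
+    # np.random.seed(40) call above makes the split deterministic)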
+ train, test = train_test_split(data) + + # The predicted column is "progression" which is a quantitative measure of disease progression one year after baseline + train_x = train.drop(["progression"], axis=1) + test_x = test.drop(["progression"], axis=1) + train_y = train[["progression"]] + test_y = test[["progression"]] + + alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05 + l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05 + + # Run ElasticNet + lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42) + lr.fit(train_x, train_y) + predicted_qualities = lr.predict(test_x) + (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities) + + # Print out ElasticNet model metrics + print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio)) + print(" RMSE: %s" % rmse) + print(" MAE: %s" % mae) + print(" R2: %s" % r2) + + # Log mlflow attributes for mlflow UI + mlflow.log_param("alpha", alpha) + mlflow.log_param("l1_ratio", l1_ratio) + mlflow.log_metric("rmse", rmse) + mlflow.log_metric("r2", r2) + mlflow.log_metric("mae", mae) + mlflow.sklearn.log_model(lr, "model") + + + # Compute paths + eps = 5e-3 # the smaller it is the longer is the path + + print("Computing regularization path using the elastic net.") + alphas_enet, coefs_enet, _ = enet_path(X, y, eps=eps, l1_ratio=l1_ratio, fit_intercept=False) + + # Display results + fig = plt.figure(1) + ax = plt.gca() + + colors = cycle(['b', 'r', 'g', 'c', 'k']) + neg_log_alphas_enet = -np.log10(alphas_enet) + for coef_e, c in zip(coefs_enet, colors): + l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c) + + plt.xlabel('-Log(alpha)') + plt.ylabel('coefficients') + title = 'ElasticNet Path by alpha for l1_ratio = ' + str(l1_ratio) + plt.title(title) + plt.axis('tight') + + + # Save figures + fig.savefig("ElasticNet-paths.png") + + # Close plot + plt.close(fig) + + # Log artifacts (output files) + mlflow.log_artifact("ElasticNet-paths.png") diff --git a/examples/sklearn_elasticnet_wine/conda.yaml b/examples/sklearn_elasticnet_wine/conda.yaml index b74a9b9308ca4..9f4fa3c161195 100644 --- a/examples/sklearn_elasticnet_wine/conda.yaml +++ b/examples/sklearn_elasticnet_wine/conda.yaml @@ -3,8 +3,6 @@ channels: - defaults dependencies: - python=3.6 - - numpy=1.14.3 - - pandas=0.22.0 - scikit-learn=0.19.1 - pip: - - mlflow + - mlflow>=1.0 diff --git a/examples/sklearn_logistic_regression/MLproject b/examples/sklearn_logistic_regression/MLproject new file mode 100644 index 0000000000000..f7ebb4ea32688 --- /dev/null +++ b/examples/sklearn_logistic_regression/MLproject @@ -0,0 +1,7 @@ +name: sklearn_logistic_example + +conda_env: conda.yaml + +entry_points: + main: + command: "python train.py" diff --git a/examples/sklearn_logistic_regression/conda.yaml b/examples/sklearn_logistic_regression/conda.yaml new file mode 100644 index 0000000000000..33e52d0daed3c --- /dev/null +++ b/examples/sklearn_logistic_regression/conda.yaml @@ -0,0 +1,10 @@ +name: sklearn-example +channels: + - defaults + - anaconda +dependencies: + - python==3.6 + - scikit-learn=0.19.1 + - pip: + - mlflow>=1.0 + diff --git a/examples/sklearn_logisitic_regression/train.py b/examples/sklearn_logistic_regression/train.py similarity index 100% rename from examples/sklearn_logisitic_regression/train.py rename to examples/sklearn_logistic_regression/train.py diff --git a/examples/tensorflow/MLproject b/examples/tensorflow/MLproject new file mode 100644 index 0000000000000..3ed1c44d4391c --- /dev/null +++ b/examples/tensorflow/MLproject @@ -0,0 +1,7 @@ 
+name: tensorflow-example +conda_env: conda.yaml + +entry_points: + main: + command: "python train_predict.py" + diff --git a/examples/tensorflow/conda.yaml b/examples/tensorflow/conda.yaml new file mode 100644 index 0000000000000..518626611af62 --- /dev/null +++ b/examples/tensorflow/conda.yaml @@ -0,0 +1,10 @@ +name: tensorflow-example +channels: + - defaults + - anaconda +dependencies: + - python==3.6 + - tensorflow==1.13.1 + - pip: + - mlflow + diff --git a/examples/tensorflow/train_predict.py b/examples/tensorflow/train_predict.py index c21c2c51eaec9..3bb9374991f65 100644 --- a/examples/tensorflow/train_predict.py +++ b/examples/tensorflow/train_predict.py @@ -1,14 +1,16 @@ +# in case this is run outside of conda environment with python2 from __future__ import absolute_import from __future__ import division from __future__ import print_function import mlflow -from mlflow import tensorflow, tracking, pyfunc -import numpy as np +from mlflow import pyfunc import pandas as pd import shutil import tempfile import tensorflow as tf +from tensorflow.python.saved_model import tag_constants +import mlflow.tensorflow def main(argv): @@ -17,16 +19,19 @@ def main(argv): (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data() # There are 13 features we are using for inference. feat_cols = [tf.feature_column.numeric_column(key="features", shape=(x_train.shape[1],))] - feat_spec = {"features": tf.placeholder("float", name="features", shape=[None, x_train.shape[1]])} + feat_spec = { + "features": tf.placeholder("float", name="features", shape=[None, x_train.shape[1]])} hidden_units = [50, 20] steps = 1000 regressor = tf.estimator.DNNRegressor(hidden_units=hidden_units, feature_columns=feat_cols) - train_input_fn = tf.estimator.inputs.numpy_input_fn({"features": x_train}, y_train, num_epochs=None, shuffle=True) - with tracking.start_run() as tracked_run: + train_input_fn = tf.estimator.inputs.numpy_input_fn({"features": x_train}, y_train, + num_epochs=None, shuffle=True) + with mlflow.start_run(): mlflow.log_param("Hidden Units", hidden_units) mlflow.log_param("Steps", steps) regressor.train(train_input_fn, steps=steps) - test_input_fn = tf.estimator.inputs.numpy_input_fn({"features": x_test}, y_test, num_epochs=None, shuffle=True) + test_input_fn = tf.estimator.inputs.numpy_input_fn({"features": x_test}, y_test, + num_epochs=None, shuffle=True) # Compute mean squared error mse = regressor.evaluate(test_input_fn, steps=steps) mlflow.log_metric("Mean Square Error", mse['average_loss']) @@ -36,9 +41,12 @@ def main(argv): try: saved_estimator_path = regressor.export_savedmodel(temp, receiver_fn).decode("utf-8") # Logging the saved model - tensorflow.log_saved_model(saved_model_dir=saved_estimator_path, signature_def_key="predict", artifact_path="model") + mlflow.tensorflow.log_model(tf_saved_model_dir=saved_estimator_path, + tf_meta_graph_tags=[tag_constants.SERVING], + tf_signature_def_key="predict", + artifact_path="model") # Reloading the model - pyfunc_model = pyfunc.load_pyfunc(saved_estimator_path) + pyfunc_model = pyfunc.load_pyfunc(mlflow.get_artifact_uri('model')) df = pd.DataFrame(data=x_test, columns=["features"] * x_train.shape[1]) # Predicting on the loaded Python Function predict_df = pyfunc_model.predict(df) diff --git a/lint.sh b/lint.sh index 61df4407459cf..9b7f5c59880d9 100755 --- a/lint.sh +++ b/lint.sh @@ -5,7 +5,7 @@ set -e FWDIR="$(cd "`dirname $0`"; pwd)" cd "$FWDIR" -pycodestyle --max-line-length=100 --exclude mlflow/protos,mlflow/server/js -- mlflow 
tests +pycodestyle --max-line-length=100 --exclude mlflow/protos,mlflow/server/js,mlflow/store/db_migrations,mlflow/temporary_db_migrations_for_pre_1_users -- mlflow tests pylint --msg-template="{path} ({line},{column}): [{msg_id} {symbol}] {msg}" --rcfile="$FWDIR/pylintrc" -- mlflow tests rstcheck README.rst diff --git a/mlflow/R/mlflow/.Rbuildignore b/mlflow/R/mlflow/.Rbuildignore index bf1b8346f00a9..3129046c4c4dd 100644 --- a/mlflow/R/mlflow/.Rbuildignore +++ b/mlflow/R/mlflow/.Rbuildignore @@ -13,3 +13,4 @@ Reference_Manual_mlflow.md .lintr ^man-roxygen/ ^internal/ +^logs/ diff --git a/mlflow/R/mlflow/.lintr b/mlflow/R/mlflow/.lintr index 7ab8a4a1dd06d..7e5242114f158 100644 --- a/mlflow/R/mlflow/.lintr +++ b/mlflow/R/mlflow/.lintr @@ -1 +1 @@ -linters: with_defaults(line_length_linter(120), closed_curly_linter = NULL, open_curly_linter = NULL, absolute_paths_linter = NULL, object_usage_linter = NULL, object_length_linter = NULL) +linters: with_defaults(line_length_linter(120), closed_curly_linter = NULL, open_curly_linter = NULL, absolute_paths_linter = NULL, object_usage_linter = NULL, object_length_linter = NULL, commented_code_linter = NULL) diff --git a/mlflow/R/mlflow/.travis.R b/mlflow/R/mlflow/.travis.R index 369bbcdfb5428..8a5f8a92eb5dd 100644 --- a/mlflow/R/mlflow/.travis.R +++ b/mlflow/R/mlflow/.travis.R @@ -2,6 +2,8 @@ parent_dir <- dir("../", full.names = TRUE) package <- parent_dir[grepl("mlflow_", parent_dir)] install.packages(package) install.packages("keras", repos='http://cran.rstudio.com/') +install.packages("roxygen2") library(keras) install_keras() +devtools::check(error_on = "warning", args = "--no-tests") source("testthat.R") diff --git a/mlflow/R/mlflow/DESCRIPTION b/mlflow/R/mlflow/DESCRIPTION index 10aaedbda4c23..a1c75eaee443c 100644 --- a/mlflow/R/mlflow/DESCRIPTION +++ b/mlflow/R/mlflow/DESCRIPTION @@ -1,7 +1,7 @@ Package: mlflow Type: Package Title: Interface to 'MLflow' -Version: 0.7.1 +Version: 1.0.0 Authors@R: c( person("Matei", "Zaharia", email = "matei@databricks.com", role = c("aut", "cre")), person("Javier", "Luraschi", email = "javier@rstudio.com", role = c("aut")), @@ -21,30 +21,32 @@ SystemRequirements: MLflow (https://www.mlflow.org/) Encoding: UTF-8 LazyData: true Depends: - R (>= 3.1.2) + R (>= 3.3.0) Imports: - aws.s3, + base64enc, forge, fs, git2r, httpuv, httr, + ini, jsonlite, openssl, processx, reticulate, - packrat, purrr, swagger, withr, xml2, yaml, - pryr, - rlang (>= 0.2.0) -RoxygenNote: 6.1.0 + rlang (>= 0.2.0), + zeallot, + tibble, + glue +RoxygenNote: 6.1.1 Suggests: covr, + carrier, keras, lintr, testthat - diff --git a/mlflow/R/mlflow/NAMESPACE b/mlflow/R/mlflow/NAMESPACE index 1aad29dd0e8ec..767da82f27e74 100644 --- a/mlflow/R/mlflow/NAMESPACE +++ b/mlflow/R/mlflow/NAMESPACE @@ -1,58 +1,70 @@ # Generated by roxygen2: do not edit by hand -S3method(mlflow_load_flavor,crate) -S3method(mlflow_load_flavor,keras) -S3method(mlflow_predict_flavor,crate) -S3method(mlflow_predict_flavor,keras.engine.training.Model) -S3method(mlflow_save_flavor,crate) -S3method(mlflow_save_flavor,keras.engine.training.Model) +S3method(mlflow_id,mlflow_experiment) +S3method(mlflow_id,mlflow_run) +S3method(mlflow_load_flavor,mlflow_flavor_crate) +S3method(mlflow_load_flavor,mlflow_flavor_keras) +S3method(mlflow_predict,crate) +S3method(mlflow_predict,keras.engine.training.Model) +S3method(mlflow_save_model,crate) +S3method(mlflow_save_model,keras.engine.training.Model) S3method(mlflow_ui,"NULL") S3method(mlflow_ui,mlflow_client) -S3method(print,crate) 
+S3method(print,mlflow_host_creds) S3method(with,mlflow_run) -export(crate) -export(is_crate) -export(mlflow_active_run) -export(mlflow_cli) +export(install_mlflow) +export(mlflow_client) export(mlflow_create_experiment) +export(mlflow_delete_experiment) +export(mlflow_delete_run) +export(mlflow_delete_tag) +export(mlflow_download_artifacts) export(mlflow_end_run) +export(mlflow_get_experiment) +export(mlflow_get_metric_history) +export(mlflow_get_run) export(mlflow_get_tracking_uri) -export(mlflow_install) +export(mlflow_id) +export(mlflow_list_artifacts) +export(mlflow_list_experiments) +export(mlflow_list_run_infos) export(mlflow_load_flavor) export(mlflow_load_model) export(mlflow_log_artifact) +export(mlflow_log_batch) export(mlflow_log_metric) export(mlflow_log_model) export(mlflow_log_param) export(mlflow_param) -export(mlflow_predict_flavor) -export(mlflow_predict_model) -export(mlflow_restore_snapshot) -export(mlflow_rfunc_predict) +export(mlflow_predict) +export(mlflow_rename_experiment) +export(mlflow_restore_experiment) +export(mlflow_restore_run) export(mlflow_rfunc_serve) export(mlflow_run) -export(mlflow_save_flavor) export(mlflow_save_model) +export(mlflow_search_runs) export(mlflow_server) export(mlflow_set_experiment) export(mlflow_set_tag) export(mlflow_set_tracking_uri) -export(mlflow_snapshot) export(mlflow_source) export(mlflow_start_run) export(mlflow_ui) -export(mlflow_uninstall) +export(uninstall_mlflow) import(forge) -import(rlang) import(swagger) +importFrom(base64enc,base64encode) importFrom(httpuv,runServer) importFrom(httpuv,startDaemonizedServer) importFrom(httpuv,stopServer) importFrom(httr,GET) importFrom(httr,POST) importFrom(httr,add_headers) +importFrom(httr,config) importFrom(httr,content) importFrom(httr,timeout) +importFrom(ini,read.ini) importFrom(jsonlite,fromJSON) importFrom(openssl,rand_num) importFrom(processx,process) @@ -64,7 +76,7 @@ importFrom(reticulate,conda_install) importFrom(reticulate,conda_list) importFrom(rlang,"%||%") importFrom(utils,browseURL) -importFrom(utils,read.csv) -importFrom(utils,write.csv) +importFrom(utils,modifyList) importFrom(withr,with_envvar) importFrom(yaml,write_yaml) +importFrom(zeallot,"%<-%") diff --git a/mlflow/R/mlflow/R/cli.R b/mlflow/R/mlflow/R/cli.R index e85ff557ccb81..e30ff7071318a 100644 --- a/mlflow/R/mlflow/R/cli.R +++ b/mlflow/R/mlflow/R/cli.R @@ -1,42 +1,39 @@ -#' MLflow Command -#' -#' Executes a generic MLflow command through the commmand line interface. -#' -#' @param ... The parameters to pass to the command line. -#' @param background Should this command be triggered as a background task? -#' Defaults to \code{FALSE}. -#' @param echo Print the standard output and error to the screen? Defaults to -#' \code{TRUE}, does not apply to background tasks. -#' @param stderr_callback NULL, or a function to call for every chunk of the standard error. -#' -#' @return A \code{processx} task. -#' -#' @examples -#' \dontrun{ -#' library(mlflow) -#' mlflow_install() -#' -#' mlflow_cli("server", "--help") -#' } -#' +# Runs a generic MLflow command through the command-line interface. +# +# @param ... The parameters to pass to the command line. +# @param background Should this command be triggered as a background task? +# Defaults to \code{FALSE}. +# @param echo Print the standard output and error to the screen? Defaults to +# \code{TRUE}, does not apply to background tasks. +# @param stderr_callback NULL, or a function to call for every chunk of the standard error. 
+# Defaults to a function that prints chunks to standard error. +# @param client Mlflow client to provide environment for the cli process. +# +# @return A \code{processx} task. #' @importFrom processx run #' @importFrom processx process #' @importFrom withr with_envvar -#' @export -mlflow_cli <- function(..., background = FALSE, echo = TRUE, stderr_callback = NULL) { +mlflow_cli <- function(..., + background = FALSE, + echo = TRUE, + stderr_callback = NULL, + client = mlflow_client()) { + env <- if (is.null(client)) list() else client$get_cli_env() args <- list(...) - verbose <- mlflow_is_verbose() python <- dirname(python_bin()) - mlflow_bin <- file.path(python, "mlflow") - - env <- list( - PATH = paste(Sys.getenv("PATH"), python, sep = ":"), - MLFLOW_CONDA_HOME = python_conda_home(), # devel version - MLFLOW_MLFLOW_CONDA = file.path(python_conda_bin(), "conda"), # pip version (deprecated) + mlflow_bin <- python_mlflow_bin() + env <- modifyList(list( + PATH = paste(python, Sys.getenv("PATH"), sep = ":"), + MLFLOW_CONDA_HOME = python_conda_home(), MLFLOW_TRACKING_URI = mlflow_get_tracking_uri() - ) + ), env) + if (is.null(stderr_callback)) { + stderr_callback <- function(x, p) { + cat(x, file = stderr()) + } + } with_envvar(env, { if (background) { @@ -45,7 +42,6 @@ mlflow_cli <- function(..., background = FALSE, echo = TRUE, stderr_callback = N result <- run(mlflow_bin, args = unlist(args), echo = echo, echo_cmd = verbose, stderr_callback = stderr_callback) } }) - invisible(result) } diff --git a/mlflow/R/mlflow/R/crate.R b/mlflow/R/mlflow/R/crate.R deleted file mode 100644 index 21fe9e90cd276..0000000000000 --- a/mlflow/R/mlflow/R/crate.R +++ /dev/null @@ -1,214 +0,0 @@ -# TODO: Use crate from CRAN! - -# Remove after rlang 0.3.0 is released -locally <- function(..., .env = env(caller_env())) { - dots <- exprs(...) - nms <- names(dots) - out <- NULL - - for (i in seq_along(dots)) { - out <- eval_bare(dots[[i]], .env) - - nm <- nms[[i]] - if (nm != "") { - .env[[nm]] <- out - } - } - - out -} - -#' @import rlang -NULL - -#' Crate a function to share with another process -#' -#' @description -#' -#' `crate()` creates functions in a self-contained environment -#' (technically, a child of the base environment). This has two -#' advantages: -#' -#' * They can easily be executed in another process. -#' -#' * Their effects are reproducible. You can run them locally with the -#' same results as on a different process. -#' -#' Creating self-contained functions requires some care, see section -#' below. -#' -#' -#' @section Creating self-contained functions: -#' -#' * They should call package functions with an explicit `::` -#' namespace. This includes packages in the default search path with -#' the exception of the base package. For instance `var()` from the -#' stats package must be called with its namespace prefix: -#' `stats::var(x)`. -#' -#' * They should declare any data they depend on. You can declare data -#' by supplying additional arguments or by unquoting objects with `!!`. -#' -#' @param .fn A fresh formula or function. "Fresh" here means that -#' they should be declared in the call to `crate()`. See examples if -#' you need to crate a function that is already defined. Formulas -#' are converted to purrr-like lambda functions using -#' [rlang::as_function()]. -#' @param ... Arguments to declare in the environment of `.fn`. If a -#' name is supplied, the object is assigned to that name. Otherwise -#' the argument is automatically named after itself. 
-#' -#' @export -#' @examples -#' # You can create functions using the ordinary notation: -#' crate(function(x) stats::var(x)) -#' -#' # Or the formula notation: -#' crate(~stats::var(.x)) -#' -#' # Declare data by supplying named arguments. You can test you have -#' # declared all necessary data by calling your crated function: -#' na_rm <- TRUE -#' fn <- crate(~stats::var(.x, na.rm = na_rm)) -#' try(fn(1:10)) -#' -#' # Arguments are automatically named after themselves so that the -#' # following are equivalent: -#' crate(~stats::var(.x, na.rm = na_rm), na_rm = na_rm) -#' crate(~stats::var(.x, na.rm = na_rm), na_rm) -#' -#' # However if you supply a complex expression, do supply a name! -#' crate(~stats::var(.x, na.rm = na_rm), !na_rm) -#' crate(~stats::var(.x, na.rm = na_rm), na_rm = na_rm) -#' -#' # For small data it is handy to unquote instead. Unquoting inlines -#' # objects inside the function. This is less verbose if your -#' # function depends on many small objects: -#' fn <- crate(~stats::var(.x, na.rm = !!na_rm)) -#' fn(1:10) -#' -#' # One downside is that the individual sizes of unquoted objects -#' # won't be shown in the crate printout: -#' fn -#' -#' -#' # The function or formula you pass to crate() should defined inside -#' # the crate() call, i.e. you can't pass an already defined -#' # function: -#' fn <- function(x) toupper(x) -#' try(crate(fn)) -#' -#' # If you really need to crate an existing function, you can -#' # explicitly set its environment to the crate environment with the -#' # set_env() function from rlang: -#' crate(rlang::set_env(fn)) -crate <- function(.fn, ...) { - # Evaluate arguments in a child of the caller so the caller context - # is in scope and new data is created in a separate child - env <- child_env(caller_env()) - dots <- exprs(..., .named = TRUE) - locally(!!!dots, .env = env) - - # Quote and evaluate in the local env to avoid capturing execution - # envs when user passed an unevaluated function or formula - fn <- eval_bare(enexpr(.fn), env) - - # Isolate the evaluation environment from the search path - env_poke_parent(env, base_env()) - - if (is_formula(fn)) { - fn <- as_function(fn) - } else if (!is_function(fn)) { - abort("`.fn` must evaluate to a function") - } - - if (!is_reference(get_env(fn), env)) { - abort("The function must be defined inside the `crate()` call") - } - - new_crate(fn) -} - - -new_crate <- function(crate) { - if (!is_function(crate)) { - abort("`crate` must be a function") - } - - structure(crate, class = "crate") -} - -#' Is an object a crate? -#' -#' @param x An object to test. -#' @export -is_crate <- function(x) { - inherits(x, "crate") -} - -# Unexported until the `bytes` class is moved to lobstr (and probably -# becomes `lobstr_bytes`) -crate_sizes <- function(crate) { - bare_fn <- unclass(crate) - environment(bare_fn) <- global_env() - - bare_size <- pryr::object_size(bare_fn) - - env <- fn_env(crate) - nms <- ls(env) - - n <- length(nms) + 1 - out <- new_list(n, c("function", nms)) - out[[1]] <- bare_size - - index <- seq2(2, n) - get_size <- function(nm) pryr::object_size(env[[nm]]) - out[index] <- lapply(nms, get_size) - - # Sort data by decreasing size but keep function first - order <- order(as.numeric(out[-1]), decreasing = TRUE) - out <- out[c(1, order + 1)] - - out -} - - -#' @export -print.crate <- function(x, ...) { - sizes <- crate_sizes(x) - - total_size <- format(pryr::object_size(x), ...) - cat(sprintf(" %s\n", total_size)) - - fn_size <- format(sizes[[1]], ...) 
- cat(sprintf("* function: %s\n", fn_size)) - - nms <- names(sizes) - for (i in seq2_along(2, sizes)) { - nm <- nms[[i]] - size <- format(sizes[[i]], ...) - cat(sprintf("* `%s`: %s\n", nm, size)) - } - - # Print function without the environment tag - bare_fn <- unclass(x) - environment(bare_fn) <- global_env() - print(bare_fn, ...) - - invisible(x) -} - -# From pryr -format.bytes <- function(x, digits = 3, ...) { - power <- min(floor(log(abs(x), 1000)), 4) - if (power < 1) { - unit <- "B" - } else { - unit <- c("kB", "MB", "GB", "TB")[[power]] - x <- x / (1000 ^ power) - } - - x <- signif(x, digits = digits) - fmt <- format(unclass(x), big.mark = ",", scientific = FALSE) - paste(fmt, unit) -} diff --git a/mlflow/R/mlflow/R/databricks-utils.R b/mlflow/R/mlflow/R/databricks-utils.R new file mode 100644 index 0000000000000..4b366d7977063 --- /dev/null +++ b/mlflow/R/mlflow/R/databricks-utils.R @@ -0,0 +1,134 @@ +# Utils for databricks authentication + +new_mlflow_client.mlflow_databricks <- function(tracking_uri) { + profile <- tracking_uri$path + # make sure we can read the config + new_mlflow_client_impl( + get_host_creds = function() { + get_databricks_config(profile) + }, + get_cli_env = function() { + databricks_config_as_env(get_databricks_config(profile)) + }, + class = "mlflow_databricks_client" + ) +} + +DATABRICKS_CONFIG_FILE <- "DATABRICKS_CONFIG_FILE" +# map expected config variables to environment variables +config_variable_map <- list( + host = "DATABRICKS_HOST", + username = "DATABRICKS_USERNAME", + password = "DATABRICKS_PASSWORD", + token = "DATABRICKS_TOKEN", + insecure = "DATABRICKS_INSECURE" +) + +databricks_config_as_env <- function(config) { + if (config$config_source != "cfgfile") { # pass the auth info via environment vars + res <- config[!is.na(config)] + res$config_source <- NULL + if (!as.logical(res$insecure)) { + res$insecure <- NULL + } + names(res) <- lapply(names(res), function (x) config_variable_map[[x]]) + res + } else if (!is.na(Sys.getenv(DATABRICKS_CONFIG_FILE, NA))) { + list(DATABRICKS_CONFIG_FILE = Sys.getenv(DATABRICKS_CONFIG_FILE)) + } else { + # We do not need to do anything if the config comes from a file visible to both processes + list() + } +} + +databricks_config_is_valid <- function(config) { + !is.na(config$host) && + (!is.na(config$token) || (!is.na(config$username) && !is.na(config$password))) +} + +#' @importFrom ini read.ini +get_databricks_config_for_profile <- function(profile) { + config_path <- Sys.getenv("DATABRICKS_CONFIG_FILE", NA) + config_path <- if (is.na(config_path)) path.expand("~/.databrickscfg") else config_path + if (!file.exists(config_path)){ + stop(paste("Databricks configuration file is missing. 
Expected config file ", config_path)) + } + config <- read.ini(config_path) + if (!(profile %in% names(config))) { + stop(paste("Missing profile '", profile, "'.", sep = "")) + } + new_databricks_config(config_source = "cfgfile", config[[profile]]) +} + +#' @importFrom utils modifyList +new_databricks_config <- function(config_source, + config_vars) { + res <- do.call(new_mlflow_host_creds, config_vars) + res$config_source <- config_source + res +} + +get_databricks_config_from_env <- function() { + config_vars <- lapply(config_variable_map, function(x) Sys.getenv(x, NA)) + names(config_vars) <- names(config_variable_map) + new_databricks_config("env", config_vars) +} + +get_databricks_config <- function(profile) { + config <- if (!is.na(profile)) { + get_databricks_config_for_profile(profile) + } else if (exists("spark.databricks.token") && exists("spark.databricks.api.url")) { + config_vars <- list( + host = get("spark.databricks.api.url", envir = .GlobalEnv), + token = get("spark.databricks.token", envir = .GlobalEnv), + insecure = Sys.getenv(config_variable_map$insecure, "False") + ) + new_databricks_config(config_source = "db_dynamic", config_vars = config_vars) + } else { + config <- get_databricks_config_from_env() + if (databricks_config_is_valid(config)) { + config + } else { + get_databricks_config_for_profile("DEFAULT") + } + } + if (!databricks_config_is_valid(config)) { + stop("Could not find valid Databricks configuration.") + } + config +} + +mlflow_get_run_context.mlflow_databricks_client <- function(client, experiment_id, ...) { + if (exists(".databricks_internals")) { + notebook_info <- do.call(".get_notebook_info", list(), envir = get(".databricks_internals", + envir = .GlobalEnv)) + if (!is.na(notebook_info$id) && !is.na(notebook_info$path)) { + tags <- list() + tags[[MLFLOW_DATABRICKS_TAGS$MLFLOW_DATABRICKS_NOTEBOOK_ID]] <- notebook_info$id + tags[[MLFLOW_DATABRICKS_TAGS$MLFLOW_DATABRICKS_NOTEBOOK_PATH]] <- notebook_info$path + tags[[MLFLOW_DATABRICKS_TAGS$MLFLOW_DATABRICKS_WEBAPP_URL]] <- notebook_info$webapp_url + tags[[MLFLOW_TAGS$MLFLOW_SOURCE_NAME]] <- notebook_info$path + tags[[MLFLOW_TAGS$MLFLOW_SOURCE_VERSION]] <- get_source_version() + tags[[MLFLOW_TAGS$MLFLOW_SOURCE_TYPE]] <- MLFLOW_SOURCE_TYPE$NOTEBOOK + list( + client = client, + tags = tags, + experiment_id = experiment_id %||% notebook_info$id, + ... 
+ ) + } else { + NextMethod() + } + } else { + NextMethod() + } +} + +MLFLOW_DATABRICKS_TAGS <- list( + MLFLOW_DATABRICKS_NOTEBOOK_ID = "mlflow.databricks.notebookID", + MLFLOW_DATABRICKS_NOTEBOOK_PATH = "mlflow.databricks.notebookPath", + MLFLOW_DATABRICKS_WEBAPP_URL = "mlflow.databricks.webappURL", + MLFLOW_DATABRICKS_RUN_URL = "mlflow.databricks.runURL", + MLFLOW_DATABRICKS_SHELL_JOB_ID = "mlflow.databricks.shellJobID", + MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID = "mlflow.databricks.shellJobRunID" +) diff --git a/mlflow/R/mlflow/R/entities.R b/mlflow/R/mlflow/R/entities.R deleted file mode 100644 index b9cfc7e973fda..0000000000000 --- a/mlflow/R/mlflow/R/entities.R +++ /dev/null @@ -1,12 +0,0 @@ -new_mlflow_entities_run <- function(l) { - run <- purrr::compact(l$run) - run <- purrr::map_at(run, "info", tidy_run_info) - - structure( - list( - info = run$info, - data = run$data - ), - class = "mlflow_run" - ) -} diff --git a/mlflow/R/mlflow/R/imports.R b/mlflow/R/mlflow/R/imports.R index 5f18c5bf7407a..4d24a289f7385 100644 --- a/mlflow/R/mlflow/R/imports.R +++ b/mlflow/R/mlflow/R/imports.R @@ -3,3 +3,6 @@ rlang::`%||%` #' @importFrom purrr %>% purrr::`%>%` + +#' @importFrom zeallot %<-% +zeallot::`%<-%` diff --git a/mlflow/R/mlflow/R/install.R b/mlflow/R/mlflow/R/install.R index dea2b8f1ee0f6..21f328f90e23e 100644 --- a/mlflow/R/mlflow/R/install.R +++ b/mlflow/R/mlflow/R/install.R @@ -1,43 +1,67 @@ +# Returns the current MLflow R package version +mlflow_version <- function() { + utils::packageVersion("mlflow") +} + +# Returns the name of a conda environment in which to install the Python MLflow package +mlflow_conda_env_name <- function() { + paste("r-mlflow", mlflow_version(), sep = "-") +} + +# Create conda env used by MLflow if it doesn't already exist +#' @importFrom reticulate conda_install conda_create conda_list +mlflow_maybe_create_conda_env <- function() { + conda <- mlflow_conda_bin() + conda_env_name <- mlflow_conda_env_name() + if (!conda_env_name %in% conda_list(conda = conda)$name) { + conda_create(conda_env_name, conda = conda) + } +} + #' Install MLflow #' -#' Installs MLflow for individual use. +#' Installs auxiliary dependencies of MLflow (e.g. the MLflow CLI). As a one-time setup step, you +#' must run install_mlflow() to install these dependencies before calling other MLflow APIs. #' -#' Notice that MLflow requires Python and Conda to be installed, -#' see \url{https://www.python.org/getit/} and \url{https://conda.io/docs/installation.html}. +#' install_mlflow() requires Python and Conda to be installed. +#' See \url{https://www.python.org/getit/} and \url{https://docs.conda.io/projects/conda/en/latest/user-guide/install/}. #' #' @examples #' \dontrun{ #' library(mlflow) -#' mlflow_install() +#' install_mlflow() #' } #' #' @importFrom reticulate conda_install conda_create conda_list #' @export -mlflow_install <- function() { - packages <- c( - "pandas", - "mlflow" - ) - - if (!"r-mlflow" %in% conda_list()$name) { - conda_create("r-mlflow") - conda_install(packages, envname = "r-mlflow", pip = TRUE) - } +install_mlflow <- function() { + mlflow_maybe_create_conda_env() + # Install the Python MLflow package with version == the current R package version + packages <- c(paste("mlflow", "==", mlflow_version(), sep = "")) + conda <- mlflow_conda_bin() + conda_install(packages, envname = mlflow_conda_env_name(), pip = TRUE, conda = conda) } -#' Uninstalls MLflow. +#' Uninstall MLflow #' #' Uninstalls MLflow by removing the Conda environment. 
#' #' @examples #' \dontrun{ #' library(mlflow) -#' mlflow_install() -#' mlflow_uninstall() +#' install_mlflow() +#' uninstall_mlflow() #' } #' #' @importFrom reticulate conda_install conda_create conda_list #' @export -mlflow_uninstall <- function() { - reticulate::conda_remove(envname = "r-mlflow") +uninstall_mlflow <- function() { + reticulate::conda_remove(envname = mlflow_conda_env_name(), conda = mlflow_conda_bin()) +} + + +mlflow_conda_bin <- function() { + conda_home <- Sys.getenv("MLFLOW_CONDA_HOME", NA) + conda <- if (!is.na(conda_home)) paste(conda_home, "bin", "conda", sep = "/") else "auto" + conda_binary(conda = conda) } diff --git a/mlflow/R/mlflow/R/model-crate.R b/mlflow/R/mlflow/R/model-crate.R index 9c98db29be090..81be5d2d9c098 100644 --- a/mlflow/R/mlflow/R/model-crate.R +++ b/mlflow/R/mlflow/R/model-crate.R @@ -1,6 +1,10 @@ +#' @rdname mlflow_save_model #' @export -mlflow_save_flavor.crate <- function(x, path = "model", r_dependencies=NULL, conda_env=NULL) { - serialized <- serialize(x, NULL) +mlflow_save_model.crate <- function(model, path, ...) { + if (dir.exists(path)) unlink(path, recursive = TRUE) + dir.create(path) + + serialized <- serialize(model, NULL) saveRDS( serialized, @@ -13,34 +17,16 @@ mlflow_save_flavor.crate <- function(x, path = "model", r_dependencies=NULL, con model = "crate.bin" ) ) - if (!is.null(r_dependencies)) { - dep_file <- basename(r_dependencies) - if (dep_file != "r-dependencies.txt") { - stop("Dependency", dep_file, - "is unsupported by cran flavor. R-dependencies must be named 'r-dependencies.txt'") - } - dst <- file.path(path, basename(conda_env)) - if (r_dependencies != dst) { - file.copy(from = r_dependencies, to = dst) - res$crate$r_dependencies <- basename(r_dependencies) - } - } - if (!is.null(conda_env)){ - dst <- file.path(path, basename(conda_env)) - if (conda_env != dst) { - file.copy(from = conda_env, to = dst) - res$crate$conda_env <- basename(conda_env) - } - } - res + + mlflow_write_model_spec(path, list(flavors = res)) } #' @export -mlflow_load_flavor.crate <- function(model_path) { +mlflow_load_flavor.mlflow_flavor_crate <- function(flavor, model_path) { unserialize(readRDS(file.path(model_path, "crate.bin"))) } #' @export -mlflow_predict_flavor.crate <- function(model, data) { - model(data) +mlflow_predict.crate <- function(model, data, ...) { + do.call(model, list(data, ...)) } diff --git a/mlflow/R/mlflow/R/model-flavor.R b/mlflow/R/mlflow/R/model-flavor.R deleted file mode 100644 index 1f43764442596..0000000000000 --- a/mlflow/R/mlflow/R/model-flavor.R +++ /dev/null @@ -1,47 +0,0 @@ -#' Save MLflow Model Flavor -#' -#' Saves model in MLflow's flavor, to be used by package authors -#' to extend the supported MLflow models. -#' -#' @param x The serving function or model that will perform a prediction. -#' @param path Destination path where this MLflow compatible model -#' will be saved. -#' @param r_dependencies Optional vector of paths to dependency files -#' to include in the model, as in \code{r-dependencies.txt} -#' or \code{conda.yaml}. -#' @param conda_env Path to Conda dependencies file. -#' -#' @return This funciton must return a list of flavors that conform to -#' the MLmodel specification. -#' -#' @export -mlflow_save_flavor <- function(x, path = "model", r_dependencies=NULL, conda_env=NULL) { - UseMethod("mlflow_save_flavor") -} - -#' Load MLflow Model Flavor -#' -#' Loads an MLflow model flavor, to be used by package authors -#' to extend the supported MLflow models. 
-#' -#' @param model_path The path to the MLflow model wrapped in the correct -#' class. -#' -#' @export -mlflow_load_flavor <- function(model_path) { - UseMethod("mlflow_load_flavor") -} - -#' Predict over MLflow Model Flavor -#' -#' Performs prediction over a model loaded using -#' \code{mlflow_load_model()}, to be used by package authors -#' to extend the supported MLflow models. -#' -#' @param model The loaded MLflow model flavor. -#' @param data A data frame to perform scoring. -#' -#' @export -mlflow_predict_flavor <- function(model, data) { - UseMethod("mlflow_predict_flavor") -} diff --git a/mlflow/R/mlflow/R/model-keras.R b/mlflow/R/mlflow/R/model-keras.R index 7dc53b8d62b6e..206440d76864b 100644 --- a/mlflow/R/mlflow/R/model-keras.R +++ b/mlflow/R/mlflow/R/model-keras.R @@ -1,28 +1,18 @@ -#' Save MLflow Keras Model Flavor -#' -#' Saves model in MLflow's Keras flavor. -#' -#' @param x The serving function or model that will perform a prediction. -#' @param path Destination path where this MLflow compatible model -#' will be saved. -#' @param r_dependencies Optional vector of paths to dependency files -#' to include in the model, as in \code{r-dependencies.txt} -#' or \code{conda.yaml}. +#' @rdname mlflow_save_model #' @param conda_env Path to Conda dependencies file. -#' -#' @return This funciton must return a list of flavors that conform to -#' the MLmodel specification. -#' #' @export -mlflow_save_flavor.keras.engine.training.Model <- function(x, - path = "model", - r_dependencies=NULL, - conda_env=NULL) { +mlflow_save_model.keras.engine.training.Model <- function(model, + path, + conda_env = NULL, + ...) { + if (dir.exists(path)) unlink(path, recursive = TRUE) + dir.create(path) + if (!requireNamespace("keras", quietly = TRUE)) { stop("The 'keras' package must be installed.") } - keras::save_model_hdf5(x, filepath = file.path(path, "model.h5"), include_optimizer = TRUE) + keras::save_model_hdf5(model, filepath = file.path(path, "model.h5"), include_optimizer = TRUE) version <- as.character(utils::packageVersion("keras")) conda_env <- if (!is.null(conda_env)) { dst <- file.path(path, basename(conda_env)) @@ -50,11 +40,13 @@ mlflow_save_flavor.keras.engine.training.Model <- function(x, data = "model.h5", env = conda_env) - append(keras_conf, pyfunc_conf) + mlflow_write_model_spec(path, list( + flavors = append(keras_conf, pyfunc_conf) + )) } #' @export -mlflow_load_flavor.keras <- function(model_path) { +mlflow_load_flavor.mlflow_flavor_keras <- function(flavor, model_path) { if (!requireNamespace("keras", quietly = TRUE)) { stop("The 'keras' package must be installed.") } @@ -63,10 +55,10 @@ mlflow_load_flavor.keras <- function(model_path) { } #' @export -mlflow_predict_flavor.keras.engine.training.Model <- function(model, data) { +mlflow_predict.keras.engine.training.Model <- function(model, data, ...) { if (!requireNamespace("keras", quietly = TRUE)) { stop("The 'keras' package must be installed.") } - stats::predict(model, as.matrix(data)) + stats::predict(model, as.matrix(data), ...) } diff --git a/mlflow/R/mlflow/R/model-serve.R b/mlflow/R/mlflow/R/model-serve.R index 3054c5640b82a..8618b96ebd305 100644 --- a/mlflow/R/mlflow/R/model-serve.R +++ b/mlflow/R/mlflow/R/model-serve.R @@ -2,17 +2,16 @@ #' Serve an RFunc MLflow Model #' -#' Serve an RFunc MLflow Model as a local web api. +#' Serves an RFunc MLflow model as a local web API. #' -#' @param model_path The path to the MLflow model, as a string. -#' @param run_uuid ID of run to grab the model from. 
+#' @template roxlate-model-uri
 #' @param host Address to use to serve model, as a string.
 #' @param port Port to use to serve model, as numeric.
-#' @param daemonized Makes 'httpuv' server daemonized so R interactive sessions
+#' @param daemonized Makes `httpuv` server daemonized so R interactive sessions
 #' are not blocked to handle requests. To terminate a daemonized server, call
-#' 'httpuv::stopDaemonizedServer()' with the handle returned from this call.
+#' `httpuv::stopDaemonizedServer()` with the handle returned from this call.
+#' @param ... Optional arguments passed to `mlflow_predict()`.
 #' @param browse Launch browser with serving landing page?
-#' @param restore Should \code{mlflow_restore_snapshot()} be called before serving?
 #'
 #' @examples
 #' \dontrun{
@@ -32,32 +31,26 @@
 #' @importFrom jsonlite fromJSON
 #' @import swagger
 #' @export
-mlflow_rfunc_serve <- function(
-  model_path,
-  run_uuid = NULL,
-  host = "127.0.0.1",
-  port = 8090,
-  daemonized = FALSE,
-  browse = !daemonized,
-  restore = FALSE
-) {
-  mlflow_restore_or_warning(restore)
-
-  model_path <- resolve_model_path(model_path, run_uuid)
-
+mlflow_rfunc_serve <- function(model_uri,
+                               host = "127.0.0.1",
+                               port = 8090,
+                               daemonized = FALSE,
+                               browse = !daemonized,
+                               ...) {
+  model_path <- mlflow_download_artifacts_from_uri(model_uri)
   httpuv_start <- if (daemonized) httpuv::startDaemonizedServer else httpuv::runServer
-  serve_run(model_path, host, port, httpuv_start, browse && interactive())
+  serve_run(model_path, host, port, httpuv_start, browse && interactive(), ...)
 }
 
 serve_content_type <- function(file_path) {
   file_split <- strsplit(file_path, split = "\\.")[[1]]
   switch(file_split[[length(file_split)]],
-    "css" = "text/css",
-    "html" = "text/html",
-    "js" = "application/javascript",
-    "json" = "application/json",
-    "map" = "text/plain",
-    "png" = "image/png"
+         "css" = "text/css",
+         "html" = "text/html",
+         "js" = "application/javascript",
+         "json" = "application/json",
+         "map" = "text/plain",
+         "png" = "image/png"
   )
 }
 
@@ -99,7 +92,7 @@ serve_invalid_request <- function(message = NULL) {
   )
 }
 
-serve_prediction <- function(json_raw, model) {
+serve_prediction <- function(json_raw, model, ...) {
   mlflow_verbose_message("Serving prediction: ", json_raw)
 
   df <- data.frame()
@@ -113,7 +106,7 @@ serve_prediction <- function(json_raw, model) {
 
   df <- as.data.frame(df)
 
-  mlflow_predict_flavor(model, df)
+  mlflow_predict(model, df, ...)
 }
 
 serve_empty_page <- function(req, sess, model) {
@@ -126,7 +119,7 @@
   )
 }
 
-serve_handlers <- function(host, port) {
+serve_handlers <- function(host, port, ...) {
   handlers <- list(
     "^/swagger.json" = function(req, model) {
       list(
@@ -149,14 +142,10 @@ serve_handlers <- function(host, port) {
         )
       )
     },
-    "^/[^/]*$" = function(req, model) {
-      serve_static_file_response("swagger", file.path("dist", req$PATH_INFO))
-    },
     "^/predict" = function(req, model) {
       json_raw <- req$rook.input$read()
-      results <- serve_prediction(json_raw, model)
+      results <- serve_prediction(json_raw, model, ...)
list( status = 200L, @@ -164,10 +153,13 @@ serve_handlers <- function(host, port) { "Content-Type" = paste0(serve_content_type("json"), "; charset=UTF-8") ), body = charToRaw(enc2utf8( - jsonlite::toJSON(list(predictions = results), auto_unbox = TRUE) + jsonlite::toJSON(results, auto_unbox = TRUE, digits = NA) )) ) }, + "^/[^/]*$" = function(req, model) { + serve_static_file_response("swagger", file.path("dist", req$PATH_INFO)) + }, ".*" = function(req, sess, model) { stop("Invalid path.") } @@ -189,14 +181,14 @@ message_serve_start <- function(host, port, model) { } #' @importFrom utils browseURL -serve_run <- function(model_path, host, port, start, browse) { +serve_run <- function(model_path, host, port, start, browse, ...) { model <- mlflow_load_model(model_path) message_serve_start(host, port, model) if (browse) browseURL(paste0("http://", host, ":", port)) - handlers <- serve_handlers(host, port) + handlers <- serve_handlers(host, port, ...) start(host, port, list( onHeaders = function(req) { diff --git a/mlflow/R/mlflow/R/model.R b/mlflow/R/mlflow/R/model.R index 47d2a13a8c9e0..673d12fe11315 100644 --- a/mlflow/R/mlflow/R/model.R +++ b/mlflow/R/mlflow/R/model.R @@ -1,42 +1,34 @@ #' Save Model for MLflow #' -#' Saves model in MLflow's format that can later be used -#' for prediction and serving. +#' Saves model in MLflow format that can later be used for prediction and serving. This method is +#' generic to allow package authors to save custom model types. #' -#' @param x The serving function or model that will perform a prediction. +#' @param model The model that will perform a prediction. #' @param path Destination path where this MLflow compatible model #' will be saved. -#' @param r_dependencies Optional vector of paths to dependency files -#' to include in the model, as in \code{r-dependencies.txt} -#' or \code{conda.yaml}. -#' @param conda_env Path to Conda dependencies file. -#' +#' @param ... Optional additional arguments. #' @importFrom yaml write_yaml #' @export -mlflow_save_model <- function(x, path = "model", r_dependencies=NULL, conda_env=NULL) { - - if (dir.exists(path)) unlink(path, recursive = TRUE) - dir.create(path) - - flavor_spec <- list ( - flavors = mlflow_save_flavor(x, path, r_dependencies, conda_env) - ) - mlflow_write_model_spec(path, flavor_spec) +mlflow_save_model <- function(model, path, ...) { + UseMethod("mlflow_save_model") } #' Log Model #' -#' Logs a model in the given run. Similar to `mlflow_save_model()` +#' Logs a model for this run. Similar to `mlflow_save_model()` #' but stores model as an artifact within the active run. #' -#' @param fn The serving function that will perform a prediction. +#' @param model The model that will perform a prediction. #' @param artifact_path Destination path where this MLflow compatible model #' will be saved. +#' @param ... Optional additional arguments passed to `mlflow_save_model()` when persisting the +#' model. For example, `conda_env = /path/to/conda.yaml` may be passed to specify a conda +#' dependencies file for flavors (e.g. keras) that support conda environments. #' #' @export -mlflow_log_model <- function(fn, artifact_path) { +mlflow_log_model <- function(model, artifact_path, ...) { temp_path <- fs::path_temp(artifact_path) - mlflow_save_model(fn, path = temp_path) + mlflow_save_model(model, path = temp_path, ...) 
mlflow_log_artifact(path = temp_path, artifact_path = artifact_path) } @@ -51,8 +43,8 @@ mlflow_timestamp <- function() { } mlflow_write_model_spec <- function(path, content) { - content$time_created <- mlflow_timestamp() - content$run_id <- active_run_id() + content$utc_time_created <- mlflow_timestamp() + content$run_id <- mlflow_get_active_run_id() write_yaml( purrr::compact(content), @@ -60,29 +52,18 @@ mlflow_write_model_spec <- function(path, content) { ) } -#' Generate prediction with MLflow model. +#' Load MLflow Model #' -#' @param model MLflow model. -#' @param data Dataframe to be scored. -#' @export -mlflow_predict_model <- function(model, data) { - model %>% mlflow_predict_flavor(data) -} - -#' Load MLflow Model. +#' Loads an MLflow model. MLflow models can have multiple model flavors. Not all flavors / models +#' can be loaded in R. This method by default searches for a flavor supported by R/MLflow. #' -#' MLflow models can have multiple model flavors. Not all flavors / models can be loaded in R. This -#' method will by default search for a flavor supported by R/mlflow. -#' -#' @param model_path "Path to the MLflow model. The path is relative to the run with the given -#' run-id or local filesystem path without run-id. -#' @param run_id Optional MLflow run-id. If supplied model will be fetched from MLflow tracking -#' server. -#' @param flavor Optional flavor specification. Can be used to load a particular flavor in case -#' there are multiple flavors available. +#' @template roxlate-model-uri +#' @template roxlate-client +#' @param flavor Optional flavor specification (string). Can be used to load a particular flavor in +#' case there are multiple flavors available. #' @export -mlflow_load_model <- function(model_path, flavor = NULL, run_id = NULL) { - model_path <- resolve_model_path(model_path, run_id) +mlflow_load_model <- function(model_uri, flavor = NULL, client = mlflow_client()) { + model_path <- mlflow_download_artifacts_from_uri(model_uri, client = client) supported_flavors <- supported_model_flavors() spec <- yaml::read_yaml(fs::path(model_path, "MLmodel")) available_flavors <- intersect(names(spec$flavors), supported_flavors) @@ -112,97 +93,110 @@ mlflow_load_model <- function(model_path, flavor = NULL, run_id = NULL) { warning(paste("Multiple model flavors available (", paste(available_flavors, collapse = ", "), " ). loading flavor '", available_flavors[[1]], "'", "")) } - flavor <- available_flavors[[1]] } - flavor_path <- model_path - class(flavor_path) <- c(flavor, class(flavor_path)) - mlflow_load_flavor(flavor_path) + flavor <- mlflow_flavor(flavor) + mlflow_load_flavor(flavor, model_path) } -#' Predict using RFunc MLflow Model -#' -#' Predict using an RFunc MLflow Model from a file or data frame. +new_mlflow_flavor <- function(flavor, class = character(0)) { + structure(character(0), class = c(class, "mlflow_flavor")) +} + +# Create an MLflow Flavor Object +# +# This function creates an `mlflow_flavor` object that can be used to dispatch +# the `mlflow_load_flavor()` method. +# +# @param flavor The name of the flavor. +# @keywords internal +mlflow_flavor <- function(flavor) { + new_mlflow_flavor(flavor, paste0("mlflow_flavor_", flavor)) +} + +#' Load MLflow Model Flavor #' -#' @param model_path The path to the MLflow model, as a string. -#' @param run_uuid Run ID of run to grab the model from. -#' @param input_path Path to 'JSON' or 'CSV' file to be used for prediction. -#' @param output_path 'JSON' or 'CSV' file where the prediction will be written to. 
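Putting the pieces above together, the save/load/predict flow for the built-in crate flavor could look like the following sketch (the `lm` formula and the local `"model"` directory used as the model URI are examples only):

```r
library(mlflow)

fit <- lm(Sepal.Width ~ Sepal.Length, data = iris)
fn <- carrier::crate(~ stats::predict(fit, .x), fit = fit)

mlflow_save_model(fn, path = "model")  # dispatches to mlflow_save_model.crate
loaded <- mlflow_load_model("model")   # local directory used as the model URI
mlflow_predict(loaded, iris)
```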
-#' @param data Data frame to be scored. This can be utilized for testing purposes and can only
-#' be specified when `input_path` is not specified.
-#' @param restore Should \code{mlflow_restore_snapshot()} be called before serving?
+#' Loads an MLflow model using a specific flavor. This method is called internally by
+#' \link[mlflow]{mlflow_load_model}, but is exposed for package authors to extend the supported
+#' MLflow models. See https://mlflow.org/docs/latest/models.html#storage-format for more
+#' info on MLflow model flavors.
 #'
-#' @examples
-#' \dontrun{
-#' library(mlflow)
+#' @param flavor An MLflow flavor object loaded by \link[mlflow]{mlflow_load_model}, with class
+#' loaded from the flavor field in an MLmodel file.
+#' @param model_path The path to the MLflow model wrapped in the correct
+#' class.
 #'
-#' # save simple model which roundtrips data as prediction
-#' mlflow_save_model(function(df) df, "mlflow_roundtrip")
+#' @export
+mlflow_load_flavor <- function(flavor, model_path) {
+  UseMethod("mlflow_load_flavor")
+}
+
+#' Generate Prediction with MLflow Model
 #'
-#' # save data as json
-#' jsonlite::write_json(iris, "iris.json")
+#' Performs prediction over a model loaded using
+#' \code{mlflow_load_model()}, to be used by package authors
+#' to extend the supported MLflow models.
 #'
-#' # predict existing model from json data
-#' mlflow_rfunc_predict("mlflow_roundtrip", "iris.json")
-#' }
+#' @param model The loaded MLflow model flavor.
+#' @param data A data frame to perform scoring.
+#' @param ... Optional additional arguments passed to underlying predict
+#' methods.
 #'
-#' @importFrom utils read.csv
-#' @importFrom utils write.csv
 #' @export
-mlflow_rfunc_predict <- function(
-  model_path,
-  run_uuid = NULL,
-  input_path = NULL,
-  output_path = NULL,
-  data = NULL,
-  restore = FALSE
-) {
-  mlflow_restore_or_warning(restore)
-
-  model_path <- resolve_model_path(model_path, run_uuid)
-
-  if (!xor(is.null(input_path), is.null(data)))
-    stop("One and only one of `input_path` or `data` must be specified.")
-
-  data <- if (!is.null(input_path)) {
-    switch(
-      fs::path_ext(input_path),
-      json = jsonlite::read_json(input_path),
-      csv = read.csv(input_path)
-    )
-  } else {
-    data
-  }
-
-  model <- mlflow_load_model(model_path)
+mlflow_predict <- function(model, data, ...) {
+  UseMethod("mlflow_predict")
+}
 
-  prediction <- mlflow_predict_flavor(model, data)
-  if (is.null(output_path)) {
-    if (!interactive()) message(prediction)
+# Generate predictions using a saved R MLflow model.
+# Input and output are read from and written to a specified input / output file or stdin / stdout.
+#
+# @param input_path Path to 'JSON' or 'CSV' file to be used for prediction. If not specified, data is
+# read from stdin.
+# @param output_path 'JSON' file where the prediction will be written to. If not specified,
+# data is written out to stdout.
-    prediction
-  } else {
-    switch(
-      fs::path_ext(output_path),
-      json = jsonlite::write_json(prediction, output_path),
-      csv = write.csv(prediction, output_path, row.names = FALSE),
-      stop("Unsupported output file format.")
-    )
-  }
+mlflow_rfunc_predict <- function(model_path, input_path = NULL, output_path = NULL,
+                                 content_type = NULL, json_format = NULL) {
+  model <- mlflow_load_model(model_path)
+  input_path <- input_path %||% "stdin"
+  output_path <- output_path %||% stdout()
+
+  data <- switch(
+    content_type %||% "json",
+    json = parse_json(input_path, json_format %||% "split"),
+    csv = utils::read.csv(input_path),
+    stop("Unsupported input file format.")
+  )
+  prediction <- mlflow_predict(model, data)
+  jsonlite::write_json(prediction, output_path, digits = NA)
+  invisible(NULL)
 }
 
-resolve_model_path <- function(model_path, run_uuid) {
-  if (!is.null(run_uuid)) {
-    result <- mlflow_cli("artifacts", "download", "--run-id", run_uuid, "-a", model_path,
-                         echo = FALSE)
-    gsub("\n", "", result$stdout)
-  } else {
-    model_path
-  }
+supported_model_flavors <- function() {
+  purrr::map(utils::methods(generic.function = mlflow_load_flavor),
+             ~ gsub("mlflow_load_flavor\\.mlflow_flavor_", "", .x))
 }
 
-supported_model_flavors <- function() {
-  purrr::map(utils::methods(generic.function = mlflow_load_flavor), ~ substring(.x, 20))
+# Helper function to parse a data frame from JSON based on the given json_format.
+# The default behavior is to parse the data in the Pandas "split" orient.
+parse_json <- function(input_path, json_format="split") {
+  switch(json_format,
+    split = {
+      json <- jsonlite::read_json(input_path, simplifyVector = TRUE)
+      elms <- names(json)
+      if (length(setdiff(elms, c("columns", "index", "data"))) != 0
+          || length(setdiff(c("columns", "data"), elms)) != 0) {
+        stop(paste("Invalid input. Make sure the input json data is in 'split' format.", elms))
+      }
+      df <- data.frame(json$data, row.names = json$index)
+      names(df) <- json$columns
+      df
+    },
+    records = jsonlite::read_json(input_path, simplifyVector = TRUE),
+    stop(paste("Unsupported JSON format", json_format,
+               ". Supported formats are 'split' or 'records'"))
+  )
 }
diff --git a/mlflow/R/mlflow/R/project-param.R b/mlflow/R/mlflow/R/project-param.R
index ffa2026f3d798..01371b7285229 100644
--- a/mlflow/R/mlflow/R/project-param.R
+++ b/mlflow/R/mlflow/R/project-param.R
@@ -1,12 +1,33 @@
-#' Read Command Line Parameter
+#' Read Command-Line Parameter
 #'
-#' Reads a command line parameter.
+#' Reads a command-line parameter passed to an MLflow project.
+#' MLflow allows you to define named, typed input parameters to your R scripts via the mlflow_param
+#' API. This is useful for experimentation, e.g. tracking multiple invocations of the same script
+#' with different parameters.
 #'
-#' @param name The name for this parameter.
-#' @param default The default value for this parameter.
+#' @examples
+#' \dontrun{
+#' # This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow
+#' # project.
You can run this script (assuming it's saved at /some/directory/params_example.R) +#' # with custom parameters via: +#' # mlflow_run(entry_point = "params_example.R", uri = "/some/directory", +#' # parameters = list(num_trees = 200, learning_rate = 0.1)) +#' install.packages("gbm") +#' library(mlflow) +#' library(gbm) +#' # define and read input parameters +#' num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer") +#' lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric") +#' # use params to fit a model +#' ir.adaboost <- gbm(Species ~., data=iris, n.trees=num_trees, shrinkage=lr) +#' } +#' + +#' @param name The name of the parameter. +#' @param default The default value of the parameter. #' @param type Type of this parameter. Required if `default` is not set. If specified, #' must be one of "numeric", "integer", or "string". -#' @param description Optional description for this parameter. +#' @param description Optional description for the parameter. #' #' @import forge #' @export diff --git a/mlflow/R/mlflow/R/project-run.R b/mlflow/R/mlflow/R/project-run.R index 73096cdbb2b3a..cb0ced154912b 100644 --- a/mlflow/R/mlflow/R/project-run.R +++ b/mlflow/R/mlflow/R/project-run.R @@ -1,33 +1,58 @@ -#' Run in MLflow +#' Run an MLflow Project #' -#' Wrapper for `mlflow run`. +#' Wrapper for the `mlflow run` CLI command. See https://www.mlflow.org/docs/latest/cli.html#run +#' for more info. +#' +#' @examples +#' \dontrun{ +#' # This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow +#' # project. You can run this script (assuming it's saved at /some/directory/params_example.R) +#' # with custom parameters via: +#' # mlflow_run(entry_point = "params_example.R", uri = "/some/directory", +#' # parameters = list(num_trees = 200, learning_rate = 0.1)) +#' install.packages("gbm") +#' library(mlflow) +#' library(gbm) +#' # define and read input parameters +#' num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer") +#' lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric") +#' # use params to fit a model +#' ir.adaboost <- gbm(Species ~., data=iris, n.trees=num_trees, shrinkage=lr) +#' } +#' + #' #' @param entry_point Entry point within project, defaults to `main` if not specified. #' @param uri A directory containing modeling scripts, defaults to the current directory. #' @param version Version of the project to run, as a Git commit reference for Git projects. -#' @param param_list A list of parameters. +#' @param parameters A list of parameters. #' @param experiment_id ID of the experiment under which to launch the run. -#' @param mode Execution mode to use for run. -#' @param cluster_spec Path to JSON file describing the cluster to use when launching a run on Databricks. -#' @param git_username Username for HTTP(S) Git authentication. -#' @param git_password Password for HTTP(S) Git authentication. +#' @param experiment_name Name of the experiment under which to launch the run. +#' @param backend Execution backend to use for run. +#' @param backend_config Path to JSON file which will be passed to the backend. For the Databricks backend, +#' it should describe the cluster to use when launching a run on Databricks. #' @param no_conda If specified, assume that MLflow is running within a Conda environment with the necessary -#' dependencies for the current project instead of attempting to create a new conda environment. 
Only
+#' dependencies for the current project instead of attempting to create a new Conda environment. Only
 #' valid if running locally.
-#' @param storage_dir Only valid when `mode` is local. MLflow downloads artifacts from distributed URIs passed to
-#' parameters of type 'path' to subdirectories of storage_dir.
+#' @param storage_dir Valid only when `backend` is local. MLflow downloads artifacts from distributed URIs passed to
+#' parameters of type `path` to subdirectories of `storage_dir`.
 #'
 #' @return The ID of the run created by this invocation.
 #'
 #' @export
-mlflow_run <- function(entry_point = NULL, uri = ".", version = NULL, param_list = NULL,
-                       experiment_id = NULL, mode = NULL, cluster_spec = NULL,
-                       git_username = NULL, git_password = NULL, no_conda = FALSE,
-                       storage_dir = NULL) {
+mlflow_run <- function(uri = ".", entry_point = NULL, version = NULL, parameters = NULL,
+                       experiment_id = NULL, experiment_name = NULL, backend = NULL, backend_config = NULL,
+                       no_conda = FALSE, storage_dir = NULL) {
+  if (!is.null(experiment_name) && !is.null(experiment_id)) {
+    stop("Specify only one of `experiment_name` or `experiment_id`.")
+  }
+  if (is.null(experiment_name)) {
+    experiment_id <- mlflow_infer_experiment_id(experiment_id)
+  }
   if (file.exists(uri)) uri <- fs::path_expand(uri)
 
-  param_list <- if (!is.null(param_list)) param_list %>%
+  param_list <- if (!is.null(parameters)) parameters %>%
     purrr::imap_chr(~ paste0(.y, "=", .x)) %>%
     purrr::reduce(~ mlflow_cli_param(.x, "--param-list", .y), .init = list())
 
@@ -35,19 +60,15 @@ mlflow_run <- function(entry_point = NULL, uri = ".", version = NULL, param_list
     mlflow_cli_param("--entry-point", entry_point) %>%
     mlflow_cli_param("--version", version) %>%
     mlflow_cli_param("--experiment-id", experiment_id) %>%
-    mlflow_cli_param("--mode", mode) %>%
-    mlflow_cli_param("--cluster_spec", cluster_spec) %>%
-    mlflow_cli_param("--git-username", git_username) %>%
-    mlflow_cli_param("--git-password", git_password) %>%
+    mlflow_cli_param("--experiment-name", experiment_name) %>%
+    mlflow_cli_param("--backend", backend) %>%
+    mlflow_cli_param("--backend-config", backend_config) %>%
    mlflow_cli_param("--storage-dir", storage_dir) %>%
     c(param_list)
 
   args <- if (!no_conda) args else c(args, "--no-conda")
   result <- do.call(mlflow_cli, c("run", args))
   matches <- regexec(".*Run \\(ID \\'([^\\']+).*", result$stderr)
-  run_uuid <- regmatches(result$stderr, matches)[[1]][[2]]
-
-  invisible(run_uuid)
+  run_id <- regmatches(result$stderr, matches)[[1]][[2]]
+  invisible(run_id)
 }
diff --git a/mlflow/R/mlflow/R/project-snapshot.R b/mlflow/R/mlflow/R/project-snapshot.R
deleted file mode 100644
index 1d72c47a52309..0000000000000
--- a/mlflow/R/mlflow/R/project-snapshot.R
+++ /dev/null
@@ -1,77 +0,0 @@
-#' Dependencies Snapshot
-#'
-#' Creates a snapshot of all dependencies required to run the files in the
-#' current directory.
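A sketch of calling `mlflow_run()` with the renamed arguments, mirroring the example in the roxygen block above (`/some/directory/params_example.R` is the illustrative path from that example):

```r
library(mlflow)

# Equivalent of the old call with param_list =/mode =, using the new names.
run_id <- mlflow_run(
  uri = "/some/directory",
  entry_point = "params_example.R",
  parameters = list(num_trees = 200, learning_rate = 0.1)
)
```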
-#' -#' @export -mlflow_snapshot <- function() { - packrat::.snapshotImpl( - project = getwd(), - ignore.stale = getOption("mlflow.snapshot.stale", FALSE), - prompt = getOption("mlflow.snapshot.prompt", FALSE), - snapshot.sources = getOption("mlflow.snapshot.sources", FALSE), - verbose = mlflow_is_verbose(), - fallback.ok = getOption("mlflow.snapshot.fallback", FALSE) - ) - - file.copy("packrat/packrat.lock", "r-dependencies.txt") - mlflow_lock_delete() -} - -mlflow_lock_delete <- function() { - if (file.exists("packrat/packrat.lock")) { - unlink("packrat/packrat.lock") - if (length(dir("packrat")) == 0) unlink("packrat", recursive = TRUE) - } -} - -#' Restore Snapshot -#' -#' Restores a snapshot of all dependencies required to run the files in the -#' current directory -#' -#' @export -mlflow_restore_snapshot <- function() { - - if (!file.exists("r-dependencies.txt")) { - stop("r-dependencies.txt expected but does not exist, run 'mlflow_run()' or 'mlflow_snapshot()'.") - } - - if (!file.exists("packrat")) dir.create("packrat") - file.copy("r-dependencies.txt", "packrat/packrat.lock") - on.exit(mlflow_lock_delete) - - if (nchar(Sys.getenv("MLFLOW_SNAPSHOT_CACHE")) > 0) { - Sys.setenv(R_PACKRAT_CACHE_DIR = Sys.getenv("MLFLOW_SNAPSHOT_CACHE")) - } - - options(packrat.verbose.cache = mlflow_is_verbose(), packrat.connect.timeout = 10) - - packrat::set_opts( - auto.snapshot = FALSE, - use.cache = TRUE, - project = getwd(), - persist = FALSE - ) - - packrat::restore(overwrite.dirty = TRUE, - prompt = FALSE, - restart = FALSE) - - packrat::on() -} - -mlflow_snapshot_warning <- function() { - warning( - "Running without restoring the packages snapshot may not reload the model correctly. ", - "Consider running 'mlflow_restore_snapshot()' or setting the 'restore' parameter to 'TRUE'." 
- ) -} - -mlflow_restore_or_warning <- function(restore) { - if (restore) { - mlflow_restore_snapshot() - } else { - mlflow_snapshot_warning() - } -} diff --git a/mlflow/R/mlflow/R/project-source.R b/mlflow/R/mlflow/R/project-source.R index aa2eb3e089fdb..4d252a4667898 100644 --- a/mlflow/R/mlflow/R/project-source.R +++ b/mlflow/R/mlflow/R/project-source.R @@ -31,7 +31,7 @@ mlflow_source <- function(uri) { }, interrupt = function(cnd) mlflow_end_run(status = "KILLED"), finally = { - mlflow_end_run() + if (!is.null(mlflow_get_active_run_id())) mlflow_end_run(status = "FAILED") clear_run_params() } ) diff --git a/mlflow/R/mlflow/R/python.R b/mlflow/R/mlflow/R/python.R index ccc969488140b..6e4cbaff61099 100644 --- a/mlflow/R/mlflow/R/python.R +++ b/mlflow/R/mlflow/R/python.R @@ -1,48 +1,35 @@ +# Computes path to Python executable within conda environment created for the MLflow R package #' @importFrom reticulate conda_list -python_bin_conda <- function() { - envs <- conda_list() - mlflow_env <- envs[envs$name == "r-mlflow", ] +get_python_bin <- function() { + conda <- mlflow_conda_bin() + envs <- conda_list(conda = conda) + mlflow_env <- envs[envs$name == mlflow_conda_env_name(), ] if (nrow(mlflow_env) == 0) { - stop("MLflow not configured, please run mlflow_install().") + stop("MLflow not configured, please run install_mlflow().") } - mlflow_env$python } +# Returns path to Python executable within conda environment created for the MLflow R package python_bin <- function() { if (is.null(.globals$python_bin)) { - python <- python_bin_conda() + python <- get_python_bin() .globals$python_bin <- path.expand(python) } .globals$python_bin } -#' @importFrom processx run -pip_run <- function(..., echo = TRUE) { - args <- list(...) - - command <- file.path(dirname(python_bin()), "pip") - result <- run(command, args = unlist(args), echo = echo) - - invisible(result) -} - -#' @importFrom reticulate conda_binary -python_conda_installed <- function() { - tryCatch({ - conda_binary() - TRUE - }, error = function(err) { - FALSE - }) +# Returns path to MLflow CLI, assumed to be in the same bin/ directory as the +# Python executable +python_mlflow_bin <- function() { + python_bin_dir <- dirname(python_bin()) + file.path(python_bin_dir, "mlflow") } +# Return path to conda home directory, such that the `conda` executable can be found +# under conda_home/bin/ #' @importFrom reticulate conda_binary -python_conda_bin <- function() { - dirname(conda_binary()) -} - python_conda_home <- function() { - dirname(python_conda_bin()) + dirname(dirname(mlflow_conda_bin())) } diff --git a/mlflow/R/mlflow/R/tracking-client.R b/mlflow/R/mlflow/R/tracking-client.R index d2e226fe6e332..b93c4a86d1d73 100644 --- a/mlflow/R/mlflow/R/tracking-client.R +++ b/mlflow/R/mlflow/R/tracking-client.R @@ -1,395 +1,140 @@ -new_mlflow_client <- function(tracking_uri, server_url = NULL) { - structure( - list( - tracking_uri = tracking_uri, - server_url = server_url %||% tracking_uri - ), - class = "mlflow_client" - ) -} - -#' Initialize an MLflow client -#' -#' @param tracking_uri The tracking URI. If not provided, defaults to the service -#' set by `mlflow_set_tracking_uri()`. 
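Given `mlflow_conda_bin()` introduced earlier in this patch, a conda installation outside the default search path can be selected through `MLFLOW_CONDA_HOME`; the path below is an example:

```r
# install_mlflow() will then resolve /opt/miniconda3/bin/conda
# via mlflow_conda_bin() instead of auto-detecting conda.
Sys.setenv(MLFLOW_CONDA_HOME = "/opt/miniconda3")
mlflow::install_mlflow()
```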
-#' @keywords internal -mlflow_client <- function(tracking_uri = NULL) { - tracking_uri <- tracking_uri %||% mlflow_get_tracking_uri() - server_url <- if (startsWith(tracking_uri, "http")) { - tracking_uri - } else if (!is.null(mlflow_local_server(tracking_uri)$server_url)) { - mlflow_local_server(tracking_uri)$server_url - } else { - local_server <- mlflow_server(file_store = tracking_uri, port = mlflow_connect_port()) - mlflow_register_local_server(tracking_uri = tracking_uri, local_server = local_server) - local_server$server_url - } - new_mlflow_client(tracking_uri, server_url = server_url) +new_mlflow_client <- function(tracking_uri) { + UseMethod("new_mlflow_client") } -#' Create Experiment - Tracking Client -#' -#' Creates an MLflow experiment. -#' -#' @param name The name of the experiment to create. -#' @param artifact_location Location where all artifacts for this experiment are stored. If -#' not provided, the remote server will select an appropriate default. -#' @template roxlate-client -mlflow_client_create_experiment <- function(client, name, artifact_location = NULL) { - name <- forge::cast_string(name) - response <- mlflow_rest( - "experiments", "create", client = client, verb = "POST", - data = list( - name = name, - artifact_location = artifact_location - ) - ) - invisible(response$experiment_id) -} - -#' List Experiments -#' -#' Get a list of all experiments. -#' -#' @param view_type Qualifier for type of experiments to be returned. Defaults to `ACTIVE_ONLY`. -#' @template roxlate-client -mlflow_client_list_experiments <- function(client, view_type = c("ACTIVE_ONLY", "DELETED_ONLY", "ALL")) { - view_type <- match.arg(view_type) - response <- mlflow_rest( - "experiments", "list", client = client, verb = "GET", - query = list( - view_type = view_type - )) - exps <- response$experiments - - exps$artifact_location <- mlflow_relative_paths(exps$artifact_location) - exps -} - -#' Get Experiment -#' -#' Get meta data for experiment and a list of runs for this experiment. -#' -#' @param experiment_id Identifer to get an experiment. -#' @template roxlate-client -mlflow_client_get_experiment <- function(client, experiment_id) { - mlflow_rest( - "experiments", "get", client = client, - query = list(experiment_id = experiment_id) - ) -} - -#' Get Experiment by Name -#' -#' Get meta data for experiment by name. -#' -#' @param name The experiment name. -#' @template roxlate-client -mlflow_client_get_experiment_by_name <- function(client, name) { - exps <- mlflow_client_list_experiments(client = client) - experiment <- exps[exps$name == name, ] - if (nrow(experiment)) experiment else NULL -} - -#' Create Run -#' -#' reate a new run within an experiment. A run is usually a single execution of a machine learning or data ETL pipeline. -#' -#' MLflow uses runs to track Param, Metric, and RunTag, associated with a single execution. -#' -#' @param experiment_id Unique identifier for the associated experiment. -#' @param user_id User ID or LDAP for the user executing the run. -#' @param run_name Human readable name for run. -#' @param source_type Originating source for this run. One of Notebook, Job, Project, Local or Unknown. -#' @param source_name String descriptor for source. For example, name or description of the notebook, or job name. -#' @param start_time Unix timestamp of when the run started in milliseconds. -#' @param source_version Git version of the source code used to create run. -#' @param entry_point_name Name of the entry point for the run. 
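What `new_mlflow_uri()` produces for a few representative inputs; it is unexported, hence `:::`, and the comments paraphrase the resulting fields rather than quoting literal output:

```r
mlflow:::new_mlflow_uri("mlruns")
# scheme "file", path "mlruns"          -> dispatches on class "mlflow_file"
mlflow:::new_mlflow_uri("http://localhost:5000")
# scheme "http", path "localhost:5000"  -> dispatches on class "mlflow_http"
mlflow:::new_mlflow_uri("databricks")
# first expanded to "databricks://", then parsed as scheme "databricks"
```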
-#' @param tags Additional metadata for run in key-value pairs. -#' @template roxlate-client -mlflow_client_create_run <- function( - client, experiment_id, user_id = NULL, run_name = NULL, source_type = NULL, - source_name = NULL, entry_point_name = NULL, start_time = NULL, - source_version = NULL, tags = NULL -) { - tags <- if (!is.null(tags)) tags %>% - purrr::imap(~ list(key = .y, value = .x)) %>% - unname() - - start_time <- start_time %||% current_time() - user_id <- user_id %||% mlflow_user() - - response <- mlflow_rest( - "runs", "create", client = client, verb = "POST", - data = list( - experiment_id = experiment_id, - user_id = user_id, - run_name = run_name, - source_type = source_type, - source_name = source_name, - entry_point_name = entry_point_name, - start_time = start_time, - source_version = source_version, - tags = tags - ) - ) - new_mlflow_entities_run(response) -} - -mlflow_rest_update_run <- function(client, run_uuid, status, end_time) { - mlflow_rest("runs", "update", client = client, verb = "POST", data = list( - run_uuid = run_uuid, - status = status, - end_time = end_time - )) -} +new_mlflow_uri <- function(raw_uri) { + # Special case 'databricks' + if (identical(raw_uri, "databricks")) { + raw_uri <- paste0("databricks", "://") + } -#' Delete Experiment -#' -#' Mark an experiment and associated runs, params, metrics, … etc for deletion. If the -#' experiment uses FileStore, artifacts associated with experiment are also deleted. -#' -#' @param experiment_id ID of the associated experiment. This field is required. -#' @template roxlate-client -mlflow_client_delete_experiment <- function(client, experiment_id) { - mlflow_rest( - "experiments", "delete", client = client, verb = "POST", - data = list(experiment_id = experiment_id), + if (!grepl("://", raw_uri)) { + raw_uri <- paste0("file://", raw_uri) + } + parts <- strsplit(raw_uri, "://")[[1]] + structure( + list(scheme = parts[1], path = parts[2]), + class = c(paste("mlflow_", parts[1], sep = ""), "mlflow_uri") ) } -#' Restore Experiment -#' -#' Restore an experiment marked for deletion. This also restores associated metadata, -#' runs, metrics, and params. If experiment uses FileStore, underlying artifacts -#' associated with experiment are also restored. -#' -#' Throws RESOURCE_DOES_NOT_EXIST if experiment was never created or was permanently deleted. -#' -#' @param experiment_id ID of the associated experiment. This field is required. -#' @template roxlate-client -mlflow_client_restore_experiment <- function(client, experiment_id) { - mlflow_rest( - "experiments", "restore", client = client, verb = "POST", - data = list(experiment_id = experiment_id), +new_mlflow_client_impl <- function(get_host_creds, get_cli_env = list, class = character()) { + structure( + list(get_host_creds = get_host_creds, + get_cli_env = get_cli_env + ), + class = c(class, "mlflow_client") ) } -#' Get Run -#' -#' Get meta data, params, tags, and metrics for run. Only last logged value for each metric is returned. 
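Since `new_mlflow_client()` dispatches on the URI class, additional schemes could in principle be supported by adding a method; a sketch with an invented `mycorp` scheme (only `new_mlflow_client_impl()` and `new_mlflow_host_creds()` come from the patch, everything mycorp-specific is made up):

```r
new_mlflow_client.mlflow_mycorp <- function(tracking_uri) {
  new_mlflow_client_impl(
    get_host_creds = function() {
      new_mlflow_host_creds(
        host = paste0("https://", tracking_uri$path),
        token = Sys.getenv("MYCORP_API_TOKEN", NA)  # hypothetical variable
      )
    },
    class = "mlflow_mycorp_client"
  )
}
```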
-#' -#' @template roxlate-run-id -#' @template roxlate-client -mlflow_client_get_run <- function(client, run_id) { - response <- mlflow_rest( - "runs", "get", client = client, verb = "GET", - query = list(run_uuid = run_id) +new_mlflow_host_creds <- function( host = NA, username = NA, password = NA, token = NA, + insecure = "False") { + insecure_arg <- if (is.null(insecure) || is.na(insecure)) { + "False" + } else { + list(true = "True", false = "False")[[tolower(insecure)]] + } + structure( + list(host = host, username = username, password = password, token = token, + insecure = insecure_arg), + class = "mlflow_host_creds" ) - new_mlflow_entities_run(response) } -#' Log Metric -#' -#' API to log a metric for a run. Metrics key-value pair that record a single float measure. -#' During a single execution of a run, a particular metric can be logged several times. -#' Backend will keep track of historical values along with timestamps. -#' -#' @param key Name of the metric. -#' @param value Float value for the metric being logged. -#' @param timestamp Unix timestamp in milliseconds at the time metric was logged. -#' @template roxlate-run-id -#' @template roxlate-client -mlflow_client_log_metric <- function(client, run_id, key, value, timestamp = NULL) { - if (!is.numeric(value)) stop( - "Metric `", key, "`` must be numeric but ", class(value)[[1]], " found.", - call. = FALSE +#' @export +print.mlflow_host_creds <- function(x, ...) { + mlflow_host_creds <- x + args <- list( + host = if (is.na(mlflow_host_creds$host)) { + "" + } else { + paste ("host = ", mlflow_host_creds$host, sep = "") + }, + username = if (is.na(mlflow_host_creds$username)) { + "" + } else { + paste ("username = ", mlflow_host_creds$username, sep = "") + }, + password = if (is.na(mlflow_host_creds$password)) { + "" + } else { + "password = *****" + }, + token = if (is.na(mlflow_host_creds$token)) { + "" + } else { + "token = *****" + }, + insecure = paste("insecure = ", as.character(mlflow_host_creds$insecure), + sep = ""), + sep = ", " ) - timestamp <- timestamp %||% current_time() - mlflow_rest("runs", "log-metric", client = client, verb = "POST", data = list( - run_uuid = run_id, - key = key, - value = value, - timestamp = timestamp - )) + cat("mlflow_host_creds( ") + do.call(cat, args[args != ""]) + cat(")\n") } -#' Log Parameter -#' -#' API to log a parameter used for this run. Examples are params and hyperparams -#' used for ML training, or constant dates and values used in an ETL pipeline. -#' A params is a STRING key-value pair. For a run, a single parameter is allowed -#' to be logged only once. -#' -#' @param key Name of the parameter. -#' @param value String value of the parameter. -#' @template roxlate-run-id -#' @template roxlate-client -mlflow_client_log_param <- function(client, run_id, key, value) { - mlflow_rest("runs", "log-parameter", client = client, verb = "POST", data = list( - run_uuid = run_id, - key = key, - value = cast_string(value) - )) -} - -#' Set Tag -#' -#' Set a tag on a run. Tags are run metadata that can be updated during and -#' after a run completes. -#' -#' @param key Name of the tag. Maximum size is 255 bytes. This field is required. -#' @param value String value of the tag being logged. Maximum size is 500 bytes. This field is required. 
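A quick illustration of the masking behavior of the print method above, with placeholder credentials (`new_mlflow_host_creds()` is internal, hence `:::`):

```r
creds <- mlflow:::new_mlflow_host_creds(
  host = "https://tracking.example.com",
  username = "user",
  password = "secret"
)
print(creds)
# host and username print in full; password and token are shown only as "*****"
```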
-#' @template roxlate-run-id -#' @template roxlate-client -mlflow_client_set_tag <- function(client, run_id, key, value) { - mlflow_rest("runs", "set-tag", client = client, verb = "POST", data = list( - run_uuid = run_id, - key = key, - value = value - )) - invisible(NULL) -} - -#' Terminate a Run -#' -#' @param run_id Unique identifier for the run. -#' @param status Updated status of the run. Defaults to `FINISHED`. -#' @param end_time Unix timestamp of when the run ended in milliseconds. -#' @template roxlate-run-id -#' @template roxlate-client -mlflow_client_set_terminated <- function( - client, run_id, status = c("FINISHED", "SCHEDULED", "FAILED", "KILLED"), - end_time = NULL -) { - status <- match.arg(status) - end_time <- end_time %||% current_time() - response <- mlflow_rest_update_run(client, run_id, status, end_time) - tidy_run_info(response$run_info) -} - -#' Delete a Run -#' -#' @template roxlate-client -#' @template roxlate-run-id -mlflow_client_delete_run <- function(client, run_id) { - mlflow_rest("runs", "delete", client = client, verb = "POST", data = list( - run_uuid = run_id - )) +new_mlflow_client.mlflow_file <- function(tracking_uri) { + path <- tracking_uri$path + server_url <- if (!is.null(mlflow_local_server(path)$server_url)) { + mlflow_local_server(path)$server_url + } else { + local_server <- mlflow_server(file_store = path, port = mlflow_connect_port()) + mlflow_register_local_server(tracking_uri = path, local_server = local_server) + local_server$server_url + } + new_mlflow_client_impl(get_host_creds = function () { + new_mlflow_host_creds(host = server_url) + }, class = "mlflow_file_client") } -#' Restore a Run -#' -#' @template roxlate-client -#' @template roxlate-run-id -mlflow_client_restore_run <- function(client, run_id) { - mlflow_rest("runs", "restore", client = client, verb = "POST", data = list( - run_uuid = run_id - )) +new_mlflow_client.default <- function(tracking_uri) { + stop(paste("Unsupported scheme: '", tracking_uri$scheme, "'", sep = "")) } -#' Log Artifact -#' -#' Logs an specific file or directory as an artifact. -#' -#' @param path The file or directory to log as an artifact. -#' @param artifact_path Destination path within the run’s artifact URI. -#' @template roxlate-client -#' @template roxlate-run-id -#' -#' @details -#' -#' When logging to Amazon S3, ensure that the user has a proper policy -#' attach to it, for instance: -#' -#' \code{ -#' { -#' "Version": "2012-10-17", -#' "Statement": [ -#' { -#' "Sid": "VisualEditor0", -#' "Effect": "Allow", -#' "Action": [ -#' "s3:PutObject", -#' "s3:GetObject", -#' "s3:ListBucket", -#' "s3:GetBucketLocation" -#' ], -#' "Resource": [ -#' "arn:aws:s3:::mlflow-test/*", -#' "arn:aws:s3:::mlflow-test" -#' ] -#' } -#' ] -#' } -#' } -#' -#' Additionally, at least the \code{AWS_ACCESS_KEY_ID} and \code{AWS_SECRET_ACCESS_KEY} -#' environment variables must be set to the corresponding key and secrets provided -#' by Amazon IAM. 
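For HTTP(S) tracking URIs, credentials are picked up from the environment both for REST calls and for CLI subprocesses, as `basic_http_client()` above shows. A sketch with placeholder values (`MLFLOW_TOKEN` and `MLFLOW_INSECURE` are read the same way):

```r
Sys.setenv(
  MLFLOW_TRACKING_URI = "https://tracking.example.com",
  MLFLOW_USERNAME = "user",
  MLFLOW_PASSWORD = "secret"
)
client <- mlflow::mlflow_client()
```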
-mlflow_client_log_artifact <- function(client, run_id, path, artifact_path = NULL) { - artifact_param <- NULL - if (!is.null(artifact_path)) artifact_param <- "--artifact-path" - - if (as.logical(fs::is_file(path))) { - command <- "log-artifact" - local_param <- "--local-file" - } else { - command <- "log-artifacts" - local_param <- "--local-dir" +basic_http_client <- function(tracking_uri) { + host <- paste(tracking_uri$scheme, tracking_uri$path, sep = "://") + get_host_creds <- function () { + new_mlflow_host_creds( + host = host, + username = Sys.getenv("MLFLOW_USERNAME", NA), + password = Sys.getenv("MLFLOW_PASSWORD", NA), + token = Sys.getenv("MLFLOW_TOKEN", NA), + insecure = Sys.getenv("MLFLOW_INSECURE", NA) + ) } + cli_env <- function() { + res <- list( + MLFLOW_USERNAME = Sys.getenv("MLFLOW_USERNAME", NA), + MLFLOW_PASSWORD = Sys.getenv("MLFLOW_PASSWORD", NA), + MLFLOW_TOKEN = Sys.getenv("MLFLOW_TOKEN", NA), + MLFLOW_INSECURE = Sys.getenv("MLFLOW_INSECURE", NA) + ) + res[!is.na(res)] + } + new_mlflow_client_impl(get_host_creds, cli_env, class = "mlflow_http_client") +} - mlflow_cli("artifacts", - command, - local_param, - path, - artifact_param, - artifact_path, - "--run-id", - run_id) - - invisible(NULL) +new_mlflow_client.mlflow_http <- function(tracking_uri) { + basic_http_client(tracking_uri) } -#' List artifacts -#' -#' @template roxlate-client -#' @template roxlate-run-id -#' @param path The run's relative artifact path to list from. If not specified, it is -#' set to the root artifact path -mlflow_client_list_artifacts <- function(client, run_id, path = NULL) { - response <- mlflow_rest( - "artifacts", "list", client = client, verb = "GET", - query = list( - run_uuid = run_id, - path = path - )) - response +new_mlflow_client.mlflow_https <- function(tracking_uri) { + basic_http_client(tracking_uri) } -#' Download Artifacts +#' Initialize an MLflow Client #' -#' Download an artifact file or directory from a run to a local directory if applicable, -#' and return a local path for it. +#' Initializes and returns an MLflow client that communicates with the tracking server or store +#' at the specified URI. #' -#' @template roxlate-client -#' @template roxlate-run-id -#' @param path Relative source path to the desired artifact. -mlflow_client_download_artifacts <- function(client, run_id, path) { - result <- mlflow_cli( - "artifacts", "download", - "--run-id", run_id, - "--artifact-path", path, - echo = FALSE, - stderr_callback = function(x, p) { - if (grepl("FileNotFoundError", x)) - stop( - gsub("(.|\n)*(?=FileNotFoundError)", "", x, perl = TRUE), - call. = FALSE - ) - } - ) - - gsub("\n", "", result$stdout) +#' @param tracking_uri The tracking URI. If not provided, defaults to the service +#' set by `mlflow_set_tracking_uri()`. +#' @export +mlflow_client <- function(tracking_uri = NULL) { + tracking_uri <- new_mlflow_uri(tracking_uri %||% mlflow_get_tracking_uri()) + client <- new_mlflow_client(tracking_uri) + if (inherits(client, "mlflow_file_client")) mlflow_validate_server(client) + client } diff --git a/mlflow/R/mlflow/R/tracking-experiments.R b/mlflow/R/mlflow/R/tracking-experiments.R new file mode 100644 index 0000000000000..6bc5f40519cc6 --- /dev/null +++ b/mlflow/R/mlflow/R/tracking-experiments.R @@ -0,0 +1,197 @@ +#' Create Experiment +#' +#' Creates an MLflow experiment and returns its id. +#' +#' @param name The name of the experiment to create. +#' @param artifact_location Location where all artifacts for this experiment are stored. 
If
+#' not provided, the remote server will select an appropriate default.
+#' @template roxlate-client
+#' @export
+mlflow_create_experiment <- function(name, artifact_location = NULL, client = NULL) {
+  client <- resolve_client(client)
+  name <- forge::cast_string(name)
+  response <- mlflow_rest(
+    "experiments", "create",
+    client = client, verb = "POST",
+    data = list(
+      name = name,
+      artifact_location = artifact_location
+    )
+  )
+  response$experiment_id
+}
+
+#' List Experiments
+#'
+#' Gets a list of all experiments.
+#'
+#' @param view_type Qualifier for type of experiments to be returned. Defaults to `ACTIVE_ONLY`.
+#' @template roxlate-client
+#' @export
+mlflow_list_experiments <- function(view_type = c("ACTIVE_ONLY", "DELETED_ONLY", "ALL"), client = NULL) {
+  client <- resolve_client(client)
+  view_type <- match.arg(view_type)
+  response <- mlflow_rest(
+    "experiments", "list",
+    client = client, verb = "GET",
+    query = list(view_type = view_type)
+  )
+
+  # Return `NULL` if no experiments
+  if (!length(response)) return(NULL)
+
+  response$experiments %>%
+    purrr::transpose() %>%
+    purrr::map(unlist) %>%
+    tibble::as_tibble()
+}
+
+#' Get Experiment
+#'
+#' Gets metadata for an experiment and a list of runs for the experiment. Attempts to obtain the
+#' active experiment if both `experiment_id` and `name` are unspecified.
+#'
+#' @param experiment_id Identifier to get an experiment.
+#' @param name The experiment name. Only one of `name` or `experiment_id` should be specified.
+#' @template roxlate-client
+#' @export
+mlflow_get_experiment <- function(experiment_id = NULL, name = NULL, client = NULL) {
+  if (!is.null(name) && !is.null(experiment_id)) {
+    stop("Only one of `name` or `experiment_id` should be specified.", call. = FALSE)
+  }
+
+  client <- resolve_client(client)
+
+  if (!is.null(name)) return(mlflow_get_experiment_by_name(client = client, name = name))
+
+  experiment_id <- resolve_experiment_id(experiment_id)
+  experiment_id <- cast_string(experiment_id)
+  response <- mlflow_rest(
+    "experiments", "get",
+    client = client, query = list(experiment_id = experiment_id)
+  )
+  response$experiment %>%
+    new_mlflow_experiment()
+}
+
+mlflow_get_experiment_by_name <- function(name, client = NULL) {
+  client <- resolve_client(client)
+  exps <- mlflow_list_experiments(client = client)
+  if (is.null(exps)) stop("No experiments found.", call. = FALSE)
+
+  experiment <- exps[exps$name == name, ]
+  if (nrow(experiment)) {
+    new_mlflow_experiment(experiment)
+  } else {
+    stop(glue::glue("Experiment `{exp}` not found.", exp = name), call. = FALSE)
+  }
+}
+
+#' Delete Experiment
+#'
+#' Marks an experiment and associated runs, params, metrics, etc. for deletion. If the
+#' experiment uses FileStore, artifacts associated with experiment are also deleted.
+#'
+#' @param experiment_id ID of the associated experiment. This field is required.
+#' @template roxlate-client
+#' @export
+mlflow_delete_experiment <- function(experiment_id, client = NULL) {
+  if (identical(experiment_id, mlflow_get_active_experiment_id()))
+    stop("Cannot delete an active experiment.", call. = FALSE)
+
+  client <- resolve_client(client)
+  mlflow_rest(
+    "experiments", "delete",
+    verb = "POST", client = client,
+    data = list(experiment_id = experiment_id)
+  )
+  invisible(NULL)
+}
+
+#' Restore Experiment
+#'
+#' Restores an experiment marked for deletion. This also restores associated metadata,
+#' runs, metrics, and params.
If experiment uses FileStore, underlying artifacts
+#' associated with experiment are also restored.
+#'
+#' Throws `RESOURCE_DOES_NOT_EXIST` if the experiment was never created or was permanently deleted.
+#'
+#' @param experiment_id ID of the associated experiment. This field is required.
+#' @template roxlate-client
+#' @export
+mlflow_restore_experiment <- function(experiment_id, client = NULL) {
+  client <- resolve_client(client)
+  mlflow_rest(
+    "experiments", "restore",
+    client = client, verb = "POST",
+    data = list(experiment_id = experiment_id)
+  )
+  invisible(NULL)
+}
+
+#' Rename Experiment
+#'
+#' Renames an experiment.
+#'
+#' @template roxlate-client
+#' @param experiment_id ID of the associated experiment. This field is required.
+#' @param new_name The experiment’s name will be changed to this. The new name must be unique.
+#' @export
+mlflow_rename_experiment <- function(new_name, experiment_id = NULL, client = NULL) {
+  experiment_id <- resolve_experiment_id(experiment_id)
+
+  client <- resolve_client(client)
+  mlflow_rest(
+    "experiments", "update",
+    client = client, verb = "POST",
+    data = list(
+      experiment_id = experiment_id,
+      new_name = new_name
+    )
+  )
+  invisible(NULL)
+}
+
+#' Set Experiment
+#'
+#' Sets an experiment as the active experiment. Either the name or ID of the experiment can be provided.
+#' If a name is provided but the experiment does not exist, this function creates an experiment
+#' with the provided name. Returns the ID of the active experiment.
+#'
+#' @param experiment_name Name of experiment to be activated.
+#' @param experiment_id ID of experiment to be activated.
+#' @param artifact_location Location where all artifacts for this experiment are stored. If
+#' not provided, the remote server will select an appropriate default.
+#' @export
+mlflow_set_experiment <- function(experiment_name = NULL, experiment_id = NULL, artifact_location = NULL) {
+  if (!is.null(experiment_name) && !is.null(experiment_id)) {
+    stop("Only one of `experiment_name` or `experiment_id` should be specified.",
+      call. = FALSE
+    )
+  }
+
+  if (is.null(experiment_name) && is.null(experiment_id)) {
+    stop("Exactly one of `experiment_name` or `experiment_id` should be specified.",
+      call. = FALSE)
+  }
+
+  client <- mlflow_client()
+
+  final_experiment_id <- if (!is.null(experiment_name)) {
+    tryCatch(
+      mlflow_id(mlflow_get_experiment(client = client, name = experiment_name)),
+      error = function(e) {
+        message("Experiment `", experiment_name, "` does not exist. Creating a new experiment.")
+        mlflow_create_experiment(client = client, name = experiment_name, artifact_location = artifact_location)
+      }
+    )
+  } else {
+    experiment_id
+  }
+
+  invisible(mlflow_set_active_experiment_id(final_experiment_id))
+}
diff --git a/mlflow/R/mlflow/R/tracking-fluent.R b/mlflow/R/mlflow/R/tracking-fluent.R
deleted file mode 100644
index cc0e96f5787fc..0000000000000
--- a/mlflow/R/mlflow/R/tracking-fluent.R
+++ /dev/null
@@ -1,220 +0,0 @@
-#' Create Experiment
-#'
-#' Creates an MLflow experiment.
-#'
-#' @param name The name of the experiment to create.
-#' @param artifact_location Location where all artifacts for this experiment are stored. If
-#' not provided, the remote server will select an appropriate default.
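A short end-to-end sketch of the reworked experiment API defined above; the experiment names are examples:

```r
library(mlflow)

exp_id <- mlflow_create_experiment("ames-housing")   # returns the new id
mlflow_set_experiment(experiment_name = "ames-housing")
mlflow_list_experiments()                            # tibble, or NULL if none
mlflow_rename_experiment("ames-housing-v2", experiment_id = exp_id)
```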
-#' @template roxlate-fluent -#' -#' @export -mlflow_create_experiment <- function(name, artifact_location = NULL) { - client <- mlflow_client() - mlflow_client_create_experiment(client, name, artifact_location) -} - -#' Set Experiment -#' -#' Set given experiment as active experiment. If experiment does not -#' exist, create an experiment with provided name. -#' -#' @param experiment_name Name of experiment to be activated. -#' @template roxlate-fluent -#' @export -mlflow_set_experiment <- function(experiment_name) { - client <- mlflow_client() - experiment <- mlflow_client_get_experiment_by_name(client, experiment_name) - exp_id <- if (!is.null(experiment)) { - experiment$experiment_id - } else { - message("`", experiment_name, "` does not exist. Creating a new experiment.") - mlflow_client_create_experiment(client, experiment_name) - } - mlflow_set_active_experiment_id(exp_id) -} - -#' Start Run -#' -#' Starts a new run within an experiment, should be used within a \code{with} block. -#' -#' @param run_uuid If specified, get the run with the specified UUID and log metrics -#' and params under that run. The run's end time is unset and its status is set to -#' running, but the run's other attributes remain unchanged. -#' @param experiment_id Used only when ``run_uuid`` is unspecified. ID of the experiment under -#' which to create the current run. If unspecified, the run is created under -#' a new experiment with a randomly generated name. -#' @param source_name Name of the source file or URI of the project to be associated with the run. -#' Defaults to the current file if none provided. -#' @param source_version Optional Git commit hash to associate with the run. -#' @param entry_point_name Optional name of the entry point for to the current run. -#' @param source_type Integer enum value describing the type of the run ("local", "project", etc.). -#' @template roxlate-fluent -#' -#' @examples -#' \dontrun{ -#' with(mlflow_start_run(), { -#' mlflow_log("test", 10) -#' }) -#' } -#' -#' @export -mlflow_start_run <- function(run_uuid = NULL, experiment_id = NULL, source_name = NULL, - source_version = NULL, entry_point_name = NULL, - source_type = "LOCAL") { - active_run <- mlflow_active_run() - if (!is.null(active_run)) { - stop("Run with UUID ", active_run_id(), " is already active.", - call. = FALSE) - } - - existing_run_uuid <- run_uuid %||% { - env_run_id <- Sys.getenv("MLFLOW_RUN_ID") - if (nchar(env_run_id)) env_run_id - } - - run <- if (!is.null(existing_run_uuid)) { - client <- mlflow_client() - mlflow_client_get_run(client, existing_run_uuid) - } else { - experiment_id <- as.integer( - experiment_id %||% mlflow_get_active_experiment_id() %||% Sys.getenv("MLFLOW_EXPERIMENT_ID", unset = "0") - ) - - client <- mlflow_client() - - mlflow_client_create_run( - client = client, - experiment_id = experiment_id, - source_name = source_name %||% get_source_name(), - source_version = source_version %||% get_source_version(), - entry_point_name = entry_point_name, - source_type = source_type - ) - } - - mlflow_set_active_run(run) -} - -#' Log Metric -#' -#' API to log a metric for a run. Metrics key-value pair that record a single float measure. -#' During a single execution of a run, a particular metric can be logged several times. -#' Backend will keep track of historical values along with timestamps. -#' -#' @param key Name of the metric. -#' @param value Float value for the metric being logged. -#' @param timestamp Unix timestamp in milliseconds at the time metric was logged. 
-#' @template roxlate-fluent -#' -#' @export -mlflow_log_metric <- function(key, value, timestamp = NULL) { - active_run <- mlflow_get_or_start_run() - client <- mlflow_client() - mlflow_client_log_metric( - client = client, run_id = run_id(active_run), - key = key, value = value, timestamp = timestamp - ) - invisible(value) -} - -#' Set Tag -#' -#' Set a tag on a run. Tags are run metadata that can be updated during and -#' after a run completes. -#' -#' @param key Name of the tag. Maximum size is 255 bytes. This field is required. -#' @param value String value of the tag being logged. Maximum size is 500 bytes. This field is required. -#' @template roxlate-fluent -#' -#' @export -mlflow_set_tag <- function(key, value) { - active_run <- mlflow_get_or_start_run() - client <- mlflow_client() - mlflow_client_set_tag( - client = client, run_id = run_id(active_run), key = key, value = value - ) -} - -#' End a Run -#' -#' End an active MLflow run (if there is one). -#' -#' @param status Updated status of the run. Defaults to `FINISHED`. -#' @template roxlate-fluent -#' -#' @export -mlflow_end_run <- function(status = c("FINISHED", "SCHEDULED", "FAILED", "KILLED")) { - active_run <- mlflow_active_run() - if (!is.null(active_run)) { - status <- match.arg(status) - client <- mlflow_client() - mlflow_client_set_terminated(client, run_id(active_run), status) - mlflow_set_active_run(NULL) - } - invisible(NULL) -} - -#' Log Parameter -#' -#' API to log a parameter used for this run. Examples are params and hyperparams -#' used for ML training, or constant dates and values used in an ETL pipeline. -#' A params is a STRING key-value pair. For a run, a single parameter is allowed -#' to be logged only once. -#' -#' @param key Name of the parameter. -#' @param value String value of the parameter. -#' @template roxlate-fluent -#' -#' @export -mlflow_log_param <- function(key, value) { - active_run <- mlflow_get_or_start_run() - client <- mlflow_client() - mlflow_client_log_param(client, run_id(active_run), key, value) - invisible(value) -} - -#' Log Artifact -#' -#' Logs an specific file or directory as an artifact. -#' -#' @param path The file or directory to log as an artifact. -#' @param artifact_path Destination path within the run’s artifact URI. -#' @template roxlate-fluent -#' -#' @details -#' -#' When logging to Amazon S3, ensure that the user has a proper policy -#' attach to it, for instance: -#' -#' \code{ -#' { -#' "Version": "2012-10-17", -#' "Statement": [ -#' { -#' "Sid": "VisualEditor0", -#' "Effect": "Allow", -#' "Action": [ -#' "s3:PutObject", -#' "s3:GetObject", -#' "s3:ListBucket", -#' "s3:GetBucketLocation" -#' ], -#' "Resource": [ -#' "arn:aws:s3:::mlflow-test/*", -#' "arn:aws:s3:::mlflow-test" -#' ] -#' } -#' ] -#' } -#' } -#' -#' Additionally, at least the \code{AWS_ACCESS_KEY_ID} and \code{AWS_SECRET_ACCESS_KEY} -#' environment variables must be set to the corresponding key and secrets provided -#' by Amazon IAM. -#' -#' @export -mlflow_log_artifact <- function(path, artifact_path = NULL) { - active_run <- mlflow_get_or_start_run() - client <- mlflow_client() - mlflow_client_log_artifact(client, run_id(active_run), path, artifact_path) -} diff --git a/mlflow/R/mlflow/R/tracking-globals.R b/mlflow/R/mlflow/R/tracking-globals.R index bf4e4520be908..c412eb7d4b44a 100644 --- a/mlflow/R/mlflow/R/tracking-globals.R +++ b/mlflow/R/mlflow/R/tracking-globals.R @@ -1,27 +1,17 @@ -#' Active Run -#' -#' Retrieves the active run. 
-#' -#' @export -mlflow_active_run <- function() { - .globals$active_run +mlflow_set_active_run_id <- function(run_id) { + .globals$active_run_id <- run_id } -mlflow_set_active_run <- function(run) { - .globals$active_run <- run - invisible(run) -} - -mlflow_get_active_experiment_id <- function() { - .globals$active_experiment_id +mlflow_get_active_run_id <- function() { + .globals$active_run_id } mlflow_set_active_experiment_id <- function(experiment_id) { - if (!identical(experiment_id, .globals$active_experiment_id)) { - .globals$active_experiment_id <- experiment_id - } + .globals$active_experiment_id <- experiment_id +} - invisible(experiment_id) +mlflow_get_active_experiment_id <- function() { + .globals$active_experiment_id } #' Set Remote Tracking URI @@ -39,10 +29,12 @@ mlflow_set_tracking_uri <- function(uri) { #' Get Remote Tracking URI #' +#' Gets the remote tracking URI. +#' #' @export mlflow_get_tracking_uri <- function() { .globals$tracking_uri %||% { env_uri <- Sys.getenv("MLFLOW_TRACKING_URI") - if (nchar(env_uri)) env_uri else fs::path_abs("mlruns") + if (nchar(env_uri)) env_uri else paste("file://", fs::path_abs("mlruns"), sep = "") } } diff --git a/mlflow/R/mlflow/R/tracking-rest.R b/mlflow/R/mlflow/R/tracking-rest.R index f55b40f76e4ae..5f5ca8511b213 100644 --- a/mlflow/R/mlflow/R/tracking-rest.R +++ b/mlflow/R/mlflow/R/tracking-rest.R @@ -1,65 +1,96 @@ mlflow_rest_path <- function(version) { switch( version, - "2.0" = "ajax-api/2.0/preview/mlflow" + "2.0" = "api/2.0/preview/mlflow" ) } -mlflow_rest_body <- function(data) { - data <- Filter(length, data) - paste0( - "\"", - gsub( - "\\\"", - "\\\\\"", - as.character( - jsonlite::toJSON(data, auto_unbox = TRUE) - ) - ), - "\"" - ) +#' @importFrom httr timeout +mlflow_rest_timeout <- function() { + timeout(getOption("mlflow.rest.timeout", 60)) } -#' @importFrom httr add_headers -mlflow_rest_headers <- function() { - add_headers("Content-Type" = "application/json") +try_parse_response_as_text <- function(response) { + raw_content <- content(response, type = "raw") + tryCatch({ + rawToChar(raw_content) + }, error = function(e) { + do.call(paste, as.list(raw_content)) + }) } -#' @importFrom httr timeout -mlflow_rest_timeout <- function() { - timeout(getOption("mlflow.rest.timeout", 1)) +#' @importFrom base64enc base64encode +get_rest_config <- function(host_creds) { + headers <- list() + auth_header <- if (!is.na(host_creds$username) && !is.na(host_creds$password)) { + basic_auth_str <- paste(host_creds$username, host_creds$password, sep = ":") + paste("Basic", base64encode(charToRaw(basic_auth_str)), sep = " ") + } else if (!is.na(host_creds$token)) { + paste("Bearer", host_creds$token, sep = " ") + } else { + NA + } + if (!is.na(auth_header)) { + headers$Authorization <- auth_header + } + headers$`User-Agent` <- paste("mlflow-r-client", utils::packageVersion("mlflow"), sep = "/") + is_insecure <- as.logical(host_creds$insecure) + list( + headers = headers, + config = if (is_insecure) { + httr::config(ssl_verifypeer = 0, ssl_verifyhost = 0) + } else { + list() + } + ) } -#' @importFrom httr content -#' @importFrom httr GET -#' @importFrom httr POST -#' @importFrom jsonlite fromJSON -mlflow_rest <- function(..., client, query = NULL, data = NULL, verb = "GET", version = "2.0") { +#' @importFrom httr GET POST add_headers config content +mlflow_rest <- function( ..., client, query = NULL, data = NULL, verb = "GET", version = "2.0") { + host_creds <- client$get_host_creds() + rest_config <- get_rest_config(host_creds) args <- 
list(...)
-  tracking_url <- client$server_url
   api_url <- file.path(
-    tracking_url,
+    host_creds$host,
     mlflow_rest_path(version),
     paste(args, collapse = "/")
   )
   response <- switch(
     verb,
-    GET = GET(api_url, query = query, mlflow_rest_timeout()),
+    GET = GET(
+      api_url,
+      query = query,
+      mlflow_rest_timeout(),
+      config = rest_config$config,
+      do.call(add_headers, rest_config$headers)),
     POST = POST(
       api_url,
-      body = mlflow_rest_body(data),
-      mlflow_rest_headers(),
-      mlflow_rest_timeout()
+      body = if (is.null(data)) NULL else rapply(data, as.character, how = "replace"),
+      encode = "json",
+      mlflow_rest_timeout(),
+      config = rest_config$config,
+      do.call(add_headers, rest_config$headers)
     ),
-    stop("Verb '", verb, "' is unsupported.")
+    stop("Verb '", verb, "' is unsupported.", call. = FALSE)
   )
-
-  if (identical(response$status_code, 500L)) {
-    stop(xml2::as_list(content(response))$html$body$p[[1]])
+  if (response$status_code != 200) {
+    message_body <- tryCatch(
+      paste(content(response, "parsed", type = "application/json"), collapse = "; "),
+      error = function(e) {
+        try_parse_response_as_text(response)
+      }
+    )
+    msg <- paste("API request to endpoint '",
+                 paste(args, collapse = "/"),
+                 "' failed with error code ",
+                 response$status_code,
+                 ". Response body: '",
+                 message_body,
+                 "'",
+                 sep = "")
+    stop(msg, call. = FALSE)
   }
-
   text <- content(response, "text", encoding = "UTF-8")
-  jsonlite::fromJSON(text)
+  jsonlite::fromJSON(text, simplifyVector = FALSE)
 }
diff --git a/mlflow/R/mlflow/R/tracking-runs.R b/mlflow/R/mlflow/R/tracking-runs.R
new file mode 100644
index 0000000000000..759107413a8b8
--- /dev/null
+++ b/mlflow/R/mlflow/R/tracking-runs.R
@@ -0,0 +1,595 @@
+#' Log Metric
+#'
+#' Logs a metric for a run. A metric is a key-value pair that records a single float measure.
+#' During a single execution of a run, a particular metric can be logged several times.
+#' The MLflow Backend keeps track of historical metric values along two axes: timestamp and step.
+#'
+#' @param key Name of the metric.
+#' @param value Float value for the metric being logged.
+#' @param timestamp Timestamp at which to log the metric. Timestamp is rounded to the nearest
+#'   integer. If unspecified, the number of milliseconds since the Unix epoch is used.
+#' @param step Step at which to log the metric. Step is rounded to the nearest integer. If
+#'   unspecified, the default value of zero is used.
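
A minimal usage sketch of the semantics documented above (hypothetical metric name and values; assumes an active run started with mlflow_start_run()):

  # Hypothetical loss curve. `step` and `timestamp` are optional; they default
  # to zero and the current time in epoch milliseconds, respectively.
  mlflow_log_metric("loss", 1.25, step = 1)
  mlflow_log_metric("loss", 0.98, step = 2)
  mlflow_log_metric("loss", 0.81, step = 3)
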
+#' @template roxlate-run-id
+#' @template roxlate-client
+#' @export
+mlflow_log_metric <- function(key, value, timestamp = NULL, step = NULL, run_id = NULL,
+                              client = NULL) {
+  c(client, run_id) %<-% resolve_client_and_run_id(client, run_id)
+  key <- cast_string(key)
+  value <- cast_scalar_double(value)
+  timestamp <- cast_nullable_scalar_double(timestamp)
+  timestamp <- round(timestamp %||% current_time())
+  step <- round(cast_nullable_scalar_double(step) %||% 0)
+  mlflow_rest("runs", "log-metric", client = client, verb = "POST", data = list(
+    run_uuid = run_id,
+    run_id = run_id,
+    key = key,
+    value = value,
+    timestamp = timestamp,
+    step = step
+  ))
+  invisible(value)
+}
+
+mlflow_create_run <- function(start_time = NULL, tags = NULL, experiment_id = NULL, client) {
+  experiment_id <- resolve_experiment_id(experiment_id)
+
+  # Read user_id from tags
+  # user_id is deprecated and will be removed in a future release
+  user_id <- tags[[MLFLOW_TAGS$MLFLOW_USER]] %||% "unknown"
+
+  tags <- if (!is.null(tags)) tags %>%
+    purrr::imap(~ list(key = .y, value = .x)) %>%
+    unname()
+
+  start_time <- start_time %||% current_time()
+
+  response <- mlflow_rest(
+    "runs", "create",
+    client = client, verb = "POST",
+    data = list(
+      experiment_id = experiment_id,
+      user_id = user_id,
+      start_time = start_time,
+      tags = tags
+    )
+  )
+
+  mlflow_get_run(run_id = response$run$info$run_uuid, client = client)
+}
+
+#' Delete a Run
+#'
+#' Deletes the run with the specified ID.
+#' @template roxlate-client
+#' @template roxlate-run-id
+#' @export
+mlflow_delete_run <- function(run_id, client = NULL) {
+  run_id <- cast_string(run_id)
+  if (identical(run_id, mlflow_get_active_run_id()))
+    stop("Cannot delete an active run.", call. = FALSE)
+  client <- resolve_client(client)
+  mlflow_rest("runs", "delete", client = client, verb = "POST", data = list(
+    run_id = run_id
+  ))
+  invisible(NULL)
+}
+
+#' Restore a Run
+#'
+#' Restores the run with the specified ID.
+#' @template roxlate-client
+#' @template roxlate-run-id
+#' @export
+mlflow_restore_run <- function(run_id, client = NULL) {
+  run_id <- cast_string(run_id)
+  client <- resolve_client(client)
+  mlflow_rest("runs", "restore", client = client, verb = "POST", data = list(
+    run_id = run_id
+  ))
+  mlflow_get_run(run_id, client = client)
+}
+
+#' Get Run
+#'
+#' Gets metadata, params, tags, and metrics for a run. Returns a single value for each metric
+#' key: the most recently logged metric value at the largest step.
+#'
+#' @template roxlate-run-id
+#' @template roxlate-client
+#' @export
+mlflow_get_run <- function(run_id = NULL, client = NULL) {
+  run_id <- resolve_run_id(run_id)
+  client <- resolve_client(client)
+  response <- mlflow_rest(
+    "runs", "get",
+    client = client, verb = "GET",
+    query = list(run_uuid = run_id, run_id = run_id)
+  )
+  parse_run(response$run)
+}
+
+#' Log Batch
+#'
+#' Log a batch of metrics, params, and/or tags for a run. The server will respond with an error
+#' (non-200 status code) if any data failed to be persisted. In case of error (due to internal
+#' server error or an invalid request), partial data may be written.
+#' @template roxlate-client
+#' @template roxlate-run-id
+#' @param metrics A dataframe of metrics to log, containing the following columns: "key", "value",
+#'   "step", "timestamp". This dataframe cannot contain any missing ('NA') entries.
+#' @param params A dataframe of params to log, containing the following columns: "key", "value".
+#'   This dataframe cannot contain any missing ('NA') entries.
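
A short sketch of the dataframe shapes these parameters require (hypothetical values; the column sets must match exactly, as enforced by validate_batch_input() below):

  # stringsAsFactors = FALSE keeps key/value columns as plain strings on R < 4.0.
  metrics <- data.frame(
    key = c("accuracy", "accuracy"),
    value = c(0.91, 0.94),
    step = c(1, 2),
    timestamp = c(1556818800000, 1556818860000),  # epoch milliseconds
    stringsAsFactors = FALSE
  )
  params <- data.frame(key = "learning_rate", value = "0.01", stringsAsFactors = FALSE)
  mlflow_log_batch(metrics = metrics, params = params)
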
+#' @param tags A dataframe of tags to log, containing the following columns: "key", "value". +#' This dataframe cannot contain any missing ('NA') entries. +#' @export +mlflow_log_batch <- function(metrics = NULL, params = NULL, tags = NULL, run_id = NULL, + client = NULL) { + validate_batch_input("metrics", metrics, c("key", "value", "step", "timestamp")) + validate_batch_input("params", params, c("key", "value")) + validate_batch_input("tags", tags, c("key", "value")) + + c(client, run_id) %<-% resolve_client_and_run_id(client, run_id) + + mlflow_rest("runs", "log-batch", client = client, verb = "POST", data = list( + run_id = run_id, + metrics = metrics, + params = params, + tags = tags + )) + invisible(NULL) +} + +validate_batch_input <- function(input_type, input_dataframe, expected_column_names) { + if (is.null(input_dataframe)) { + return() + } else if (!setequal(names(input_dataframe), expected_column_names)) { + msg <- paste(input_type, + " batch input dataframe must contain exactly the following columns: ", + paste(expected_column_names, collapse = ", "), + ". Found: ", + paste(names(input_dataframe), collapse = ", "), + sep = "") + stop(msg, call. = FALSE) + } else if (any(is.na(input_dataframe))) { + msg <- paste(input_type, + " batch input dataframe contains a missing ('NA') entry.", + sep = "") + stop(msg, call. = FALSE) + } +} + +#' Set Tag +#' +#' Sets a tag on a run. Tags are run metadata that can be updated during a run and +#' after a run completes. +#' +#' @param key Name of the tag. Maximum size is 255 bytes. This field is required. +#' @param value String value of the tag being logged. Maximum size is 500 bytes. This field is required. +#' @template roxlate-run-id +#' @template roxlate-client +#' @export +mlflow_set_tag <- function(key, value, run_id = NULL, client = NULL) { + c(client, run_id) %<-% resolve_client_and_run_id(client, run_id) + + key <- cast_string(key) + value <- cast_string(value) + + mlflow_rest("runs", "set-tag", client = client, verb = "POST", data = list( + run_uuid = run_id, + run_id = run_id, + key = key, + value = value + )) + + invisible(NULL) +} + +#' Delete Tag +#' +#' Deletes a tag on a run. This is irreversible. Tags are run metadata that can be updated during a run and +#' after a run completes. +#' +#' @param key Name of the tag. Maximum size is 255 bytes. This field is required. +#' @template roxlate-run-id +#' @template roxlate-client +#' @export +mlflow_delete_tag <- function(key, run_id = NULL, client = NULL) { + c(client, run_id) %<-% resolve_client_and_run_id(client, run_id) + + key <- cast_string(key) + + mlflow_rest("runs", "delete-tag", client = client, verb = "POST", data = list( + run_id = run_id, + key = key + )) + + invisible(NULL) +} + +#' Log Parameter +#' +#' Logs a parameter for a run. Examples are params and hyperparams +#' used for ML training, or constant dates and values used in an ETL pipeline. +#' A param is a STRING key-value pair. For a run, a single parameter is allowed +#' to be logged only once. +#' +#' @param key Name of the parameter. +#' @param value String value of the parameter. 
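
A brief sketch of the tag helpers defined above and the parameter logger documented here (hypothetical keys and values; assumes an active run):

  mlflow_log_param("optimizer", "adam")  # a param may be logged only once per run
  mlflow_set_tag("stage", "dev")         # tags may be updated during and after a run
  mlflow_delete_tag("stage")             # tag deletion is irreversible
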
+#' @template roxlate-run-id
+#' @template roxlate-client
+#' @export
+mlflow_log_param <- function(key, value, run_id = NULL, client = NULL) {
+  c(client, run_id) %<-% resolve_client_and_run_id(client, run_id)
+
+  key <- cast_string(key)
+  value <- cast_string(value)
+
+  mlflow_rest("runs", "log-parameter", client = client, verb = "POST", data = list(
+    run_uuid = run_id,
+    run_id = run_id,
+    key = key,
+    value = value
+  ))
+
+  invisible(value)
+}
+
+#' Get Metric History
+#'
+#' Get a list of all values for the specified metric for a given run.
+#'
+#' @template roxlate-run-id
+#' @template roxlate-client
+#' @param metric_key Name of the metric.
+#'
+#' @export
+mlflow_get_metric_history <- function(metric_key, run_id = NULL, client = NULL) {
+  run_id <- resolve_run_id(run_id)
+  client <- resolve_client(client)
+
+  metric_key <- cast_string(metric_key)
+
+  response <- mlflow_rest(
+    "metrics", "get-history",
+    client = client, verb = "GET",
+    query = list(run_uuid = run_id, run_id = run_id, metric_key = metric_key)
+  )
+
+  response$metrics %>%
+    purrr::transpose() %>%
+    purrr::map(unlist) %>%
+    purrr::map_at("timestamp", milliseconds_to_date) %>%
+    purrr::map_at("step", as.double) %>%
+    tibble::as_tibble()
+}
+
+#' Search Runs
+#'
+#' Search for runs that satisfy expressions. Search expressions can use Metric and Param keys.
+#'
+#' @template roxlate-client
+#' @param experiment_ids List of string experiment IDs (or a single string experiment ID) to search
+#'   over. Attempts to use the active experiment if not specified.
+#' @param filter A filter expression over params, metrics, and tags that allows returning only a
+#'   subset of runs. The syntax is a subset of SQL that supports only ANDing together binary
+#'   operations between a param, metric, or tag and a constant.
+#' @param run_view_type Run view type.
+#' @param order_by List of properties to order by. Example: "metrics.acc DESC".
+#'
+#' @export
+mlflow_search_runs <- function(filter = NULL,
+                               run_view_type = c("ACTIVE_ONLY", "DELETED_ONLY", "ALL"),
+                               experiment_ids = NULL,
+                               order_by = list(),
+                               client = NULL) {
+  experiment_ids <- resolve_experiment_id(experiment_ids)
+  # If we get back a single experiment ID, e.g. the active experiment ID, convert it to a list
+  if (is.atomic(experiment_ids)) {
+    experiment_ids <- list(experiment_ids)
+  }
+  client <- resolve_client(client)
+
+  run_view_type <- match.arg(run_view_type)
+  experiment_ids <- cast_string_list(experiment_ids)
+  filter <- cast_nullable_string(filter)
+
+  response <- mlflow_rest("runs", "search", client = client, verb = "POST", data = list(
+    experiment_ids = experiment_ids,
+    filter = filter,
+    run_view_type = run_view_type,
+    order_by = cast_string_list(order_by)
+  ))
+
+  runs_list <- response$runs %>%
+    purrr::map(parse_run)
+  do.call("rbind", runs_list) %||% data.frame()
+}
+
+#' List Artifacts
+#'
+#' Gets a list of artifacts.
+#'
+#' @template roxlate-client
+#' @template roxlate-run-id
+#' @param path The run's relative artifact path to list from.
If not specified, it is +#' set to the root artifact path +#' +#' @export +mlflow_list_artifacts <- function(path = NULL, run_id = NULL, client = NULL) { + run_id <- resolve_run_id(run_id) + client <- resolve_client(client) + + response <- mlflow_rest( + "artifacts", "list", + client = client, verb = "GET", + query = list( + run_uuid = run_id, + run_id = run_id, + path = path + ) + ) + + message(glue::glue("Root URI: {uri}", uri = response$root_uri)) + + response$files %>% + purrr::transpose() %>% + purrr::map(unlist) %>% + tibble::as_tibble() +} + +mlflow_set_terminated <- function(status, end_time, run_id, client) { + + response <- mlflow_rest("runs", "update", verb = "POST", client = client, data = list( + run_uuid = run_id, + run_id = run_id, + status = status, + end_time = end_time + )) + mlflow_get_run(client = client, run_id = response$run_info$run_uuid) +} + +#' Download Artifacts +#' +#' Download an artifact file or directory from a run to a local directory if applicable, +#' and return a local path for it. +#' +#' @template roxlate-client +#' @template roxlate-run-id +#' @param path Relative source path to the desired artifact. +#' @export +mlflow_download_artifacts <- function(path, run_id = NULL, client = NULL) { + run_id <- resolve_run_id(run_id) + client <- resolve_client(client) + result <- mlflow_cli( + "artifacts", "download", + "--run-id", run_id, + "--artifact-path", path, + echo = FALSE, + stderr_callback = function(x, p) { + if (grepl("FileNotFoundError", x)) { + stop( + gsub("(.|\n)*(?=FileNotFoundError)", "", x, perl = TRUE), + call. = FALSE + ) + } + }, + client = client + ) + gsub("\n", "", result$stdout) +} + +# ' Download Artifacts from URI. +mlflow_download_artifacts_from_uri <- function(artifact_uri, client = mlflow_client()) { + result <- mlflow_cli("artifacts", "download", "-u", artifact_uri, echo = FALSE, client = client) + gsub("\n", "", result$stdout) +} + +#' List Run Infos +#' +#' Returns a tibble whose columns contain run metadata (run ID, etc) for all runs under the +#' specified experiment. +#' +#' @param experiment_id Experiment ID. Attempts to use the active experiment if not specified. +#' @param run_view_type Run view type. +#' @template roxlate-client +#' @export +mlflow_list_run_infos <- function(run_view_type = c("ACTIVE_ONLY", "DELETED_ONLY", "ALL"), + experiment_id = NULL, client = NULL) { + experiment_id <- resolve_experiment_id(experiment_id) + client <- resolve_client(client) + + run_view_type <- match.arg(run_view_type) + experiment_ids <- cast_string_list(experiment_id) + + response <- mlflow_rest("runs", "search", client = client, verb = "POST", data = list( + experiment_ids = experiment_ids, + filter = NULL, + run_view_type = run_view_type + )) + + run_infos_list <- response$runs %>% + purrr::map("info") %>% + purrr::map(parse_run_info) + do.call("rbind", run_infos_list) %||% data.frame() +} + +#' Log Artifact +#' +#' Logs a specific file or directory as an artifact for a run. +#' +#' @param path The file or directory to log as an artifact. +#' @param artifact_path Destination path within the run's artifact URI. +#' @template roxlate-client +#' @template roxlate-run-id +#' +#' @details +#' +#' When logging to Amazon S3, ensure that you have the s3:PutObject, s3:GetObject, +#' s3:ListBucket, and s3:GetBucketLocation permissions on your bucket. +#' +#' Additionally, at least the \code{AWS_ACCESS_KEY_ID} and \code{AWS_SECRET_ACCESS_KEY} +#' environment variables must be set to the corresponding key and secrets provided +#' by Amazon IAM. 
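
A hedged sketch of the S3 setup described above (placeholder credentials, file, and destination path; mlflow_log_artifact() itself follows below):

  Sys.setenv(
    AWS_ACCESS_KEY_ID = "<your-access-key-id>",         # placeholder, not a real key
    AWS_SECRET_ACCESS_KEY = "<your-secret-access-key>"  # placeholder
  )
  mlflow_log_artifact("model.rds", artifact_path = "models")
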
+#' +#' @export +mlflow_log_artifact <- function(path, artifact_path = NULL, run_id = NULL, client = NULL) { + c(client, run_id) %<-% resolve_client_and_run_id(client, run_id) + artifact_param <- NULL + if (!is.null(artifact_path)) artifact_param <- "--artifact-path" + + if (as.logical(fs::is_file(path))) { + command <- "log-artifact" + local_param <- "--local-file" + } else { + command <- "log-artifacts" + local_param <- "--local-dir" + } + + mlflow_cli("artifacts", + command, + local_param, + path, + artifact_param, + artifact_path, + "--run-id", + run_id, + client = client + ) + + mlflow_list_artifacts(run_id = run_id, path = artifact_path, client = client) +} + +#' Start Run +#' +#' Starts a new run. If `client` is not provided, this function infers contextual information such as +#' source name and version, and also registers the created run as the active run. If `client` is provided, +#' no inference is done, and additional arguments such as `start_time` can be provided. +#' +#' @param run_id If specified, get the run with the specified UUID and log metrics +#' and params under that run. The run's end time is unset and its status is set to +#' running, but the run's other attributes remain unchanged. +#' @param experiment_id Used only when `run_id` is unspecified. ID of the experiment under +#' which to create the current run. If unspecified, the run is created under +#' a new experiment with a randomly generated name. +#' @param start_time Unix timestamp of when the run started in milliseconds. Only used when `client` is specified. +#' @param tags Additional metadata for run in key-value pairs. Only used when `client` is specified. +#' @template roxlate-client +#' +#' @examples +#' \dontrun{ +#' with(mlflow_start_run(), { +#' mlflow_log_metric("test", 10) +#' }) +#' } +#' +#' @export +mlflow_start_run <- function(run_id = NULL, experiment_id = NULL, start_time = NULL, tags = NULL, client = NULL) { + + # When `client` is provided, this function acts as a wrapper for `runs/create` and does not register + # an active run. + if (!is.null(client)) { + if (!is.null(run_id)) stop("`run_id` should not be specified when `client` is specified.", call. = FALSE) + run <- mlflow_create_run(client = client, start_time = start_time, + tags = tags, experiment_id = experiment_id) + return(run) + } + + # Fluent mode, check to see if extraneous params passed. + + if (!is.null(start_time)) stop("`start_time` should only be specified when `client` is specified.", call. = FALSE) + if (!is.null(tags)) stop("`tags` should only be specified when `client` is specified.", call. = FALSE) + + active_run_id <- mlflow_get_active_run_id() + if (!is.null(active_run_id)) { + stop("Run with UUID ", active_run_id, " is already active.", + call. = FALSE + ) + } + + existing_run_id <- run_id %||% { + env_run_id <- Sys.getenv("MLFLOW_RUN_ID") + if (nchar(env_run_id)) env_run_id + } + + client <- mlflow_client() + + run <- if (!is.null(existing_run_id)) { + # This is meant to pick up existing run when we're inside `mlflow_source()` called via `mlflow run`. + mlflow_get_run(client = client, run_id = existing_run_id) + } else { + experiment_id <- mlflow_infer_experiment_id(experiment_id) + client <- mlflow_client() + + args <- mlflow_get_run_context( + client, + experiment_id = experiment_id + ) + do.call(mlflow_create_run, args) + } + mlflow_set_active_run_id(mlflow_id(run)) + run +} + +mlflow_get_run_context <- function(client, ...) 
{ + UseMethod("mlflow_get_run_context") +} + +mlflow_get_run_context.default <- function(client, experiment_id, ...) { + tags <- list() + tags[[MLFLOW_TAGS$MLFLOW_USER]] <- mlflow_user() + tags[[MLFLOW_TAGS$MLFLOW_SOURCE_NAME]] <- get_source_name() + tags[[MLFLOW_TAGS$MLFLOW_SOURCE_VERSION]] <- get_source_version() + tags[[MLFLOW_TAGS$MLFLOW_SOURCE_TYPE]] <- MLFLOW_SOURCE_TYPE$LOCAL + list( + client = client, + tags = tags, + experiment_id = experiment_id %||% 0, + ... + ) +} + +#' End a Run +#' +#' Terminates a run. Attempts to end the current active run if `run_id` is not specified. +#' +#' @param status Updated status of the run. Defaults to `FINISHED`. Can also be set to +#' "FAILED" or "KILLED". +#' @param end_time Unix timestamp of when the run ended in milliseconds. +#' @template roxlate-run-id +#' @template roxlate-client +#' +#' @export +mlflow_end_run <- function(status = c("FINISHED", "FAILED", "KILLED"), + end_time = NULL, run_id = NULL, client = NULL) { + + status <- match.arg(status) + end_time <- end_time %||% current_time() + + active_run_id <- mlflow_get_active_run_id() + + if (!is.null(client) && is.null(run_id)) + stop("`run_id` must be specified when `client` is specified.", call. = FALSE) + + run <- if (!is.null(run_id)) { + client <- resolve_client(client) + mlflow_set_terminated(client = client, run_id = run_id, status = status, + end_time = end_time) + } else { + if (is.null(active_run_id)) stop("There is no active run to end.", call. = FALSE) + client <- mlflow_client() + run_id <- active_run_id + mlflow_set_terminated(client = client, run_id = active_run_id, status = status, + end_time = end_time) + } + + if (identical(run_id, active_run_id)) mlflow_set_active_run_id(NULL) + run +} + +MLFLOW_TAGS <- list( + MLFLOW_USER = "mlflow.user", + MLFLOW_SOURCE_NAME = "mlflow.source.name", + MLFLOW_SOURCE_VERSION = "mlflow.source.version", + MLFLOW_SOURCE_TYPE = "mlflow.source.type" +) diff --git a/mlflow/R/mlflow/R/tracking-server.R b/mlflow/R/mlflow/R/tracking-server.R index 3ae716f2f6391..58d145e375ea6 100644 --- a/mlflow/R/mlflow/R/tracking-server.R +++ b/mlflow/R/mlflow/R/tracking-server.R @@ -42,7 +42,7 @@ mlflow_cli_param <- function(args, param, value) { args } -#' Run the MLflow Tracking Server +#' Run MLflow Tracking Server #' #' Wrapper for `mlflow server`. #' @@ -58,7 +58,7 @@ mlflow_server <- function(file_store = "mlruns", default_artifact_root = NULL, file_store <- fs::path_abs(file_store) args <- mlflow_cli_param(list(), "--port", port) %>% - mlflow_cli_param("--file-store", file_store) %>% + mlflow_cli_param("--backend-store-uri", file_store) %>% mlflow_cli_param("--default-artifact-root", default_artifact_root) %>% mlflow_cli_param("--host", host) %>% mlflow_cli_param("--port", port) %>% @@ -73,7 +73,8 @@ mlflow_server <- function(file_store = "mlruns", default_artifact_root = NULL, "server", args, list( - background = getOption("mlflow.ui.background", TRUE) + background = getOption("mlflow.ui.background", TRUE), + client = NULL ) ) ) @@ -91,14 +92,12 @@ new_mlflow_server <- function(server_url, handle, ...) 
{ ), class = "mlflow_server" ) - - mlflow_validate_server(ms) ms } -mlflow_validate_server <- function(ms) { +mlflow_validate_server <- function(client) { wait_for( - function() mlflow_rest(client = ms, "experiments", "list"), + function() mlflow_rest("experiments", "list", client = client), getOption("mlflow.connect.wait", 10), getOption("mlflow.connect.sleep", 1) ) diff --git a/mlflow/R/mlflow/R/tracking-ui.R b/mlflow/R/mlflow/R/tracking-ui.R index b42843afbdc64..b9d9aaf4039e3 100644 --- a/mlflow/R/mlflow/R/tracking-ui.R +++ b/mlflow/R/mlflow/R/tracking-ui.R @@ -5,14 +5,14 @@ mlflow_view_url <- function(url) { invisible(url) } -#' MLflow User Interface +#' Run MLflow User Interface #' -#' Launches MLflow user interface. +#' Launches the MLflow user interface. #' #' @examples #' \dontrun{ #' library(mlflow) -#' mlflow_install() +#' install_mlflow() #' #' # launch mlflow ui locally #' mlflow_ui() @@ -22,20 +22,20 @@ mlflow_view_url <- function(url) { #' mlflow_ui() #' } #' -#' @param x An `mlflow_client` object. +#' @template roxlate-client #' @param ... Optional arguments passed to `mlflow_server()` when `x` is a path to a file store. #' @export -mlflow_ui <- function(x, ...) { +mlflow_ui <- function(client, ...) { UseMethod("mlflow_ui") } #' @export -mlflow_ui.mlflow_client <- function(x, ...) { - mlflow_view_url(x$server_url) +mlflow_ui.mlflow_client <- function(client, ...) { + mlflow_view_url(client$get_host_creds()$host) } #' @export -mlflow_ui.NULL <- function(x, ...) { +mlflow_ui.NULL <- function(client, ...) { client <- mlflow_client() mlflow_ui(client) } diff --git a/mlflow/R/mlflow/R/tracking-utils.R b/mlflow/R/mlflow/R/tracking-utils.R index 1153d4a034ed3..8ca9e5d045e2f 100644 --- a/mlflow/R/mlflow/R/tracking-utils.R +++ b/mlflow/R/mlflow/R/tracking-utils.R @@ -25,12 +25,31 @@ get_source_version <- function() { ) } -mlflow_get_or_start_run <- function() { - mlflow_active_run() %||% mlflow_start_run() +mlflow_get_active_run_id_or_start_run <- function() { + mlflow_get_active_run_id() %||% mlflow_id(mlflow_start_run()) +} + + +mlflow_get_experiment_id_from_env <- function(client = mlflow_client()) { + name <- Sys.getenv("MLFLOW_EXPERIMENT_NAME", unset = NA) + if (!is.na(name)) { + mlflow_get_experiment(client = client, name = name)$experiment_id + } else { + id <- Sys.getenv("MLFLOW_EXPERIMENT_ID", unset = NA) + if (is.na(id)) NULL else id + } +} + +mlflow_infer_experiment_id <- function(experiment_id = NULL) { + experiment_id %||% mlflow_get_active_experiment_id() %||% mlflow_get_experiment_id_from_env() } #' @export with.mlflow_run <- function(data, expr, ...) { + run_id <- mlflow_id(data) + if (!identical(run_id, mlflow_get_active_run_id())) { + stop("`with()` should only be used with `mlflow_start_run()`.", call. = FALSE) + } tryCatch( { @@ -47,11 +66,6 @@ with.mlflow_run <- function(data, expr, ...) 
{ invisible(NULL) } - -run_id <- function(run) cast_nullable_string(run$info$run_uuid) - -active_run_id <- function() run_id(mlflow_active_run()) - current_time <- function() { round(as.numeric(Sys.time()) * 1000) } @@ -93,3 +107,114 @@ mlflow_user <- function() { else "unknown" } + +MLFLOW_SOURCE_TYPE <- list( + NOTEBOOK = "NOTEBOOK", + JOB = "JOB", + PROJECT = "PROJECT", + LOCAL = "LOCAL", + UNKNOWN = "UNKNOWN" +) + +resolve_client_and_run_id <- function(client, run_id) { + run_id <- cast_nullable_string(run_id) + if (is.null(client)) { + if (is.null(run_id)) { + run_id <- mlflow_get_active_run_id_or_start_run() + } + client <- mlflow_client() + } else { + client <- resolve_client(client) + if (is.null(run_id)) stop("`run_id` must be specified when `client` is specified.", call. = FALSE) + } + list(client = client, run_id = run_id) +} + +parse_run <- function(r) { + info <- parse_run_info(r$info) + + info$metrics <- parse_run_data(r$data$metrics) + info$params <- parse_run_data(r$data$params) + info$tags <- parse_run_data(r$data$tags) + + new_mlflow_run(info) +} + +fill_missing_run_cols <- function(r) { + # Ensure the current runs list has at least all the names in expected_list + expected_names <- c("run_uuid", "experiment_id", "user_id", "status", "start_time", + "artifact_uri", "lifecycle_stage", "run_id", "end_time") + r[setdiff(expected_names, names(r))] <- NA + r +} + +parse_run_info <- function(r) { + # TODO: Consider adding dplyr back after 1.0 along with a minimum rlang version to avoid + # dependency conflicts. The dplyr implementation is likely faster. + r %>% + purrr::map_at(c("start_time", "end_time"), milliseconds_to_date) %>% + fill_missing_run_cols %>% + tibble::as_tibble() +} + +parse_run_data <- function(d) { + if (is.null(d)) return(NA) + d %>% + purrr::transpose() %>% + purrr::map(unlist) %>% + purrr::map_at("timestamp", milliseconds_to_date) %>% + purrr::map_at("step", as.double) %>% + tibble::as_tibble() %>% + list() +} + +resolve_experiment_id <- function(experiment_id) { + mlflow_infer_experiment_id(experiment_id) %||% + stop("`experiment_id` must be specified when there is no active experiment.", call. = FALSE) +} + +resolve_run_id <- function(run_id) { + cast_nullable_string(run_id) %||% + mlflow_get_active_run_id() %||% + stop("`run_id` must be specified when there is no active run.", call. = FALSE) +} + +new_mlflow_experiment <- function(x) { + tibble::new_tibble(x, nrow = 1, class = "mlflow_experiment") +} + +new_mlflow_run <- function(x) { + tibble::new_tibble(x, nrow = 1, class = "mlflow_run") +} + + +#' Get Run or Experiment ID +#' +#' Extracts the ID of the run or experiment. +#' +#' @param object An `mlflow_run` or `mlflow_experiment` object. +#' @export +mlflow_id <- function(object) { + UseMethod("mlflow_id") +} + +#' @rdname mlflow_id +#' @export +mlflow_id.mlflow_run <- function(object) { + object$run_uuid %||% stop("Cannot extract Run ID.", call. = FALSE) +} + +#' @rdname mlflow_id +#' @export +mlflow_id.mlflow_experiment <- function(object) { + object$experiment_id %||% stop("Cannot extract Experiment ID.", call. = FALSE) +} + +resolve_client <- function(client) { + if (is.null(client)) { + mlflow_client() + } else { + if (!inherits(client, "mlflow_client")) stop("`client` must be an `mlflow_client` object.", call. 
= FALSE) + client + } +} diff --git a/mlflow/R/mlflow/README.Rmd b/mlflow/R/mlflow/README.Rmd index 39170111cc5f6..61bdc0ce2ff54 100644 --- a/mlflow/R/mlflow/README.Rmd +++ b/mlflow/R/mlflow/README.Rmd @@ -26,7 +26,7 @@ Install `mlflow` followed by installing the `mlflow` runtime as follows: ```{r eval=FALSE} devtools::install_github("mlflow/mlflow", subdir = "mlflow/R/mlflow") -mlflow::mlflow_install() +mlflow::install_mlflow() ``` Notice also that [Anaconda](https://www.anaconda.com/download/) or [Miniconda](https://conda.io/miniconda.html) need to be manually installed. @@ -43,7 +43,7 @@ Then install the latest released `mlflow` runtime. ```{r, eval=FALSE} # Install latest released version -mlflow::mlflow_install() +mlflow::install_mlflow() ``` However, currently, the development runtime of `mlflow` is also required; which means you also need to download or clone the `mlflow` GitHub repo: diff --git a/mlflow/R/mlflow/README.md b/mlflow/R/mlflow/README.md index c94d098d737eb..44df874282e81 100644 --- a/mlflow/R/mlflow/README.md +++ b/mlflow/R/mlflow/README.md @@ -17,7 +17,7 @@ Install `mlflow` followed by installing the `mlflow` runtime as follows: ``` r devtools::install_github("mlflow/mlflow", subdir = "mlflow/R/mlflow") -mlflow::mlflow_install() +mlflow::install_mlflow() ``` Notice also that [Anaconda](https://www.anaconda.com/download/) or @@ -36,7 +36,7 @@ Then install the latest released `mlflow` runtime. ``` r # Install latest released version -mlflow::mlflow_install() +mlflow::install_mlflow() ``` However, currently, the development runtime of `mlflow` is also @@ -292,4 +292,4 @@ follows: ## Contributing -See the [MLflow contribution guidelines](../../../CONTRIBUTING.rst). +See the [MLflow contribution guidelines](https://github.com/mlflow/mlflow/blob/master/CONTRIBUTING.rst). diff --git a/mlflow/R/mlflow/document.R b/mlflow/R/mlflow/document.R index 84f9f44f58900..e6905291d1483 100644 --- a/mlflow/R/mlflow/document.R +++ b/mlflow/R/mlflow/document.R @@ -1,9 +1,13 @@ -# Generate docs as markdown -Rd2md::ReferenceManual() +# Generate docs as markdown into a per-session tempdir that's automatically cleaned up when +# the R session terminates. +Rd2md::ReferenceManual(outdir = tempdir()) # Remove markdown package description -markdown_doc <- readLines("Reference_Manual_mlflow.md") -first_function <- which(grepl("crate", markdown_doc))[[1]] +markdown_doc <- readLines(file.path(tempdir(), "Reference_Manual_mlflow.md")) +# Somewhat of a hack: find the second occurrence of "```", which delimits the TOC generated by +# Rd2md, and remove all preceding lines to delete the TOC. +toc_delimiter = "```" +first_function <- which(grepl(toc_delimiter, markdown_doc))[[2]] + 1 markdown_fixed <- markdown_doc[first_function:length(markdown_doc)] # Remove function name from section @@ -14,7 +18,7 @@ markdown_fixed <- gsub("## Description", "", markdown_fixed) markdown_fixed <- gsub("## Usage", "", markdown_fixed) # Remove objects exported from other packages section -last_section <- which(grepl("Objects exported from other packages", markdown_fixed))[[1]] +last_section <- which(grepl("reexports", markdown_fixed))[[1]] markdown_fixed <- markdown_fixed[1:last_section - 1] # Write fixed markdown file @@ -35,9 +39,9 @@ rst_header <- ".. _R-api: R API ======== -The MLflow R API allows you to use MLflow :doc:`Tracking `, :doc:`Projects ` and :doc:`Models `. +The MLflow `R `_ API allows you to use MLflow :doc:`Tracking `, :doc:`Projects ` and :doc:`Models `. 
-For instance, you can use the R API to `install MLflow`_, start the `user interface `_, `create `_ and `list experiments`_, `save models `_, `run projects `_ and `serve models `_ among many other functions available in the R API. +You can use the R API to `install MLflow `_, start the `user interface `_, `create `_ and `list experiments `_, `save models `_, `run projects `_ and `serve models `_ among many other functions available in the R API. .. contents:: Table of Contents :local: @@ -48,5 +52,3 @@ rst_doc <- c(rst_header, rst_doc) writeLines(rst_doc, "../../../docs/source/R-api.rst") # Generate docs by using an mlflow virtualenv and running `make` from `mlflow/docs` - - diff --git a/mlflow/R/mlflow/man-roxygen/roxlate-client.R b/mlflow/R/mlflow/man-roxygen/roxlate-client.R index b4ef7db41b0de..b88b2b5e06e57 100644 --- a/mlflow/R/mlflow/man-roxygen/roxlate-client.R +++ b/mlflow/R/mlflow/man-roxygen/roxlate-client.R @@ -1,6 +1,4 @@ -#' @param client An `mlflow_client` object. -#' @keywords internal -#' @details The Tracking Client family of functions require an MLflow client to be -#' specified explicitly. These functions allow for greater control of where the -#' operations take place in terms of services and runs, but are more verbose -#' compared to the Fluent API. +#' @param client (Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +#' If specified, MLflow will use the tracking server associated with the passed-in client. If +#' unspecified (the common case), +#' MLflow will use the tracking server associated with the current tracking URI. diff --git a/mlflow/R/mlflow/man-roxygen/roxlate-fluent.R b/mlflow/R/mlflow/man-roxygen/roxlate-fluent.R deleted file mode 100644 index 00dfda2d85bad..0000000000000 --- a/mlflow/R/mlflow/man-roxygen/roxlate-fluent.R +++ /dev/null @@ -1,4 +0,0 @@ -#' @details The fluent API family of functions operate with an implied MLflow client -#' determined by the service set by `mlflow_set_tracking_uri()`. For operations -#' involving a run it adopts the current active run, or, if one does not exist, -#' starts one through the implied service. diff --git a/mlflow/R/mlflow/man-roxygen/roxlate-model-uri.R b/mlflow/R/mlflow/man-roxygen/roxlate-model-uri.R new file mode 100644 index 0000000000000..3dab4012bdc68 --- /dev/null +++ b/mlflow/R/mlflow/man-roxygen/roxlate-model-uri.R @@ -0,0 +1,12 @@ +#' @param model_uri The location, in URI format, of the MLflow model. +#' @details The URI scheme must be supported by MLflow - i.e. there has to be an MLflow artifact +#' repository corresponding to the scheme of the URI. The content is expected to point to a +#' directory containing MLmodel. The following are examples of valid model uris: +#' +#' - ``file:///absolute/path/to/local/model`` +#' - ``file:relative/path/to/local/model`` +#' - ``s3://my_bucket/path/to/model`` +#' - ``runs://run-relative/path/to/model`` +#' +#' For more information about supported URI schemes, see the Artifacts Documentation at +#' https://www.mlflow.org/docs/latest/tracking.html#supported-artifact-stores. diff --git a/mlflow/R/mlflow/man/crate.Rd b/mlflow/R/mlflow/man/crate.Rd deleted file mode 100644 index d246205a47870..0000000000000 --- a/mlflow/R/mlflow/man/crate.Rd +++ /dev/null @@ -1,89 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/crate.R -\name{crate} -\alias{crate} -\title{Crate a function to share with another process} -\usage{ -crate(.fn, ...) -} -\arguments{ -\item{.fn}{A fresh formula or function. 
"Fresh" here means that -they should be declared in the call to `crate()`. See examples if -you need to crate a function that is already defined. Formulas -are converted to purrr-like lambda functions using -[rlang::as_function()].} - -\item{...}{Arguments to declare in the environment of `.fn`. If a -name is supplied, the object is assigned to that name. Otherwise -the argument is automatically named after itself.} -} -\description{ -`crate()` creates functions in a self-contained environment -(technically, a child of the base environment). This has two -advantages: - -* They can easily be executed in another process. - -* Their effects are reproducible. You can run them locally with the - same results as on a different process. - -Creating self-contained functions requires some care, see section -below. -} -\section{Creating self-contained functions}{ - - -* They should call package functions with an explicit `::` - namespace. This includes packages in the default search path with - the exception of the base package. For instance `var()` from the - stats package must be called with its namespace prefix: - `stats::var(x)`. - -* They should declare any data they depend on. You can declare data - by supplying additional arguments or by unquoting objects with `!!`. -} - -\examples{ -# You can create functions using the ordinary notation: -crate(function(x) stats::var(x)) - -# Or the formula notation: -crate(~stats::var(.x)) - -# Declare data by supplying named arguments. You can test you have -# declared all necessary data by calling your crated function: -na_rm <- TRUE -fn <- crate(~stats::var(.x, na.rm = na_rm)) -try(fn(1:10)) - -# Arguments are automatically named after themselves so that the -# following are equivalent: -crate(~stats::var(.x, na.rm = na_rm), na_rm = na_rm) -crate(~stats::var(.x, na.rm = na_rm), na_rm) - -# However if you supply a complex expression, do supply a name! -crate(~stats::var(.x, na.rm = na_rm), !na_rm) -crate(~stats::var(.x, na.rm = na_rm), na_rm = na_rm) - -# For small data it is handy to unquote instead. Unquoting inlines -# objects inside the function. This is less verbose if your -# function depends on many small objects: -fn <- crate(~stats::var(.x, na.rm = !!na_rm)) -fn(1:10) - -# One downside is that the individual sizes of unquoted objects -# won't be shown in the crate printout: -fn - - -# The function or formula you pass to crate() should defined inside -# the crate() call, i.e. you can't pass an already defined -# function: -fn <- function(x) toupper(x) -try(crate(fn)) - -# If you really need to crate an existing function, you can -# explicitly set its environment to the crate environment with the -# set_env() function from rlang: -crate(rlang::set_env(fn)) -} diff --git a/mlflow/R/mlflow/man/install_mlflow.Rd b/mlflow/R/mlflow/man/install_mlflow.Rd new file mode 100644 index 0000000000000..f71c160ce9ee2 --- /dev/null +++ b/mlflow/R/mlflow/man/install_mlflow.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/install.R +\name{install_mlflow} +\alias{install_mlflow} +\title{Install MLflow} +\usage{ +install_mlflow() +} +\description{ +Installs auxiliary dependencies of MLflow (e.g. the MLflow CLI). As a one-time setup step, you +must run install_mlflow() to install these dependencies before calling other MLflow APIs. +} +\details{ +install_mlflow() requires Python and Conda to be installed. 
+See \url{https://www.python.org/getit/} and \url{https://docs.conda.io/projects/conda/en/latest/user-guide/install/}. +} +\examples{ +\dontrun{ +library(mlflow) +install_mlflow() +} + +} diff --git a/mlflow/R/mlflow/man/is_crate.Rd b/mlflow/R/mlflow/man/is_crate.Rd deleted file mode 100644 index 817cb816402df..0000000000000 --- a/mlflow/R/mlflow/man/is_crate.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/crate.R -\name{is_crate} -\alias{is_crate} -\title{Is an object a crate?} -\usage{ -is_crate(x) -} -\arguments{ -\item{x}{An object to test.} -} -\description{ -Is an object a crate? -} diff --git a/mlflow/R/mlflow/man/mlflow_active_run.Rd b/mlflow/R/mlflow/man/mlflow_active_run.Rd deleted file mode 100644 index eb4d79bce1a90..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_active_run.Rd +++ /dev/null @@ -1,11 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-globals.R -\name{mlflow_active_run} -\alias{mlflow_active_run} -\title{Active Run} -\usage{ -mlflow_active_run() -} -\description{ -Retrieves the active run. -} diff --git a/mlflow/R/mlflow/man/mlflow_cli.Rd b/mlflow/R/mlflow/man/mlflow_cli.Rd deleted file mode 100644 index 01a2900c2bae1..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_cli.Rd +++ /dev/null @@ -1,35 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cli.R -\name{mlflow_cli} -\alias{mlflow_cli} -\title{MLflow Command} -\usage{ -mlflow_cli(..., background = FALSE, echo = TRUE, - stderr_callback = NULL) -} -\arguments{ -\item{...}{The parameters to pass to the command line.} - -\item{background}{Should this command be triggered as a background task? -Defaults to \code{FALSE}.} - -\item{echo}{Print the standard output and error to the screen? Defaults to -\code{TRUE}, does not apply to background tasks.} - -\item{stderr_callback}{NULL, or a function to call for every chunk of the standard error.} -} -\value{ -A \code{processx} task. -} -\description{ -Executes a generic MLflow command through the commmand line interface. -} -\examples{ -\dontrun{ -library(mlflow) -mlflow_install() - -mlflow_cli("server", "--help") -} - -} diff --git a/mlflow/R/mlflow/man/mlflow_client.Rd b/mlflow/R/mlflow/man/mlflow_client.Rd index 2b6cc0b096571..e0d69491e16c1 100644 --- a/mlflow/R/mlflow/man/mlflow_client.Rd +++ b/mlflow/R/mlflow/man/mlflow_client.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/tracking-client.R \name{mlflow_client} \alias{mlflow_client} -\title{Initialize an MLflow client} +\title{Initialize an MLflow Client} \usage{ mlflow_client(tracking_uri = NULL) } @@ -11,6 +11,6 @@ mlflow_client(tracking_uri = NULL) set by `mlflow_set_tracking_uri()`.} } \description{ -Initialize an MLflow client +Initializes and returns an MLflow client that communicates with the tracking server or store +at the specified URI. 
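
A short sketch of explicit-client usage as described here (hypothetical local tracking server and experiment ID):

  client <- mlflow_client("http://localhost:5000")  # hypothetical tracking URI
  mlflow_list_run_infos(experiment_id = "0", client = client)
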
} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_create_experiment.Rd b/mlflow/R/mlflow/man/mlflow_client_create_experiment.Rd deleted file mode 100644 index f106e38ffe887..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_create_experiment.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_create_experiment} -\alias{mlflow_client_create_experiment} -\title{Create Experiment - Tracking Client} -\usage{ -mlflow_client_create_experiment(client, name, artifact_location = NULL) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{name}{The name of the experiment to create.} - -\item{artifact_location}{Location where all artifacts for this experiment are stored. If -not provided, the remote server will select an appropriate default.} -} -\description{ -Creates an MLflow experiment. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_create_run.Rd b/mlflow/R/mlflow/man/mlflow_client_create_run.Rd deleted file mode 100644 index 68fe8633d052e..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_create_run.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_create_run} -\alias{mlflow_client_create_run} -\title{Create Run} -\usage{ -mlflow_client_create_run(client, experiment_id, user_id = NULL, - run_name = NULL, source_type = NULL, source_name = NULL, - entry_point_name = NULL, start_time = NULL, source_version = NULL, - tags = NULL) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{experiment_id}{Unique identifier for the associated experiment.} - -\item{user_id}{User ID or LDAP for the user executing the run.} - -\item{run_name}{Human readable name for run.} - -\item{source_type}{Originating source for this run. One of Notebook, Job, Project, Local or Unknown.} - -\item{source_name}{String descriptor for source. For example, name or description of the notebook, or job name.} - -\item{entry_point_name}{Name of the entry point for the run.} - -\item{start_time}{Unix timestamp of when the run started in milliseconds.} - -\item{source_version}{Git version of the source code used to create run.} - -\item{tags}{Additional metadata for run in key-value pairs.} -} -\description{ -reate a new run within an experiment. A run is usually a single execution of a machine learning or data ETL pipeline. -} -\details{ -MLflow uses runs to track Param, Metric, and RunTag, associated with a single execution. - -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. 
-} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_delete_experiment.Rd b/mlflow/R/mlflow/man/mlflow_client_delete_experiment.Rd deleted file mode 100644 index 3dcd1f03c29c9..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_delete_experiment.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_delete_experiment} -\alias{mlflow_client_delete_experiment} -\title{Delete Experiment} -\usage{ -mlflow_client_delete_experiment(client, experiment_id) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{experiment_id}{ID of the associated experiment. This field is required.} -} -\description{ -Mark an experiment and associated runs, params, metrics, … etc for deletion. If the - experiment uses FileStore, artifacts associated with experiment are also deleted. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_delete_run.Rd b/mlflow/R/mlflow/man/mlflow_client_delete_run.Rd deleted file mode 100644 index 9a2b95a58c66f..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_delete_run.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_delete_run} -\alias{mlflow_client_delete_run} -\title{Delete a Run} -\usage{ -mlflow_client_delete_run(client, run_id) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} -} -\description{ -Delete a Run -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_download_artifacts.Rd b/mlflow/R/mlflow/man/mlflow_client_download_artifacts.Rd deleted file mode 100644 index 301f438ddf73e..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_download_artifacts.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_download_artifacts} -\alias{mlflow_client_download_artifacts} -\title{Download Artifacts} -\usage{ -mlflow_client_download_artifacts(client, run_id, path) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} - -\item{path}{Relative source path to the desired artifact.} -} -\description{ -Download an artifact file or directory from a run to a local directory if applicable, - and return a local path for it. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. 
-} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_get_experiment.Rd b/mlflow/R/mlflow/man/mlflow_client_get_experiment.Rd deleted file mode 100644 index 723639187fd0c..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_get_experiment.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_get_experiment} -\alias{mlflow_client_get_experiment} -\title{Get Experiment} -\usage{ -mlflow_client_get_experiment(client, experiment_id) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{experiment_id}{Identifer to get an experiment.} -} -\description{ -Get meta data for experiment and a list of runs for this experiment. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_get_experiment_by_name.Rd b/mlflow/R/mlflow/man/mlflow_client_get_experiment_by_name.Rd deleted file mode 100644 index 35f55f9c0e4f2..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_get_experiment_by_name.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_get_experiment_by_name} -\alias{mlflow_client_get_experiment_by_name} -\title{Get Experiment by Name} -\usage{ -mlflow_client_get_experiment_by_name(client, name) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{name}{The experiment name.} -} -\description{ -Get meta data for experiment by name. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_get_run.Rd b/mlflow/R/mlflow/man/mlflow_client_get_run.Rd deleted file mode 100644 index 190eef30dc5dc..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_get_run.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_get_run} -\alias{mlflow_client_get_run} -\title{Get Run} -\usage{ -mlflow_client_get_run(client, run_id) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} -} -\description{ -Get meta data, params, tags, and metrics for run. Only last logged value for each metric is returned. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. 
-} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_list_artifacts.Rd b/mlflow/R/mlflow/man/mlflow_client_list_artifacts.Rd deleted file mode 100644 index dfe37ee86723c..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_list_artifacts.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_list_artifacts} -\alias{mlflow_client_list_artifacts} -\title{List artifacts} -\usage{ -mlflow_client_list_artifacts(client, run_id, path = NULL) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} - -\item{path}{The run's relative artifact path to list from. If not specified, it is -set to the root artifact path} -} -\description{ -List artifacts -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_list_experiments.Rd b/mlflow/R/mlflow/man/mlflow_client_list_experiments.Rd deleted file mode 100644 index 83d11d488cd5b..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_list_experiments.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_list_experiments} -\alias{mlflow_client_list_experiments} -\title{List Experiments} -\usage{ -mlflow_client_list_experiments(client, view_type = c("ACTIVE_ONLY", - "DELETED_ONLY", "ALL")) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{view_type}{Qualifier for type of experiments to be returned. Defaults to `ACTIVE_ONLY`.} -} -\description{ -Get a list of all experiments. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_log_artifact.Rd b/mlflow/R/mlflow/man/mlflow_client_log_artifact.Rd deleted file mode 100644 index 4ef5ee6b2ad95..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_log_artifact.Rd +++ /dev/null @@ -1,56 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_log_artifact} -\alias{mlflow_client_log_artifact} -\title{Log Artifact} -\usage{ -mlflow_client_log_artifact(client, run_id, path, artifact_path = NULL) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} - -\item{path}{The file or directory to log as an artifact.} - -\item{artifact_path}{Destination path within the run’s artifact URI.} -} -\description{ -Logs an specific file or directory as an artifact. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. 
- -When logging to Amazon S3, ensure that the user has a proper policy -attach to it, for instance: - -\code{ -{ -"Version": "2012-10-17", -"Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket", - "s3:GetBucketLocation" - ], - "Resource": [ - "arn:aws:s3:::mlflow-test/*", - "arn:aws:s3:::mlflow-test" - ] - } - ] -} -} - -Additionally, at least the \code{AWS_ACCESS_KEY_ID} and \code{AWS_SECRET_ACCESS_KEY} -environment variables must be set to the corresponding key and secrets provided -by Amazon IAM. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_log_metric.Rd b/mlflow/R/mlflow/man/mlflow_client_log_metric.Rd deleted file mode 100644 index 9ac9e98379f43..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_log_metric.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_log_metric} -\alias{mlflow_client_log_metric} -\title{Log Metric} -\usage{ -mlflow_client_log_metric(client, run_id, key, value, timestamp = NULL) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} - -\item{key}{Name of the metric.} - -\item{value}{Float value for the metric being logged.} - -\item{timestamp}{Unix timestamp in milliseconds at the time metric was logged.} -} -\description{ -API to log a metric for a run. Metrics key-value pair that record a single float measure. - During a single execution of a run, a particular metric can be logged several times. - Backend will keep track of historical values along with timestamps. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_log_param.Rd b/mlflow/R/mlflow/man/mlflow_client_log_param.Rd deleted file mode 100644 index 222a8bfcd2029..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_log_param.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_log_param} -\alias{mlflow_client_log_param} -\title{Log Parameter} -\usage{ -mlflow_client_log_param(client, run_id, key, value) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} - -\item{key}{Name of the parameter.} - -\item{value}{String value of the parameter.} -} -\description{ -API to log a parameter used for this run. Examples are params and hyperparams - used for ML training, or constant dates and values used in an ETL pipeline. - A params is a STRING key-value pair. For a run, a single parameter is allowed - to be logged only once. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. 
-} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_restore_experiment.Rd b/mlflow/R/mlflow/man/mlflow_client_restore_experiment.Rd deleted file mode 100644 index b4983e905d5db..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_restore_experiment.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_restore_experiment} -\alias{mlflow_client_restore_experiment} -\title{Restore Experiment} -\usage{ -mlflow_client_restore_experiment(client, experiment_id) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{experiment_id}{ID of the associated experiment. This field is required.} -} -\description{ -Restore an experiment marked for deletion. This also restores associated metadata, - runs, metrics, and params. If experiment uses FileStore, underlying artifacts - associated with experiment are also restored. -} -\details{ -Throws RESOURCE_DOES_NOT_EXIST if experiment was never created or was permanently deleted. - -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_restore_run.Rd b/mlflow/R/mlflow/man/mlflow_client_restore_run.Rd deleted file mode 100644 index 0c1384164c941..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_restore_run.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_restore_run} -\alias{mlflow_client_restore_run} -\title{Restore a Run} -\usage{ -mlflow_client_restore_run(client, run_id) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} -} -\description{ -Restore a Run -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_set_tag.Rd b/mlflow/R/mlflow/man/mlflow_client_set_tag.Rd deleted file mode 100644 index 4050befc001e2..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_set_tag.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_set_tag} -\alias{mlflow_client_set_tag} -\title{Set Tag} -\usage{ -mlflow_client_set_tag(client, run_id, key, value) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Run ID.} - -\item{key}{Name of the tag. Maximum size is 255 bytes. This field is required.} - -\item{value}{String value of the tag being logged. Maximum size is 500 bytes. This field is required.} -} -\description{ -Set a tag on a run. Tags are run metadata that can be updated during and - after a run completes. -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. 
-} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_client_set_terminated.Rd b/mlflow/R/mlflow/man/mlflow_client_set_terminated.Rd deleted file mode 100644 index e7116a6dfc9f5..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_client_set_terminated.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-client.R -\name{mlflow_client_set_terminated} -\alias{mlflow_client_set_terminated} -\title{Terminate a Run} -\usage{ -mlflow_client_set_terminated(client, run_id, status = c("FINISHED", - "SCHEDULED", "FAILED", "KILLED"), end_time = NULL) -} -\arguments{ -\item{client}{An `mlflow_client` object.} - -\item{run_id}{Unique identifier for the run.} - -\item{status}{Updated status of the run. Defaults to `FINISHED`.} - -\item{end_time}{Unix timestamp of when the run ended in milliseconds.} - -\item{run_id}{Run ID.} -} -\description{ -Terminate a Run -} -\details{ -The Tracking Client family of functions require an MLflow client to be - specified explicitly. These functions allow for greater control of where the - operations take place in terms of services and runs, but are more verbose - compared to the Fluent API. -} -\keyword{internal} diff --git a/mlflow/R/mlflow/man/mlflow_create_experiment.Rd b/mlflow/R/mlflow/man/mlflow_create_experiment.Rd index 89372e58aacd8..aa6baa99e9f04 100644 --- a/mlflow/R/mlflow/man/mlflow_create_experiment.Rd +++ b/mlflow/R/mlflow/man/mlflow_create_experiment.Rd @@ -1,23 +1,22 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-experiments.R \name{mlflow_create_experiment} \alias{mlflow_create_experiment} \title{Create Experiment} \usage{ -mlflow_create_experiment(name, artifact_location = NULL) +mlflow_create_experiment(name, artifact_location = NULL, client = NULL) } \arguments{ \item{name}{The name of the experiment to create.} \item{artifact_location}{Location where all artifacts for this experiment are stored. If not provided, the remote server will select an appropriate default.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -Creates an MLflow experiment. -} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. +Creates an MLflow experiment and returns its id. } diff --git a/mlflow/R/mlflow/man/mlflow_delete_experiment.Rd b/mlflow/R/mlflow/man/mlflow_delete_experiment.Rd new file mode 100644 index 0000000000000..e93529623999a --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_delete_experiment.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-experiments.R +\name{mlflow_delete_experiment} +\alias{mlflow_delete_experiment} +\title{Delete Experiment} +\usage{ +mlflow_delete_experiment(experiment_id, client = NULL) +} +\arguments{ +\item{experiment_id}{ID of the associated experiment. This field is required.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. 
+If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Marks an experiment and associated runs, params, metrics, etc. for deletion. If the + experiment uses FileStore, artifacts associated with experiment are also deleted. +} diff --git a/mlflow/R/mlflow/man/mlflow_delete_run.Rd b/mlflow/R/mlflow/man/mlflow_delete_run.Rd new file mode 100644 index 0000000000000..6ba2f03109e56 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_delete_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_delete_run} +\alias{mlflow_delete_run} +\title{Delete a Run} +\usage{ +mlflow_delete_run(run_id, client = NULL) +} +\arguments{ +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Deletes the run with the specified ID. +} diff --git a/mlflow/R/mlflow/man/mlflow_delete_tag.Rd b/mlflow/R/mlflow/man/mlflow_delete_tag.Rd new file mode 100644 index 0000000000000..cb3f761dcd4c0 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_delete_tag.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_delete_tag} +\alias{mlflow_delete_tag} +\title{Delete Tag} +\usage{ +mlflow_delete_tag(key, run_id = NULL, client = NULL) +} +\arguments{ +\item{key}{Name of the tag. Maximum size is 255 bytes. This field is required.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Deletes a tag on a run. This is irreversible. Tags are run metadata that can be updated during a run and + after a run completes. +} diff --git a/mlflow/R/mlflow/man/mlflow_download_artifacts.Rd b/mlflow/R/mlflow/man/mlflow_download_artifacts.Rd new file mode 100644 index 0000000000000..39bac8d941eed --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_download_artifacts.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_download_artifacts} +\alias{mlflow_download_artifacts} +\title{Download Artifacts} +\usage{ +mlflow_download_artifacts(path, run_id = NULL, client = NULL) +} +\arguments{ +\item{path}{Relative source path to the desired artifact.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Download an artifact file or directory from a run to a local directory if applicable, + and return a local path for it. 
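+} +\examples{ +\dontrun{ +# A minimal usage sketch, not part of the generated docs: download a run +# artifact to a local directory. The artifact path and run ID below are +# hypothetical placeholders. +local_path <- mlflow_download_artifacts("plots", run_id = "some_run_id") +}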
+} diff --git a/mlflow/R/mlflow/man/mlflow_end_run.Rd b/mlflow/R/mlflow/man/mlflow_end_run.Rd index be4b088790764..b39dd32fc2caf 100644 --- a/mlflow/R/mlflow/man/mlflow_end_run.Rd +++ b/mlflow/R/mlflow/man/mlflow_end_run.Rd @@ -1,20 +1,25 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-runs.R \name{mlflow_end_run} \alias{mlflow_end_run} \title{End a Run} \usage{ -mlflow_end_run(status = c("FINISHED", "SCHEDULED", "FAILED", "KILLED")) +mlflow_end_run(status = c("FINISHED", "FAILED", "KILLED"), + end_time = NULL, run_id = NULL, client = NULL) } \arguments{ -\item{status}{Updated status of the run. Defaults to `FINISHED`.} +\item{status}{Updated status of the run. Defaults to `FINISHED`. Can also be set to +"FAILED" or "KILLED".} + +\item{end_time}{Unix timestamp of when the run ended in milliseconds.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -End an active MLflow run (if there is one). -} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. +Terminates a run. Attempts to end the current active run if `run_id` is not specified. } diff --git a/mlflow/R/mlflow/man/mlflow_get_experiment.Rd b/mlflow/R/mlflow/man/mlflow_get_experiment.Rd new file mode 100644 index 0000000000000..0a9b51f24715c --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_get_experiment.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-experiments.R +\name{mlflow_get_experiment} +\alias{mlflow_get_experiment} +\title{Get Experiment} +\usage{ +mlflow_get_experiment(experiment_id = NULL, name = NULL, + client = NULL) +} +\arguments{ +\item{experiment_id}{Identifer to get an experiment.} + +\item{name}{The experiment name. Only one of `name` or `experiment_id` should be specified.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Gets metadata for an experiment and a list of runs for the experiment. Attempts to obtain the +active experiment if both `experiment_id` and `name` are unspecified. +} diff --git a/mlflow/R/mlflow/man/mlflow_get_metric_history.Rd b/mlflow/R/mlflow/man/mlflow_get_metric_history.Rd new file mode 100644 index 0000000000000..f8d4d0493d3b9 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_get_metric_history.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_get_metric_history} +\alias{mlflow_get_metric_history} +\title{Get Metric History} +\usage{ +mlflow_get_metric_history(metric_key, run_id = NULL, client = NULL) +} +\arguments{ +\item{metric_key}{Name of the metric.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. 
+If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Get a list of all values for the specified metric for a given run. +} diff --git a/mlflow/R/mlflow/man/mlflow_get_run.Rd b/mlflow/R/mlflow/man/mlflow_get_run.Rd new file mode 100644 index 0000000000000..186dac4a96739 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_get_run.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_get_run} +\alias{mlflow_get_run} +\title{Get Run} +\usage{ +mlflow_get_run(run_id = NULL, client = NULL) +} +\arguments{ +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Gets metadata, params, tags, and metrics for a run. Returns a single value for each metric +key: the most recently logged metric value at the largest step. +} diff --git a/mlflow/R/mlflow/man/mlflow_get_tracking_uri.Rd b/mlflow/R/mlflow/man/mlflow_get_tracking_uri.Rd index d74dca0b7f5b6..0737483de6894 100644 --- a/mlflow/R/mlflow/man/mlflow_get_tracking_uri.Rd +++ b/mlflow/R/mlflow/man/mlflow_get_tracking_uri.Rd @@ -7,5 +7,5 @@ mlflow_get_tracking_uri() } \description{ -Get Remote Tracking URI +Gets the remote tracking URI. } diff --git a/mlflow/R/mlflow/man/mlflow_id.Rd b/mlflow/R/mlflow/man/mlflow_id.Rd new file mode 100644 index 0000000000000..0043c5dc78897 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_id.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-utils.R +\name{mlflow_id} +\alias{mlflow_id} +\alias{mlflow_id.mlflow_run} +\alias{mlflow_id.mlflow_experiment} +\title{Get Run or Experiment ID} +\usage{ +mlflow_id(object) + +\method{mlflow_id}{mlflow_run}(object) + +\method{mlflow_id}{mlflow_experiment}(object) +} +\arguments{ +\item{object}{An `mlflow_run` or `mlflow_experiment` object.} +} +\description{ +Extracts the ID of the run or experiment. +} diff --git a/mlflow/R/mlflow/man/mlflow_install.Rd b/mlflow/R/mlflow/man/mlflow_install.Rd deleted file mode 100644 index 0e189eee14c3a..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_install.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/install.R -\name{mlflow_install} -\alias{mlflow_install} -\title{Install MLflow} -\usage{ -mlflow_install() -} -\description{ -Installs MLflow for individual use. -} -\details{ -Notice that MLflow requires Python and Conda to be installed, -see \url{https://www.python.org/getit/} and \url{https://conda.io/docs/installation.html}. 
-} -\examples{ -\dontrun{ -library(mlflow) -mlflow_install() -} - -} diff --git a/mlflow/R/mlflow/man/mlflow_list_artifacts.Rd b/mlflow/R/mlflow/man/mlflow_list_artifacts.Rd new file mode 100644 index 0000000000000..0007b7e0e4371 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_list_artifacts.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_list_artifacts} +\alias{mlflow_list_artifacts} +\title{List Artifacts} +\usage{ +mlflow_list_artifacts(path = NULL, run_id = NULL, client = NULL) +} +\arguments{ +\item{path}{The run's relative artifact path to list from. If not specified, it is +set to the root artifact path.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Gets a list of artifacts. +} diff --git a/mlflow/R/mlflow/man/mlflow_list_experiments.Rd b/mlflow/R/mlflow/man/mlflow_list_experiments.Rd new file mode 100644 index 0000000000000..6f6fb834c4dc3 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_list_experiments.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-experiments.R +\name{mlflow_list_experiments} +\alias{mlflow_list_experiments} +\title{List Experiments} +\usage{ +mlflow_list_experiments(view_type = c("ACTIVE_ONLY", "DELETED_ONLY", + "ALL"), client = NULL) +} +\arguments{ +\item{view_type}{Qualifier for type of experiments to be returned. Defaults to `ACTIVE_ONLY`.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Gets a list of all experiments. +} diff --git a/mlflow/R/mlflow/man/mlflow_list_run_infos.Rd b/mlflow/R/mlflow/man/mlflow_list_run_infos.Rd new file mode 100644 index 0000000000000..571bca15dc3fb --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_list_run_infos.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_list_run_infos} +\alias{mlflow_list_run_infos} +\title{List Run Infos} +\usage{ +mlflow_list_run_infos(run_view_type = c("ACTIVE_ONLY", "DELETED_ONLY", + "ALL"), experiment_id = NULL, client = NULL) +} +\arguments{ +\item{run_view_type}{Run view type.} + +\item{experiment_id}{Experiment ID. Attempts to use the active experiment if not specified.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Returns a tibble whose columns contain run metadata (run ID, etc.) for all runs under the +specified experiment. 
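+} +\examples{ +\dontrun{ +# A minimal usage sketch, not part of the generated docs; "0" stands in +# for a real experiment ID. +runs <- mlflow_list_run_infos(run_view_type = "ACTIVE_ONLY", experiment_id = "0") +}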
+} diff --git a/mlflow/R/mlflow/man/mlflow_load_flavor.Rd b/mlflow/R/mlflow/man/mlflow_load_flavor.Rd index 74d0d4619ac0e..e45d3f75a49fa 100644 --- a/mlflow/R/mlflow/man/mlflow_load_flavor.Rd +++ b/mlflow/R/mlflow/man/mlflow_load_flavor.Rd @@ -1,16 +1,21 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model-flavor.R +% Please edit documentation in R/model.R \name{mlflow_load_flavor} \alias{mlflow_load_flavor} \title{Load MLflow Model Flavor} \usage{ -mlflow_load_flavor(model_path) +mlflow_load_flavor(flavor, model_path) } \arguments{ +\item{flavor}{An MLflow flavor object loaded by \link[mlflow]{mlflow_load_model}, with class +loaded from the flavor field in an MLmodel file.} + \item{model_path}{The path to the MLflow model wrapped in the correct class.} } \description{ -Loads an MLflow model flavor, to be used by package authors -to extend the supported MLflow models. +Loads an MLflow model using a specific flavor. This method is called internally by +\link[mlflow]{mlflow_load_model}, but is exposed for package authors to extend the supported +MLflow models. See https://mlflow.org/docs/latest/models.html#storage-format for more +info on MLflow model flavors. } diff --git a/mlflow/R/mlflow/man/mlflow_load_model.Rd b/mlflow/R/mlflow/man/mlflow_load_model.Rd index 39b95e5ee1f0a..30e5d1bac322a 100644 --- a/mlflow/R/mlflow/man/mlflow_load_model.Rd +++ b/mlflow/R/mlflow/man/mlflow_load_model.Rd @@ -2,21 +2,35 @@ % Please edit documentation in R/model.R \name{mlflow_load_model} \alias{mlflow_load_model} -\title{Load MLflow Model.} +\title{Load MLflow Model} \usage{ -mlflow_load_model(model_path, flavor = NULL, run_id = NULL) +mlflow_load_model(model_uri, flavor = NULL, client = mlflow_client()) } \arguments{ -\item{model_path}{"Path to the MLflow model. The path is relative to the run with the given -run-id or local filesystem path without run-id.} +\item{model_uri}{The location, in URI format, of the MLflow model.} -\item{flavor}{Optional flavor specification. Can be used to load a particular flavor in case -there are multiple flavors available.} +\item{flavor}{Optional flavor specification (string). Can be used to load a particular flavor in +case there are multiple flavors available.} -\item{run_id}{Optional MLflow run-id. If supplied model will be fetched from MLflow tracking -server.} +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -MLflow models can have multiple model flavors. Not all flavors / models can be loaded in R. This -method will by default search for a flavor supported by R/mlflow. +Loads an MLflow model. MLflow models can have multiple model flavors. Not all flavors / models +can be loaded in R. This method by default searches for a flavor supported by R/MLflow. +} +\details{ +The URI scheme must be supported by MLflow - i.e. there has to be an MLflow artifact + repository corresponding to the scheme of the URI. The content is expected to point to a + directory containing MLmodel. 
The following are examples of valid model URIs: + + - ``file:///absolute/path/to/local/model`` + - ``file:relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see the Artifacts Documentation at + https://www.mlflow.org/docs/latest/tracking.html#supported-artifact-stores. } diff --git a/mlflow/R/mlflow/man/mlflow_log_artifact.Rd b/mlflow/R/mlflow/man/mlflow_log_artifact.Rd index a308fe66721ca..4e91b2cb3513f 100644 --- a/mlflow/R/mlflow/man/mlflow_log_artifact.Rd +++ b/mlflow/R/mlflow/man/mlflow_log_artifact.Rd @@ -1,49 +1,30 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-runs.R \name{mlflow_log_artifact} \alias{mlflow_log_artifact} \title{Log Artifact} \usage{ -mlflow_log_artifact(path, artifact_path = NULL) +mlflow_log_artifact(path, artifact_path = NULL, run_id = NULL, + client = NULL) } \arguments{ \item{path}{The file or directory to log as an artifact.} -\item{artifact_path}{Destination path within the run’s artifact URI.} -} -\description{ -Logs an specific file or directory as an artifact. -} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. +\item{artifact_path}{Destination path within the run's artifact URI.} -When logging to Amazon S3, ensure that the user has a proper policy -attach to it, for instance: +\item{run_id}{Run ID.} -\code{ -{ -"Version": "2012-10-17", -"Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket", - "s3:GetBucketLocation" - ], - "Resource": [ - "arn:aws:s3:::mlflow-test/*", - "arn:aws:s3:::mlflow-test" - ] - } - ] +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } +\description{ +Logs a specific file or directory as an artifact for a run. } +\details{ +When logging to Amazon S3, ensure that you have the s3:PutObject, s3:GetObject, +s3:ListBucket, and s3:GetBucketLocation permissions on your bucket. Additionally, at least the \code{AWS_ACCESS_KEY_ID} and \code{AWS_SECRET_ACCESS_KEY} environment variables must be set to the corresponding key and secrets provided by Amazon IAM. diff --git a/mlflow/R/mlflow/man/mlflow_log_batch.Rd b/mlflow/R/mlflow/man/mlflow_log_batch.Rd new file mode 100644 index 0000000000000..455bc754398eb --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_log_batch.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_log_batch} +\alias{mlflow_log_batch} +\title{Log Batch} +\usage{ +mlflow_log_batch(metrics = NULL, params = NULL, tags = NULL, + run_id = NULL, client = NULL) +} +\arguments{ +\item{metrics}{A dataframe of metrics to log, containing the following columns: "key", "value", +"step", "timestamp". This dataframe cannot contain any missing ('NA') entries.} + +\item{params}{A dataframe of params to log, containing the following columns: "key", "value". 
+This dataframe cannot contain any missing ('NA') entries.} + +\item{tags}{A dataframe of tags to log, containing the following columns: "key", "value". +This dataframe cannot contain any missing ('NA') entries.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Log a batch of metrics, params, and/or tags for a run. The server will respond with an error (non-200 status code) + if any data failed to be persisted. In case of error (due to internal server error or an invalid request), partial + data may be written. +} diff --git a/mlflow/R/mlflow/man/mlflow_log_metric.Rd b/mlflow/R/mlflow/man/mlflow_log_metric.Rd index a7d141ed8b77c..ee8d0815a1188 100644 --- a/mlflow/R/mlflow/man/mlflow_log_metric.Rd +++ b/mlflow/R/mlflow/man/mlflow_log_metric.Rd @@ -1,26 +1,32 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-runs.R \name{mlflow_log_metric} \alias{mlflow_log_metric} \title{Log Metric} \usage{ -mlflow_log_metric(key, value, timestamp = NULL) +mlflow_log_metric(key, value, timestamp = NULL, step = NULL, + run_id = NULL, client = NULL) } \arguments{ \item{key}{Name of the metric.} \item{value}{Float value for the metric being logged.} -\item{timestamp}{Unix timestamp in milliseconds at the time metric was logged.} +\item{timestamp}{Timestamp at which to log the metric. Timestamp is rounded to the nearest +integer. If unspecified, the number of milliseconds since the Unix epoch is used.} + +\item{step}{Step at which to log the metric. Step is rounded to the nearest integer. If +unspecified, the default value of zero is used.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -API to log a metric for a run. Metrics key-value pair that record a single float measure. +Logs a metric for a run. A metric is a key-value pair that records a single float measure. During a single execution of a run, a particular metric can be logged several times. - Backend will keep track of historical values along with timestamps. -} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. + The MLflow Backend keeps track of historical metric values along two axes: timestamp and step. } diff --git a/mlflow/R/mlflow/man/mlflow_log_model.Rd b/mlflow/R/mlflow/man/mlflow_log_model.Rd index 4e19de69d3e4b..ae00ef097c2c8 100644 --- a/mlflow/R/mlflow/man/mlflow_log_model.Rd +++ b/mlflow/R/mlflow/man/mlflow_log_model.Rd @@ -4,15 +4,19 @@ \alias{mlflow_log_model} \title{Log Model} \usage{ -mlflow_log_model(fn, artifact_path) +mlflow_log_model(model, artifact_path, ...) 
} \arguments{ -\item{fn}{The serving function that will perform a prediction.} +\item{model}{The model that will perform a prediction.} \item{artifact_path}{Destination path where this MLflow compatible model will be saved.} + +\item{...}{Optional additional arguments passed to `mlflow_save_model()` when persisting the +model. For example, `conda_env = "/path/to/conda.yaml"` may be passed to specify a conda +dependencies file for flavors (e.g. keras) that support conda environments.} } \description{ -Logs a model in the given run. Similar to `mlflow_save_model()` +Logs a model for this run. Similar to `mlflow_save_model()` but stores model as an artifact within the active run. } diff --git a/mlflow/R/mlflow/man/mlflow_log_param.Rd b/mlflow/R/mlflow/man/mlflow_log_param.Rd index 03a67abc70c4a..053bceaaa3ff0 100644 --- a/mlflow/R/mlflow/man/mlflow_log_param.Rd +++ b/mlflow/R/mlflow/man/mlflow_log_param.Rd @@ -1,25 +1,26 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-runs.R \name{mlflow_log_param} \alias{mlflow_log_param} \title{Log Parameter} \usage{ -mlflow_log_param(key, value) +mlflow_log_param(key, value, run_id = NULL, client = NULL) } \arguments{ \item{key}{Name of the parameter.} \item{value}{String value of the parameter.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -API to log a parameter used for this run. Examples are params and hyperparams +Logs a parameter for a run. Examples are params and hyperparams used for ML training, or constant dates and values used in an ETL pipeline. - A params is a STRING key-value pair. For a run, a single parameter is allowed + A param is a STRING key-value pair. For a run, a single parameter is allowed to be logged only once. } -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. -} diff --git a/mlflow/R/mlflow/man/mlflow_param.Rd b/mlflow/R/mlflow/man/mlflow_param.Rd index 6e818fc7462be..111fe79321e79 100644 --- a/mlflow/R/mlflow/man/mlflow_param.Rd +++ b/mlflow/R/mlflow/man/mlflow_param.Rd @@ -2,20 +2,41 @@ % Please edit documentation in R/project-param.R \name{mlflow_param} \alias{mlflow_param} -\title{Read Command Line Parameter} +\title{Read Command-Line Parameter} \usage{ mlflow_param(name, default = NULL, type = NULL, description = NULL) } \arguments{ -\item{name}{The name for this parameter.} +\item{name}{The name of the parameter.} -\item{default}{The default value for this parameter.} +\item{default}{The default value of the parameter.} \item{type}{Type of this parameter. Required if `default` is not set. If specified, must be one of "numeric", "integer", or "string".} -\item{description}{Optional description for this parameter.} +\item{description}{Optional description for the parameter.} } \description{ -Reads a command line parameter. +Reads a command-line parameter passed to an MLflow project. +MLflow allows you to define named, typed input parameters to your R scripts via the mlflow_param +API. 
This is useful for experimentation, e.g. tracking multiple invocations of the same script +with different parameters. +} +\examples{ +\dontrun{ +# This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow +# project. You can run this script (assuming it's saved at /some/directory/params_example.R) +# with custom parameters via: +# mlflow_run(entry_point = "params_example.R", uri = "/some/directory", +# parameters = list(num_trees = 200, learning_rate = 0.1)) +install.packages("gbm") +library(mlflow) +library(gbm) +# define and read input parameters +num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer") +lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric") +# use params to fit a model +model <- gbm(Species ~ ., data = iris, n.trees = num_trees, shrinkage = lr) +} + } diff --git a/mlflow/R/mlflow/man/mlflow_predict_flavor.Rd b/mlflow/R/mlflow/man/mlflow_predict.Rd similarity index 56% rename from mlflow/R/mlflow/man/mlflow_predict_flavor.Rd rename to mlflow/R/mlflow/man/mlflow_predict.Rd index 3512c4eea2bd0..df17077a6fe9d 100644 --- a/mlflow/R/mlflow/man/mlflow_predict_flavor.Rd +++ b/mlflow/R/mlflow/man/mlflow_predict.Rd @@ -1,15 +1,18 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model-flavor.R -\name{mlflow_predict_flavor} -\alias{mlflow_predict_flavor} -\title{Predict over MLflow Model Flavor} +% Please edit documentation in R/model.R +\name{mlflow_predict} +\alias{mlflow_predict} +\title{Generate Prediction with MLflow Model} \usage{ -mlflow_predict_flavor(model, data) +mlflow_predict(model, data, ...) } \arguments{ \item{model}{The loaded MLflow model flavor.} \item{data}{A data frame to perform scoring.} + +\item{...}{Optional additional arguments passed to underlying predict +methods.} } \description{ Performs prediction over a model loaded using diff --git a/mlflow/R/mlflow/man/mlflow_predict_model.Rd b/mlflow/R/mlflow/man/mlflow_predict_model.Rd deleted file mode 100644 index 2ad0e9e6f069b..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_predict_model.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{mlflow_predict_model} -\alias{mlflow_predict_model} -\title{Generate prediction with MLflow model.} -\usage{ -mlflow_predict_model(model, data) -} -\arguments{ -\item{model}{MLflow model.} - -\item{data}{Dataframe to be scored.} -} -\description{ -Generate prediction with MLflow model. -} diff --git a/mlflow/R/mlflow/man/mlflow_rename_experiment.Rd b/mlflow/R/mlflow/man/mlflow_rename_experiment.Rd new file mode 100644 index 0000000000000..7856e3b73b234 --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_rename_experiment.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-experiments.R +\name{mlflow_rename_experiment} +\alias{mlflow_rename_experiment} +\title{Rename Experiment} +\usage{ +mlflow_rename_experiment(new_name, experiment_id = NULL, client = NULL) +} +\arguments{ +\item{new_name}{The experiment’s name will be changed to this. The new name must be unique.} + +\item{experiment_id}{ID of the associated experiment. This field is required.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. 
If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Renames an experiment. +} diff --git a/mlflow/R/mlflow/man/mlflow_restore_experiment.Rd b/mlflow/R/mlflow/man/mlflow_restore_experiment.Rd new file mode 100644 index 0000000000000..a2be01287ddfa --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_restore_experiment.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-experiments.R +\name{mlflow_restore_experiment} +\alias{mlflow_restore_experiment} +\title{Restore Experiment} +\usage{ +mlflow_restore_experiment(experiment_id, client = NULL) +} +\arguments{ +\item{experiment_id}{ID of the associated experiment. This field is required.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Restores an experiment marked for deletion. This also restores associated metadata, + runs, metrics, and params. If experiment uses FileStore, underlying artifacts + associated with experiment are also restored. +} +\details{ +Throws `RESOURCE_DOES_NOT_EXIST` if the experiment was never created or was permanently deleted. +} diff --git a/mlflow/R/mlflow/man/mlflow_restore_run.Rd b/mlflow/R/mlflow/man/mlflow_restore_run.Rd new file mode 100644 index 0000000000000..c20b6572eddfa --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_restore_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_restore_run} +\alias{mlflow_restore_run} +\title{Restore a Run} +\usage{ +mlflow_restore_run(run_id, client = NULL) +} +\arguments{ +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Restores the run with the specified ID. 
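+} +\examples{ +\dontrun{ +# A minimal usage sketch, not part of the generated docs; the run ID is a +# hypothetical placeholder for a previously deleted run. +mlflow_restore_run("some_run_id") +}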
+} diff --git a/mlflow/R/mlflow/man/mlflow_restore_snapshot.Rd b/mlflow/R/mlflow/man/mlflow_restore_snapshot.Rd deleted file mode 100644 index 0d4c2166fa036..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_restore_snapshot.Rd +++ /dev/null @@ -1,12 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/project-snapshot.R -\name{mlflow_restore_snapshot} -\alias{mlflow_restore_snapshot} -\title{Restore Snapshot} -\usage{ -mlflow_restore_snapshot() -} -\description{ -Restores a snapshot of all dependencies required to run the files in the -current directory -} diff --git a/mlflow/R/mlflow/man/mlflow_rfunc_predict.Rd b/mlflow/R/mlflow/man/mlflow_rfunc_predict.Rd deleted file mode 100644 index 24d3420bcb2f0..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_rfunc_predict.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{mlflow_rfunc_predict} -\alias{mlflow_rfunc_predict} -\title{Predict using RFunc MLflow Model} -\usage{ -mlflow_rfunc_predict(model_path, run_uuid = NULL, input_path = NULL, - output_path = NULL, data = NULL, restore = FALSE) -} -\arguments{ -\item{model_path}{The path to the MLflow model, as a string.} - -\item{run_uuid}{Run ID of run to grab the model from.} - -\item{input_path}{Path to 'JSON' or 'CSV' file to be used for prediction.} - -\item{output_path}{'JSON' or 'CSV' file where the prediction will be written to.} - -\item{data}{Data frame to be scored. This can be utilized for testing purposes and can only -be specified when `input_path` is not specified.} - -\item{restore}{Should \code{mlflow_restore_snapshot()} be called before serving?} -} -\description{ -Predict using an RFunc MLflow Model from a file or data frame. -} -\examples{ -\dontrun{ -library(mlflow) - -# save simple model which roundtrips data as prediction -mlflow_save_model(function(df) df, "mlflow_roundtrip") - -# save data as json -jsonlite::write_json(iris, "iris.json") - -# predict existing model from json data -mlflow_rfunc_predict("mlflow_roundtrip", "iris.json") -} - -} diff --git a/mlflow/R/mlflow/man/mlflow_rfunc_serve.Rd b/mlflow/R/mlflow/man/mlflow_rfunc_serve.Rd index ba4f903627c4e..ddd26e29e053f 100644 --- a/mlflow/R/mlflow/man/mlflow_rfunc_serve.Rd +++ b/mlflow/R/mlflow/man/mlflow_rfunc_serve.Rd @@ -4,29 +4,39 @@ \alias{mlflow_rfunc_serve} \title{Serve an RFunc MLflow Model} \usage{ -mlflow_rfunc_serve(model_path, run_uuid = NULL, host = "127.0.0.1", - port = 8090, daemonized = FALSE, browse = !daemonized, - restore = FALSE) +mlflow_rfunc_serve(model_uri, host = "127.0.0.1", port = 8090, + daemonized = FALSE, browse = !daemonized, ...) } \arguments{ -\item{model_path}{The path to the MLflow model, as a string.} - -\item{run_uuid}{ID of run to grab the model from.} +\item{model_uri}{The location, in URI format, of the MLflow model.} \item{host}{Address to use to serve model, as a string.} \item{port}{Port to use to serve model, as numeric.} -\item{daemonized}{Makes 'httpuv' server daemonized so R interactive sessions +\item{daemonized}{Makes `httpuv` server daemonized so R interactive sessions are not blocked to handle requests. 
To terminate a daemonized server, call -'httpuv::stopDaemonizedServer()' with the handle returned from this call.} +`httpuv::stopDaemonizedServer()` with the handle returned from this call.} \item{browse}{Launch browser with serving landing page?} -\item{restore}{Should \code{mlflow_restore_snapshot()} be called before serving?} +\item{...}{Optional arguments passed to `mlflow_predict()`.} } \description{ -Serve an RFunc MLflow Model as a local web api. +Serves an RFunc MLflow model as a local web API. +} +\details{ +The URI scheme must be supported by MLflow - i.e. there has to be an MLflow artifact + repository corresponding to the scheme of the URI. The content is expected to point to a + directory containing MLmodel. The following are examples of valid model URIs: + + - ``file:///absolute/path/to/local/model`` + - ``file:relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see the Artifacts Documentation at + https://www.mlflow.org/docs/latest/tracking.html#supported-artifact-stores. } \examples{ \dontrun{ diff --git a/mlflow/R/mlflow/man/mlflow_run.Rd b/mlflow/R/mlflow/man/mlflow_run.Rd index 846d18694ab22..edd9e3fa5f6e9 100644 --- a/mlflow/R/mlflow/man/mlflow_run.Rd +++ b/mlflow/R/mlflow/man/mlflow_run.Rd @@ -2,42 +2,61 @@ % Please edit documentation in R/project-run.R \name{mlflow_run} \alias{mlflow_run} -\title{Run in MLflow} +\title{Run an MLflow Project} \usage{ -mlflow_run(entry_point = NULL, uri = ".", version = NULL, - param_list = NULL, experiment_id = NULL, mode = NULL, - cluster_spec = NULL, git_username = NULL, git_password = NULL, - no_conda = FALSE, storage_dir = NULL) +mlflow_run(uri = ".", entry_point = NULL, version = NULL, + parameters = NULL, experiment_id = NULL, experiment_name = NULL, + backend = NULL, backend_config = NULL, no_conda = FALSE, + storage_dir = NULL) } \arguments{ -\item{entry_point}{Entry point within project, defaults to `main` if not specified.} - \item{uri}{A directory containing modeling scripts, defaults to the current directory.} +\item{entry_point}{Entry point within project, defaults to `main` if not specified.} + \item{version}{Version of the project to run, as a Git commit reference for Git projects.} -\item{param_list}{A list of parameters.} +\item{parameters}{A list of parameters.} \item{experiment_id}{ID of the experiment under which to launch the run.} -\item{mode}{Execution mode to use for run.} - -\item{cluster_spec}{Path to JSON file describing the cluster to use when launching a run on Databricks.} +\item{experiment_name}{Name of the experiment under which to launch the run.} -\item{git_username}{Username for HTTP(S) Git authentication.} +\item{backend}{Execution backend to use for run.} -\item{git_password}{Password for HTTP(S) Git authentication.} +\item{backend_config}{Path to JSON file which will be passed to the backend. For the Databricks backend, +it should describe the cluster to use when launching a run on Databricks.} \item{no_conda}{If specified, assume that MLflow is running within a Conda environment with the necessary -dependencies for the current project instead of attempting to create a new conda environment. Only +dependencies for the current project instead of attempting to create a new Conda environment. Only valid if running locally.} -\item{storage_dir}{Only valid when `mode` is local. 
MLflow downloads artifacts from distributed URIs passed to -parameters of type 'path' to subdirectories of storage_dir.} +\item{storage_dir}{Valid only when `backend` is local. MLflow downloads artifacts from distributed URIs passed to +parameters of type `path` to subdirectories of `storage_dir`.} } \value{ The run associated with this run. } \description{ -Wrapper for `mlflow run`. +Wrapper for the `mlflow run` CLI command. See https://www.mlflow.org/docs/latest/cli.html#run +for more info. +} +\examples{ +\dontrun{ +# This parametrized script trains a GBM model on the Iris dataset and can be run as an MLflow +# project. You can run this script (assuming it's saved at /some/directory/params_example.R) +# with custom parameters via: +# mlflow_run(entry_point = "params_example.R", uri = "/some/directory", +# parameters = list(num_trees = 200, learning_rate = 0.1)) +install.packages("gbm") +library(mlflow) +library(gbm) +# define and read input parameters +num_trees <- mlflow_param(name = "num_trees", default = 200, type = "integer") +lr <- mlflow_param(name = "learning_rate", default = 0.1, type = "numeric") +# use params to fit a model +model <- gbm(Species ~ ., data = iris, n.trees = num_trees, shrinkage = lr) +} + + } diff --git a/mlflow/R/mlflow/man/mlflow_save_flavor.Rd b/mlflow/R/mlflow/man/mlflow_save_flavor.Rd deleted file mode 100644 index 357adda0cdda6..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_save_flavor.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model-flavor.R -\name{mlflow_save_flavor} -\alias{mlflow_save_flavor} -\title{Save MLflow Model Flavor} -\usage{ -mlflow_save_flavor(x, path = "model", r_dependencies = NULL, - conda_env = NULL) -} -\arguments{ -\item{x}{The serving function or model that will perform a prediction.} - -\item{path}{Destination path where this MLflow compatible model -will be saved.} - -\item{r_dependencies}{Optional vector of paths to dependency files -to include in the model, as in \code{r-dependencies.txt} -or \code{conda.yaml}.} - -\item{conda_env}{Path to Conda dependencies file.} -} -\value{ -This funciton must return a list of flavors that conform to - the MLmodel specification. -} -\description{ -Saves model in MLflow's flavor, to be used by package authors -to extend the supported MLflow models. -} diff --git a/mlflow/R/mlflow/man/mlflow_save_flavor.keras.engine.training.Model.Rd b/mlflow/R/mlflow/man/mlflow_save_flavor.keras.engine.training.Model.Rd deleted file mode 100644 index ff8a4fed82b18..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_save_flavor.keras.engine.training.Model.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model-keras.R -\name{mlflow_save_flavor.keras.engine.training.Model} -\alias{mlflow_save_flavor.keras.engine.training.Model} -\title{Save MLflow Keras Model Flavor} -\usage{ -\method{mlflow_save_flavor}{keras.engine.training.Model}(x, - path = "model", r_dependencies = NULL, conda_env = NULL) } -\arguments{ -\item{x}{The serving function or model that will perform a prediction.} - -\item{path}{Destination path where this MLflow compatible model -will be saved.} - -\item{r_dependencies}{Optional vector of paths to dependency files -to include in the model, as in \code{r-dependencies.txt} -or \code{conda.yaml}.} - -\item{conda_env}{Path to Conda dependencies file.} -} -\value{ -This funciton must return a list of flavors that conform to - the MLmodel specification. 
-} -\description{ -Saves model in MLflow's Keras flavor. -} diff --git a/mlflow/R/mlflow/man/mlflow_save_model.Rd b/mlflow/R/mlflow/man/mlflow_save_model.Rd index 29c1434a678fb..9452e6ead029a 100644 --- a/mlflow/R/mlflow/man/mlflow_save_model.Rd +++ b/mlflow/R/mlflow/man/mlflow_save_model.Rd @@ -1,25 +1,29 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{mlflow_save_model} +% Please edit documentation in R/model-crate.R, R/model-keras.R, R/model.R +\name{mlflow_save_model.crate} +\alias{mlflow_save_model.crate} +\alias{mlflow_save_model.keras.engine.training.Model} \alias{mlflow_save_model} \title{Save Model for MLflow} \usage{ -mlflow_save_model(x, path = "model", r_dependencies = NULL, - conda_env = NULL) +\method{mlflow_save_model}{crate}(model, path, ...) + +\method{mlflow_save_model}{keras.engine.training.Model}(model, path, + conda_env = NULL, ...) + +mlflow_save_model(model, path, ...) } \arguments{ -\item{x}{The serving function or model that will perform a prediction.} +\item{model}{The model that will perform a prediction.} \item{path}{Destination path where this MLflow compatible model will be saved.} -\item{r_dependencies}{Optional vector of paths to dependency files -to include in the model, as in \code{r-dependencies.txt} -or \code{conda.yaml}.} +\item{...}{Optional additional arguments.} \item{conda_env}{Path to Conda dependencies file.} } \description{ -Saves model in MLflow's format that can later be used -for prediction and serving. +Saves a model in MLflow format that can later be used for prediction and serving. This method is +generic to allow package authors to save custom model types. } diff --git a/mlflow/R/mlflow/man/mlflow_search_runs.Rd b/mlflow/R/mlflow/man/mlflow_search_runs.Rd new file mode 100644 index 0000000000000..c90ded975798a --- /dev/null +++ b/mlflow/R/mlflow/man/mlflow_search_runs.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tracking-runs.R +\name{mlflow_search_runs} +\alias{mlflow_search_runs} +\title{Search Runs} +\usage{ +mlflow_search_runs(filter = NULL, run_view_type = c("ACTIVE_ONLY", + "DELETED_ONLY", "ALL"), experiment_ids = NULL, order_by = list(), + client = NULL) +} +\arguments{ +\item{filter}{A filter expression over params, metrics, and tags that allows returning a subset of runs. +The syntax is a subset of SQL which allows only ANDing together binary operations between a param/metric/tag and a constant.} + +\item{run_view_type}{Run view type.} + +\item{experiment_ids}{List of string experiment IDs (or a single string experiment ID) to search +over. Attempts to use active experiment if not specified.} + +\item{order_by}{List of properties to order by. Example: "metrics.acc DESC".} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} +} +\description{ +Search for runs that satisfy expressions. Search expressions can use Metric and Param keys. 
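+} +\examples{ +\dontrun{ +# A minimal sketch of the filter syntax described above, not part of the +# generated docs; the metric and param names are hypothetical. +runs <- mlflow_search_runs(filter = "metrics.rmse < 0.9 and params.model = 'gbm'") +}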
+} diff --git a/mlflow/R/mlflow/man/mlflow_server.Rd b/mlflow/R/mlflow/man/mlflow_server.Rd index cb260a26cd5aa..559331371a3b6 100644 --- a/mlflow/R/mlflow/man/mlflow_server.Rd +++ b/mlflow/R/mlflow/man/mlflow_server.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/tracking-server.R \name{mlflow_server} \alias{mlflow_server} -\title{Run the MLflow Tracking Server} +\title{Run MLflow Tracking Server} \usage{ mlflow_server(file_store = "mlruns", default_artifact_root = NULL, host = "127.0.0.1", port = 5000, workers = 4, diff --git a/mlflow/R/mlflow/man/mlflow_set_experiment.Rd b/mlflow/R/mlflow/man/mlflow_set_experiment.Rd index 0a083151eba0a..f9724591eac7c 100644 --- a/mlflow/R/mlflow/man/mlflow_set_experiment.Rd +++ b/mlflow/R/mlflow/man/mlflow_set_experiment.Rd @@ -1,21 +1,22 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-experiments.R \name{mlflow_set_experiment} \alias{mlflow_set_experiment} \title{Set Experiment} \usage{ -mlflow_set_experiment(experiment_name) +mlflow_set_experiment(experiment_name = NULL, experiment_id = NULL, + artifact_location = NULL) } \arguments{ \item{experiment_name}{Name of experiment to be activated.} + +\item{experiment_id}{ID of experiment to be activated.} + +\item{artifact_location}{Location where all artifacts for this experiment are stored. If +not provided, the remote server will select an appropriate default.} } \description{ -Set given experiment as active experiment. If experiment does not - exist, create an experiment with provided name. -} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. +Sets an experiment as the active experiment. Either the name or ID of the experiment can be provided. + If a name is provided but the experiment does not exist, this function creates an experiment + with the provided name. Returns the ID of the active experiment. } diff --git a/mlflow/R/mlflow/man/mlflow_set_tag.Rd b/mlflow/R/mlflow/man/mlflow_set_tag.Rd index fdeec6e2982ed..d61e2f1c8182f 100644 --- a/mlflow/R/mlflow/man/mlflow_set_tag.Rd +++ b/mlflow/R/mlflow/man/mlflow_set_tag.Rd @@ -1,23 +1,24 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-runs.R \name{mlflow_set_tag} \alias{mlflow_set_tag} \title{Set Tag} \usage{ -mlflow_set_tag(key, value) +mlflow_set_tag(key, value, run_id = NULL, client = NULL) } \arguments{ \item{key}{Name of the tag. Maximum size is 255 bytes. This field is required.} \item{value}{String value of the tag being logged. Maximum size is 500 bytes. This field is required.} + +\item{run_id}{Run ID.} + +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -Set a tag on a run. Tags are run metadata that can be updated during and +Sets a tag on a run. Tags are run metadata that can be updated during a run and after a run completes. 
} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. -} diff --git a/mlflow/R/mlflow/man/mlflow_snapshot.Rd b/mlflow/R/mlflow/man/mlflow_snapshot.Rd deleted file mode 100644 index c2bd95d0a0575..0000000000000 --- a/mlflow/R/mlflow/man/mlflow_snapshot.Rd +++ /dev/null @@ -1,12 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/project-snapshot.R -\name{mlflow_snapshot} -\alias{mlflow_snapshot} -\title{Dependencies Snapshot} -\usage{ -mlflow_snapshot() -} -\description{ -Creates a snapshot of all dependencies required to run the files in the -current directory. -} diff --git a/mlflow/R/mlflow/man/mlflow_start_run.Rd b/mlflow/R/mlflow/man/mlflow_start_run.Rd index 46fd8432780de..59dd80831648e 100644 --- a/mlflow/R/mlflow/man/mlflow_start_run.Rd +++ b/mlflow/R/mlflow/man/mlflow_start_run.Rd @@ -1,44 +1,39 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tracking-fluent.R +% Please edit documentation in R/tracking-runs.R \name{mlflow_start_run} \alias{mlflow_start_run} \title{Start Run} \usage{ -mlflow_start_run(run_uuid = NULL, experiment_id = NULL, - source_name = NULL, source_version = NULL, entry_point_name = NULL, - source_type = "LOCAL") +mlflow_start_run(run_id = NULL, experiment_id = NULL, + start_time = NULL, tags = NULL, client = NULL) } \arguments{ -\item{run_uuid}{If specified, get the run with the specified UUID and log metrics +\item{run_id}{If specified, get the run with the specified UUID and log metrics and params under that run. The run's end time is unset and its status is set to running, but the run's other attributes remain unchanged.} -\item{experiment_id}{Used only when ``run_uuid`` is unspecified. ID of the experiment under +\item{experiment_id}{Used only when `run_id` is unspecified. ID of the experiment under which to create the current run. If unspecified, the run is created under a new experiment with a randomly generated name.} -\item{source_name}{Name of the source file or URI of the project to be associated with the run. -Defaults to the current file if none provided.} +\item{start_time}{Unix timestamp of when the run started in milliseconds. Only used when `client` is specified.} -\item{source_version}{Optional Git commit hash to associate with the run.} +\item{tags}{Additional metadata for run in key-value pairs. Only used when `client` is specified.} -\item{entry_point_name}{Optional name of the entry point for to the current run.} - -\item{source_type}{Integer enum value describing the type of the run ("local", "project", etc.).} +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} } \description{ -Starts a new run within an experiment, should be used within a \code{with} block. -} -\details{ -The fluent API family of functions operate with an implied MLflow client - determined by the service set by `mlflow_set_tracking_uri()`. For operations - involving a run it adopts the current active run, or, if one does not exist, - starts one through the implied service. +Starts a new run. 
If `client` is not provided, this function infers contextual information such as + source name and version, and also registers the created run as the active run. If `client` is provided, + no inference is done, and additional arguments such as `start_time` can be provided. } \examples{ \dontrun{ with(mlflow_start_run(), { - mlflow_log("test", 10) + mlflow_log_metric("test", 10) }) } diff --git a/mlflow/R/mlflow/man/mlflow_ui.Rd b/mlflow/R/mlflow/man/mlflow_ui.Rd index 02ff4e0e0ddf5..e61ec4aecc09a 100644 --- a/mlflow/R/mlflow/man/mlflow_ui.Rd +++ b/mlflow/R/mlflow/man/mlflow_ui.Rd @@ -2,22 +2,25 @@ % Please edit documentation in R/tracking-ui.R \name{mlflow_ui} \alias{mlflow_ui} -\title{MLflow User Interface} +\title{Run MLflow User Interface} \usage{ -mlflow_ui(x, ...) +mlflow_ui(client, ...) } \arguments{ -\item{x}{An `mlflow_client` object.} +\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}. +If specified, MLflow will use the tracking server associated with the passed-in client. If +unspecified (the common case), +MLflow will use the tracking server associated with the current tracking URI.} \item{...}{Optional arguments passed to `mlflow_server()` when `x` is a path to a file store.} } \description{ -Launches MLflow user interface. +Launches the MLflow user interface. } \examples{ \dontrun{ library(mlflow) -mlflow_install() +install_mlflow() # launch mlflow ui locally mlflow_ui() diff --git a/mlflow/R/mlflow/man/reexports.Rd b/mlflow/R/mlflow/man/reexports.Rd index 36ff9b8fda062..7bd3499a6388f 100644 --- a/mlflow/R/mlflow/man/reexports.Rd +++ b/mlflow/R/mlflow/man/reexports.Rd @@ -5,6 +5,7 @@ \alias{reexports} \alias{\%||\%} \alias{\%>\%} +\alias{\%<-\%} \title{Objects exported from other packages} \keyword{internal} \description{ @@ -15,5 +16,7 @@ below to see their documentation. \item{purrr}{\code{\link[purrr]{\%>\%}}} \item{rlang}{\code{\link[rlang]{\%||\%}}} + + \item{zeallot}{\code{\link[zeallot]{\%<-\%}}} }} diff --git a/mlflow/R/mlflow/man/mlflow_uninstall.Rd b/mlflow/R/mlflow/man/uninstall_mlflow.Rd similarity index 61% rename from mlflow/R/mlflow/man/mlflow_uninstall.Rd rename to mlflow/R/mlflow/man/uninstall_mlflow.Rd index 4b8aca4ae3a10..3c0061c8babf7 100644 --- a/mlflow/R/mlflow/man/mlflow_uninstall.Rd +++ b/mlflow/R/mlflow/man/uninstall_mlflow.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/install.R -\name{mlflow_uninstall} -\alias{mlflow_uninstall} -\title{Uninstalls MLflow.} +\name{uninstall_mlflow} +\alias{uninstall_mlflow} +\title{Uninstall MLflow} \usage{ -mlflow_uninstall() +uninstall_mlflow() } \description{ Uninstalls MLflow by removing the Conda environment. @@ -12,8 +12,8 @@ Uninstalls MLflow by removing the Conda environment. 
\examples{ \dontrun{ library(mlflow) -mlflow_install() -mlflow_uninstall() +install_mlflow() +uninstall_mlflow() } } diff --git a/mlflow/R/mlflow/tests/testthat.R b/mlflow/R/mlflow/tests/testthat.R index 407bb32d1c93c..467c736398bed 100644 --- a/mlflow/R/mlflow/tests/testthat.R +++ b/mlflow/R/mlflow/tests/testthat.R @@ -1,14 +1,11 @@ library(testthat) library(mlflow) +library(reticulate) if (identical(Sys.getenv("NOT_CRAN"), "true")) { - if (!"r-mlflow" %in% reticulate::conda_list()$name) { - mlflow_install() - - message("Current working directory: ", getwd()) - mlflow_home <- Sys.getenv("MLFLOW_HOME", "../../../../.") - reticulate::conda_install("r-mlflow", mlflow_home, pip = TRUE) - } - + mlflow:::mlflow_maybe_create_conda_env() + message("Current working directory: ", getwd()) + mlflow_home <- Sys.getenv("MLFLOW_HOME", "../../../../.") + conda_install(c(mlflow_home), envname = mlflow:::mlflow_conda_env_name(), pip = TRUE) test_check("mlflow") } diff --git a/mlflow/R/mlflow/tests/testthat/helpers.R b/mlflow/R/mlflow/tests/testthat/helpers.R index 0665106bcbb92..42eddc86b97df 100644 --- a/mlflow/R/mlflow/tests/testthat/helpers.R +++ b/mlflow/R/mlflow/tests/testthat/helpers.R @@ -1,6 +1,6 @@ mlflow_clear_test_dir <- function(path) { - mlflow_end_run() - mlflow_set_active_experiment_id(NULL) + purrr::safely(mlflow_end_run)() + mlflow:::mlflow_set_active_experiment_id(NULL) if (dir.exists(path)) { unlink(path, recursive = TRUE) } diff --git a/mlflow/R/mlflow/tests/testthat/test-client.R b/mlflow/R/mlflow/tests/testthat/test-client.R new file mode 100644 index 0000000000000..0718d4feecb52 --- /dev/null +++ b/mlflow/R/mlflow/tests/testthat/test-client.R @@ -0,0 +1,99 @@ +context("client") + +test_that("http(s) clients work as expected", { + mlflow_clear_test_dir("mlruns") + with_mock(.env = "mlflow", mlflow_rest = function(..., client) { + args <- list(...) + expect_true(paste(args, collapse = "/") == "experiments/list") + list(experiments = c(1, 2, 3)) + }, { + with_mock(.env = "mlflow", mlflow_register_local_server = function(...) NA, { + env <- list( + MLFLOW_USERNAME = "DonaldDuck", + MLFLOW_PASSWORD = "Quack", + MLFLOW_TOKEN = "$$$", + MLFLOW_INSECURE = "True" + ) + with_envvar(env, { + http_host <- "http://remote" + client1 <- mlflow:::mlflow_client(http_host) + config <- client1$get_host_creds() + print(config) + expect_true(config$host == http_host) + expect_true(config$username == "DonaldDuck") + expect_true(config$password == "Quack") + expect_true(config$token == "$$$") + expect_true(config$insecure == "True") + https_host <- "https://remote" + client2 <- mlflow:::mlflow_client("https://remote") + config <- client2$get_host_creds() + expect_true(config$host == https_host) + env_str <- paste(env, collapse = "|") + env_str_2 <- paste(client2$get_cli_env(), collapse = "|") + expect_true(env_str == env_str_2) + }) + with_mock(.env = "mlflow", mlflow_server = function(...) list(server_url = "local_server"), { + client3 <- mlflow:::mlflow_client() + config <- client3$get_host_creds() + expect_true(config$host == "local_server") + }) + }) + }) +}) + +test_that("rest call handles errors correctly", { + mlflow_clear_test_dir("mlruns") + mock_client <- mlflow:::new_mlflow_client_impl(get_host_creds = function() { + mlflow:::new_mlflow_host_creds(host = "localhost") + }) + with_mock(.env = "httr", POST = function(...) 
{ + httr:::response( + status_code = 400, + content = charToRaw(paste("{\"error_code\":\"INVALID_PARAMETER_VALUE\",", + "\"message\":\"experiment_id must be set to a non-zero value\"}", + sep = "") + ) + )}, { + error_msg_regexp <- paste( + "API request to endpoint \'runs/create\' failed with error code 400", + "INVALID_PARAMETER_VALUE", + "experiment_id must be set to a non-zero value", + sep = ".*") + expect_error( + mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "POST"), + error_msg_regexp + ) + }) + + with_mock(.env = "httr", GET = function(...) { + httr:::response( + status_code = 500, + content = charToRaw(paste("some text.")) + ) + }, { + error_msg_regexp <- paste( + "API request to endpoint \'runs/create\' failed with error code 500", + "some text", + sep = ".*") + expect_error( + mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "GET"), + error_msg_regexp + ) + }) + + with_mock(.env = "httr", POST = function(...) { + httr:::response( + status_code = 503, + content = as.raw(c(0, 255)) + ) + }, { + error_msg_regexp <- paste( + "API request to endpoint \'runs/create\' failed with error code 503", + "00 ff", + sep = ".*") + expect_error( + mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "POST"), + error_msg_regexp + ) + }) +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-databricks-utils.R b/mlflow/R/mlflow/tests/testthat/test-databricks-utils.R new file mode 100644 index 0000000000000..b04397f7a5fca --- /dev/null +++ b/mlflow/R/mlflow/tests/testthat/test-databricks-utils.R @@ -0,0 +1,171 @@ +context("databricks-utils") + +library(withr) + +test_that("mlflow creates databricks client when scheme is databricks", { + with_mock(.env = "mlflow", get_databricks_config = function(profile) { + config_vars <- list(host = "databricks-host", token = "databricks") + config <- new_databricks_config( config_source = "env", config_vars = config_vars) + config$profile <- profile + config + }, { + with_mock(.env = "mlflow", mlflow_rest = function(..., client) { + args <- list(...) 
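+      # The client constructor is expected to issue exactly one REST call, to the
+      # experiments/list endpoint, as a connection sanity check; the mock asserts that.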
+      expect_true(paste(args, collapse = "/") == "experiments/list")
+      list(experiments = c(1, 2, 3))
+    }, {
+      client1 <- mlflow:::mlflow_client("databricks")
+      creds1 <- client1$get_host_creds()
+      expect_true(creds1$host == "databricks-host")
+      expect_true(creds1$token == "databricks")
+      expect_true(is.na(creds1$profile))
+      env1 <- client1$get_cli_env()
+      expect_true(env1$DATABRICKS_HOST == "databricks-host")
+      expect_true(env1$DATABRICKS_TOKEN == "databricks")
+      client2 <- mlflow:::mlflow_client("databricks://dbprofile")
+      creds2 <- client2$get_host_creds()
+      expect_true(creds2$host == "databricks-host")
+      expect_true(creds2$token == "databricks")
+      expect_true(creds2$profile == "dbprofile")
+    })
+  })
+})
+
+test_that("mlflow reads databricks config from correct sources", {
+  with_mock(.env = "mlflow", get_databricks_config_for_profile = function(profile) list(
+    host = "databricks-host", token = "databricks", profile = profile), {
+    config <- get_databricks_config("profile")
+    expect_true(config$profile == "profile")
+    expect_true(config$host == "databricks-host")
+    expect_true(config$token == "databricks")
+    config <- get_databricks_config(NA)
+    expect_true(config$profile == "DEFAULT")
+    expect_true(config$host == "databricks-host")
+    expect_true(config$token == "databricks")
+
+    with_mock(.env = "mlflow",
+      get_databricks_config_from_env = function() {
+        new_databricks_config("env", list(host = "host"))
+      }, {
+      config <- get_databricks_config(NA)
+      expect_true(config$profile == "DEFAULT")
+      expect_true(config$host == "databricks-host")
+      expect_true(config$token == "databricks")
+    })
+    with_mock(.env = "mlflow",
+      get_databricks_config_from_env = function() {
+        new_databricks_config("env", list(host = "env", token = "env"))
+      }, {
+      config <- get_databricks_config(NA)
+      expect_true(config$host == "env")
+      expect_true(config$token == "env")
+    })
+  })
+})
+
+test_that("mlflow can read .databrickscfg files", {
+  config_file <- file.path(tempdir(), ".databrickscfg")
+  Sys.setenv(DATABRICKS_CONFIG_FILE = config_file)
+  tryCatch(
+    {
+      config_file <- file.path(tempdir(), ".databrickscfg")
+      profile1 <- c("[PROFILE1]", "host = host1", "token = token1")
+      # The DEFAULT profile values are arbitrary; the assertions below never read them.
+      default_profile <- c("[DEFAULT]", "host = defaulthost", "token = defaulttoken")
+      donald <- c("[DONALD]", "host = duckburg", "username = donaldduck", "password = quackquack",
+                  "insecure = True")
+      broken_1 <- c("[BROKEN1]", "username = donaldduck", "token = abc")
+      broken_2 <- c("[BROKEN2]", "username = donaldduck", "host = duckburg")
+      profiles <- c(profile1, default_profile, donald, broken_1, broken_2)
+      write(profiles, file = config_file,
+            ncolumns = 1, append = FALSE, sep = "\n")
+
+      profile1 <- mlflow:::get_databricks_config_for_profile("PROFILE1")
+      expect_true(profile1$config_source == "cfgfile")
+      expect_true(profile1$host == "host1")
+      expect_true(profile1$token == "token1")
+      expect_true(is.na(profile1$username))
+      expect_true(is.na(profile1$password))
+      expect_true(profile1$insecure == "False")
+      expect_true(mlflow:::databricks_config_is_valid(profile1))
+
+      profile2 <- mlflow:::get_databricks_config_for_profile("DONALD")
+      expect_true(profile2$config_source == "cfgfile")
+      expect_true(profile2$host == "duckburg")
+      expect_true(is.na(profile2$token))
+      expect_true(profile2$username == "donaldduck")
+      expect_true(profile2$password == "quackquack")
+      expect_true(profile2$insecure == "True")
+      expect_true(mlflow:::databricks_config_is_valid(profile2))
+
+      profile3 <- mlflow:::get_databricks_config_for_profile("BROKEN1")
+      expect_true(profile3$config_source == "cfgfile")
+      expect_true(is.na(profile3$host))
+      expect_true(profile3$token == "abc")
+      expect_true(profile3$username == "donaldduck")
+      expect_true(is.na(profile3$password))
+      expect_true(profile3$insecure == "False")
+      expect_false(mlflow:::databricks_config_is_valid(profile3))
+
+      profile4 <- mlflow:::get_databricks_config_for_profile("BROKEN2")
+      expect_true(profile4$config_source == "cfgfile")
+      expect_true(profile4$host == "duckburg")
+      expect_true(is.na(profile4$token))
+      expect_true(profile4$username == "donaldduck")
+      expect_true(is.na(profile4$password))
+      expect_true(profile4$insecure == "False")
+      expect_false(mlflow:::databricks_config_is_valid(profile4))
+
+      unlink(config_file)
+      Sys.unsetenv("DATABRICKS_CONFIG_FILE")
+    },
+    error = function(cnd) {
+      unlink(config_file)
+      Sys.unsetenv("DATABRICKS_CONFIG_FILE")
+    },
+    interrupt = function(cnd) {
+      unlink(config_file)
+      Sys.unsetenv("DATABRICKS_CONFIG_FILE")
+    }
+  )
+})
+
+test_that("mlflow can read databricks env config", {
+  env <- list(
+    DATABRICKS_HOST = "envhost",
+    DATABRICKS_USERNAME = "envusername",
+    DATABRICKS_PASSWORD = "envpassword",
+    DATABRICKS_TOKEN = "envtoken",
+    DATABRICKS_INSECURE = "True")
+  with_envvar(env, {
+    envprofile <- mlflow:::get_databricks_config_from_env()
+    expect_true(envprofile$host == "envhost")
+    expect_true(envprofile$token == "envtoken")
+    expect_true(envprofile$username == "envusername")
+    expect_true(envprofile$password == "envpassword")
+    expect_true(envprofile$insecure == "True")
+    expect_true(mlflow:::databricks_config_is_valid(envprofile))
+
+    extracted_env <- mlflow:::databricks_config_as_env(envprofile)
+    expect_true(paste(env, collapse = "|") == paste(extracted_env, collapse = "|"))
+    expect_true(length(setdiff(extracted_env, env)) == 0)
+  })
+  env <- list(DATABRICKS_HOST = "envhost",
+              DATABRICKS_USERNAME = "envusername",
+              DATABRICKS_TOKEN = "envtoken")
+
+  with_envvar(env, {
+    envprofile <- mlflow:::get_databricks_config_from_env()
+    expect_true(envprofile$host == "envhost")
+    expect_true(envprofile$token == "envtoken")
+    expect_true(envprofile$username == "envusername")
+    expect_true(is.na(envprofile$password))
+    expect_true(envprofile$insecure == "False")
+    expect_true(mlflow:::databricks_config_is_valid(envprofile))
+    extracted_env <- mlflow:::databricks_config_as_env(envprofile)
+    expect_true(paste(env, collapse = "|") == paste(extracted_env, collapse = "|"))
+    expect_true(length(setdiff(extracted_env, env)) == 0)
+  })
+})
diff --git a/mlflow/R/mlflow/tests/testthat/test-model.R b/mlflow/R/mlflow/tests/testthat/test-model.R
index e44b4ae1b389f..76f27fc460b9d 100644
--- a/mlflow/R/mlflow/tests/testthat/test-model.R
+++ b/mlflow/R/mlflow/tests/testthat/test-model.R
@@ -1,33 +1,83 @@
 context("Model")

+library("carrier")
+
 test_that("mlflow can save model function", {
   mlflow_clear_test_dir("model")
   model <- lm(Sepal.Width ~ Sepal.Length, iris)
-  fn <- crate(~ stats::predict(model, .x), model)
+  fn <- crate(~ stats::predict(model, .x), model = model)
   mlflow_save_model(fn, "model")
   expect_true(dir.exists("model"))
-  temp_in <- tempfile(fileext = ".csv")
-  temp_out <- tempfile(fileext = ".csv")
-  write.csv(iris, temp_in, row.names = FALSE)
-  mlflow_rfunc_predict("model", input_path = temp_in, output_path = temp_out)
-  prediction <- read.csv(temp_out)[[1]]
-
+  # Test that we can load the model back and score it.
+ loaded_back_model <- mlflow_load_model("model") + prediction <- mlflow_predict(loaded_back_model, iris) + expect_equal( + prediction, + predict(model, iris) + ) + # Test that we can score this model with RFunc backend + temp_in_csv <- tempfile(fileext = ".csv") + temp_in_json <- tempfile(fileext = ".json") + temp_in_json_split <- tempfile(fileext = ".json") + temp_out <- tempfile(fileext = ".json") + write.csv(iris, temp_in_csv, row.names = FALSE) + mlflow_cli("models", "predict", "-m", "model", "-i", temp_in_csv, "-o", temp_out, "-t", "csv") + prediction <- unlist(jsonlite::read_json(temp_out)) expect_true(!is.null(prediction)) - expect_equal( prediction, unname(predict(model, iris)) ) -}) - -test_that("mlflow can write model with dependencies", { - mlflow_clear_test_dir("model") - model <- lm(Sepal.Width ~ Sepal.Length, iris) - fn <- crate(~ stats::predict(model, .x), model) - mlflow_save_model(fn, "model", conda_env = "conda.yaml") - mlmodel <- yaml::read_yaml("model/MLmodel") + # json records + jsonlite::write_json(iris, temp_in_json, row.names = FALSE) + mlflow_cli("models", "predict", "-m", "model", "-i", temp_in_json, "-o", temp_out, "-t", "json", + "--json-format", "records") + prediction <- unlist(jsonlite::read_json(temp_out)) + expect_true(!is.null(prediction)) + expect_equal( + prediction, + unname(predict(model, iris)) + ) + # json split + iris_split <- list(columns = names(iris)[1:4], index = row.names(iris), + data = as.matrix(iris[, 1:4])) + jsonlite::write_json(iris_split, temp_in_json_split, row.names = FALSE) + mlflow_cli("models", "predict", "-m", "model", "-i", temp_in_json_split, "-o", temp_out, "-t", + "json", "--json-format", "split") + prediction <- unlist(jsonlite::read_json(temp_out)) + expect_true(!is.null(prediction)) expect_equal( - mlmodel$flavors$crate$conda_env, - "conda.yaml" + prediction, + unname(predict(model, iris)) ) }) + +test_that("mlflow can log model and load it back with a uri", { + with(run <- mlflow_start_run(), { + model <- structure( + list(some = "stuff"), + class = "test" + ) + predictor <- crate(~ mean(as.matrix(.x)), model) + predicted <- predictor(0:10) + expect_true(5 == predicted) + mlflow_log_model(predictor, "model") + }) + runs_uri <- paste("runs:", run$run_uuid, "model", sep = "/") + loaded_model <- mlflow_load_model(runs_uri) + expect_true(5 == mlflow_predict(loaded_model, 0:10)) + actual_uri <- paste(run$artifact_uri, "model", sep = "/") + loaded_model_2 <- mlflow_load_model(actual_uri) + expect_true(5 == mlflow_predict(loaded_model_2, 0:10)) + temp_in <- tempfile(fileext = ".json") + temp_out <- tempfile(fileext = ".json") + jsonlite::write_json(0:10, temp_in) + mlflow:::mlflow_cli("models", "predict", "-m", runs_uri, "-i", temp_in, "-o", temp_out, + "--content-type", "json", "--json-format", "records") + prediction <- unlist(jsonlite::read_json(temp_out)) + expect_true(5 == prediction) + mlflow:::mlflow_cli("models", "predict", "-m", actual_uri, "-i", temp_in, "-o", temp_out, + "--content-type", "json", "--json-format", "records") + prediction <- unlist(jsonlite::read_json(temp_out)) + expect_true(5 == prediction) +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-python-install.R b/mlflow/R/mlflow/tests/testthat/test-python-install.R new file mode 100644 index 0000000000000..b221e846d3146 --- /dev/null +++ b/mlflow/R/mlflow/tests/testthat/test-python-install.R @@ -0,0 +1,18 @@ +context("Installing Python MLflow Package") + +expected_conda_env_name <- function() { + paste("r-mlflow", packageVersion("mlflow"), sep = "-") +} 
+ +test_that("MLflow installs into a conda environment with the same name as current Mlflow version", { + conda_env_name <- mlflow:::mlflow_conda_env_name() + expect_equal(conda_env_name, expected_conda_env_name()) +}) + +test_that("MLflow uses 'python' executable from correct conda environment", { + expect_true(grepl(expected_conda_env_name(), mlflow:::python_bin())) +}) + +test_that("MLflow uses 'mlflow' executable from correct conda environment", { + expect_true(grepl(expected_conda_env_name(), mlflow:::python_mlflow_bin())) +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-rest.R b/mlflow/R/mlflow/tests/testthat/test-rest.R new file mode 100644 index 0000000000000..0eeacaef68f05 --- /dev/null +++ b/mlflow/R/mlflow/tests/testthat/test-rest.R @@ -0,0 +1,51 @@ +context("rest") + +test_that("user-agent header is set", { + config <- list() + config$insecure <- FALSE + config$username <- NA + config$password <- NA + config$token <- NA + + rest_config <- mlflow:::get_rest_config(config) + + expected_user_agent <- paste("mlflow-r-client", packageVersion("mlflow"), sep = "/") + expect_equal(rest_config$headers$`User-Agent`, expected_user_agent) + expect_equal(rest_config$config, list()) +}) + +test_that("basic auth is used", { + config <- list() + config$insecure <- FALSE + config$username <- "hello" + config$password <- "secret" + config$token <- NA + + rest_config <- mlflow:::get_rest_config(config) + + expect_equal(rest_config$headers$Authorization, "Basic aGVsbG86c2VjcmV0") +}) + +test_that("token auth is used", { + config <- list() + config$insecure <- FALSE + config$username <- NA + config$password <- NA + config$token <- "taken" + + rest_config <- mlflow:::get_rest_config(config) + + expect_equal(rest_config$headers$Authorization, "Bearer taken") +}) + +test_that("insecure is used", { + config <- list() + config$insecure <- TRUE + config$username <- NA + config$password <- NA + config$token <- NA + + rest_config <- mlflow:::get_rest_config(config) + + expect_equal(rest_config$config, httr::config(ssl_verifypeer = 0, ssl_verifyhost = 0)) +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-run.R b/mlflow/R/mlflow/tests/testthat/test-run.R index 40ea79cc8c191..654b4e8a0e62a 100644 --- a/mlflow/R/mlflow/tests/testthat/test-run.R +++ b/mlflow/R/mlflow/tests/testthat/test-run.R @@ -9,3 +9,40 @@ test_that("mlflow can run and save model", { expect_true(dir.exists("mlruns/0")) expect_true(file.exists("mlruns/0/meta.yaml")) }) + + +test_that("mlflow run uses active experiment if not specified", { + with_mock(.env = "mlflow", mlflow_get_active_experiment_id = function() { + 123}, { + with_mock(.env = "mlflow", mlflow_cli = function(...){ + args <- list(...) + expect_true("--experiment-id" %in% args) + expect_false("--experiment-name" %in% args) + id <- which(args == "--experiment-id") + 1 + expect_true(args[[id]] == 123) + list(stderr = "=== Run (ID '48734e7e2e8f44228a11c0c2cbcdc8b0') succeeded ===") + }, { + mlflow_run("project") + }) + with_mock(.env = "mlflow", mlflow_cli = function(...){ + args <- list(...) + expect_true("--experiment-id" %in% args) + expect_false("--experiment-name" %in% args) + id <- which(args == "--experiment-id") + 1 + expect_true(args[[id]] == 321) + list(stderr = "=== Run (ID '48734e7e2e8f44228a11c0c2cbcdc8b0') succeeded ===") + }, { + mlflow_run("project", experiment_id = 321) + }) + with_mock(.env = "mlflow", mlflow_cli = function(...){ + args <- list(...) 
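+      # The active experiment ID (mocked to 123 above) should be forwarded to the
+      # CLI as --experiment-id when no experiment is given explicitly.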
+ expect_false("--experiment-id" %in% args) + expect_true("--experiment-name" %in% args) + id <- which(args == "--experiment-name") + 1 + expect_true(args[[id]] == "name") + list(stderr = "=== Run (ID '48734e7e2e8f44228a11c0c2cbcdc8b0') succeeded ===") + }, { + mlflow_run("project", experiment_name = "name") + }) + }) +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-serve.R b/mlflow/R/mlflow/tests/testthat/test-serve.R index f55dd6d38df1e..93d0c430266d4 100644 --- a/mlflow/R/mlflow/tests/testthat/test-serve.R +++ b/mlflow/R/mlflow/tests/testthat/test-serve.R @@ -1,15 +1,14 @@ context("Serve") +library("carrier") + test_that("mlflow can serve a model function", { mlflow_clear_test_dir("model") model <- lm(Sepal.Width ~ Sepal.Length + Petal.Width, iris) - - fn <- crate(~ stats::predict(model, .x), model) - mlflow_save_model(fn) - + fn <- crate(~ stats::predict(model, .x), model = model) + mlflow_save_model(fn, path = "model") expect_true(dir.exists("model")) - model_server <- processx::process$new( "Rscript", c( @@ -20,13 +19,14 @@ test_that("mlflow can serve a model function", { stdout = "|", stderr = "|" ) - - Sys.sleep(5) - + Sys.sleep(10) tryCatch({ status_code <- httr::status_code(httr::GET("http://127.0.0.1:8090")) }, error = function(e) { - stop(e$message, ": ", paste(model_server$read_all_error_lines(), collapse = "\n")) + write("FAILED!", stderr()) + error_text <- model_server$read_error() + model_server$kill() + stop(e$message, ": ", error_text) }) expect_equal(status_code, 200) @@ -46,7 +46,7 @@ test_that("mlflow can serve a model function", { model_server$kill() expect_equal( - unlist(http_prediction$predictions), + unlist(http_prediction), as.vector(predict(model, newdata)), tolerance = 1e-5 ) diff --git a/mlflow/R/mlflow/tests/testthat/test-tracking-experiments.R b/mlflow/R/mlflow/tests/testthat/test-tracking-experiments.R new file mode 100644 index 0000000000000..44918b28410b2 --- /dev/null +++ b/mlflow/R/mlflow/tests/testthat/test-tracking-experiments.R @@ -0,0 +1,117 @@ +context("Tracking - Experiments") + +test_that("mlflow_create/get_experiment() basic functionality (fluent)", { + mlflow_clear_test_dir("mlruns") + + experiment_1_id <- mlflow_create_experiment("exp_name", "art_loc") + experiment_1a <- mlflow_get_experiment(experiment_id = experiment_1_id) + experiment_1b <- mlflow_get_experiment(name = "exp_name") + + expect_identical(experiment_1a, experiment_1b) + expect_identical(experiment_1a$artifact_location, "art_loc") + expect_identical(experiment_1a$name, "exp_name") +}) + +test_that("mlflow_create/get_experiment() basic functionality (client)", { + mlflow_clear_test_dir("mlruns") + + client <- mlflow_client() + + experiment_1_id <- mlflow_create_experiment(client = client, "exp_name", "art_loc") + experiment_1a <- mlflow_get_experiment(client = client, experiment_id = experiment_1_id) + experiment_1b <- mlflow_get_experiment(client = client, name = "exp_name") + + expect_identical(experiment_1a, experiment_1b) + expect_identical(experiment_1a$artifact_location, "art_loc") + expect_identical(experiment_1a$name, "exp_name") +}) + +test_that("mlflow_get_experiment() not found error", { + mlflow_clear_test_dir("mlruns") + + expect_error( + mlflow_get_experiment(experiment_id = "42"), + "Could not find experiment with ID 42" + ) +}) + +test_that("mlflow_list_experiments() works properly", { + mlflow_clear_test_dir("mlruns") + client <- mlflow_client() + mlflow_create_experiment(client = client, "foo1", "art_loc1") + mlflow_create_experiment(client = client, "foo2", 
"art_loc2") + + # client + experiments_list <- mlflow_list_experiments(client = client) + expect_setequal(experiments_list$experiment_id, c("0", "1", "2")) + expect_setequal(experiments_list$name, c("Default", "foo1", "foo2")) + default_artifact_loc <- file.path(getwd(), "mlruns", "0", fsep = "/") + expect_setequal(experiments_list$artifact_location, c(default_artifact_loc, + "art_loc1", + "art_loc2")) + + # fluent + experiments_list <- mlflow_list_experiments() + expect_setequal(experiments_list$experiment_id, c("0", "1", "2")) + expect_setequal(experiments_list$name, c("Default", "foo1", "foo2")) + default_artifact_loc <- file.path(getwd(), "mlruns", "0", fsep = "/") + expect_setequal(experiments_list$artifact_location, c(default_artifact_loc, + "art_loc1", + "art_loc2")) + + # Returns NULL when no experiments found + expect_null(mlflow_list_experiments("DELETED_ONLY")) + + # `view_type` is respected + mlflow_delete_experiment(experiment_id = "1") + deleted_experiments <- mlflow_list_experiments("DELETED_ONLY") + expect_identical(deleted_experiments$name, "foo1") +}) + + +test_that("mlflow_get_experiment_by_name() works properly", { + mlflow_clear_test_dir("mlruns") + client <- mlflow_client() + expect_error( + mlflow_get_experiment(client = client, name = "exp"), + "Experiment `exp` not found\\." + ) + experiment_id <- mlflow_create_experiment(client = client, "exp", "art") + experiment <- mlflow_get_experiment(client = client, name = "exp") + expect_identical(experiment_id, experiment$experiment_id) + expect_identical(experiment$name, "exp") + expect_identical(experiment$artifact_location, "art") +}) + +test_that("infer experiment id works properly", { + mlflow_clear_test_dir("mlruns") + experiment_id <- mlflow_create_experiment("test") + Sys.setenv(MLFLOW_EXPERIMENT_NAME = "test") + expect_true(experiment_id == mlflow_infer_experiment_id()) + Sys.unsetenv("MLFLOW_EXPERIMENT_NAME") + Sys.setenv(MLFLOW_EXPERIMENT_ID = experiment_id) + expect_true(experiment_id == mlflow_infer_experiment_id()) + Sys.unsetenv("MLFLOW_EXPERIMENT_ID") + mlflow_set_experiment("test") + expect_true(experiment_id == mlflow_infer_experiment_id()) +}) + +test_that("experiment setting works", { + mlflow_clear_test_dir("mlruns") + exp1_id <- mlflow_create_experiment("exp1") + exp2_id <- mlflow_create_experiment("exp2") + mlflow_set_experiment(experiment_name = "exp1") + expect_identical(exp1_id, mlflow_get_active_experiment_id()) + expect_identical(mlflow_get_experiment(exp1_id), mlflow_get_experiment()) + mlflow_set_experiment(experiment_id = exp2_id) + expect_identical(exp2_id, mlflow_get_active_experiment_id()) + expect_identical(mlflow_get_experiment(exp2_id), mlflow_get_experiment()) +}) + +test_that("mlflow_set_experiment() creates experiments", { + mlflow_clear_test_dir("mlruns") + mlflow_set_experiment(experiment_name = "foo", artifact_location = "artifact/location") + experiment <- mlflow_get_experiment() + expect_identical(experiment$artifact_location, "artifact/location") + expect_identical(experiment$name, "foo") +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-tracking-runs.R b/mlflow/R/mlflow/tests/testthat/test-tracking-runs.R new file mode 100644 index 0000000000000..d6176e2ef3a3e --- /dev/null +++ b/mlflow/R/mlflow/tests/testthat/test-tracking-runs.R @@ -0,0 +1,511 @@ +context("Tracking") + +test_that("mlflow_start_run()/mlflow_get_run() work properly", { + mlflow_clear_test_dir("mlruns") + client <- mlflow_client() + run <- mlflow_start_run( + client = client, + experiment_id = "0", + tags = 
list(foo = "bar", foz = "baz", mlflow.user = "user1") + ) + + run <- mlflow_get_run(client = client, run$run_uuid) + + expect_identical(run$user_id, "user1") + + expect_true( + all(purrr::transpose(run$tags[[1]]) %in% + list( + list(key = "foz", value = "baz"), + list(key = "foo", value = "bar"), + list(key = "mlflow.user", value = "user1") + ) + ) + ) +}) + +test_that("mlflow_end_run() works properly", { + mlflow_clear_test_dir("mlruns") + mlflow_start_run() + killed_time <- mlflow:::current_time() + client <- mlflow_client() + run <- mlflow_end_run( + client = client, run_id = mlflow:::mlflow_get_active_run_id(), + status = "KILLED", end_time = killed_time + ) + expect_identical(run$status, "KILLED") + expect_identical(run$end_time, as.POSIXct(as.double(c(killed_time)) / 1000, origin = "1970-01-01")) + + # Verify that only expected run field names are present and that all run info fields are set + # (not NA). + run_info_names <- c("run_uuid", "experiment_id", "user_id", "status", "start_time", + "artifact_uri", "lifecycle_stage", "run_id", "end_time") + run_data_names <- c("metrics", "params", "tags") + expect_setequal(c(run_info_names, run_data_names), names(run)) + expect_true(!anyNA(run[run_info_names])) +}) + +test_that("mlflow_set_tag() should return NULL invisibly", { + mlflow_clear_test_dir("mlruns") + value <- mlflow_set_tag("foo", "bar") + expect_null(value) +}) + +test_that("logging functionality", { + mlflow_clear_test_dir("mlruns") + + start_time_lower_bound <- Sys.time() + mlflow_start_run() + + mlflow_log_metric("mse", 24) + mlflow_log_metric("mse", 25) + + mlflow_set_tag("tag_key", "tag_value") + mlflow_log_param("param_key", "param_value") + + run <- mlflow_get_run() + run_id <- run$run_uuid + tags <- run$tags[[1]] + expect_identical("tag_value", tags$value[tags$key == "tag_key"]) + expect_identical(run$params[[1]]$key, "param_key") + expect_identical(run$params[[1]]$value, "param_value") + + mlflow_delete_tag("tag_key", run_id) + run <- mlflow_get_run() + tags <- run$tags[[1]] + expect_false("tag_key" %in% tags$key) + + mlflow_end_run() + end_time_upper_bound <- Sys.time() + ended_run <- mlflow_get_run(run_id = run_id) + run_start_time <- ended_run$start_time + run_end_time <- ended_run$end_time + expect_true(difftime(run_start_time, start_time_lower_bound) >= 0) + expect_true(difftime(run_end_time, end_time_upper_bound) <= 0) + metric_history <- mlflow_get_metric_history("mse", ended_run$run_uuid) + expect_identical(metric_history$key, c("mse", "mse")) + expect_identical(metric_history$value, c(24, 25)) + expect_identical(metric_history$step, c(0, 0)) + expect_true(all(difftime(metric_history$timestamp, run_start_time) >= 0)) + expect_true(all(difftime(metric_history$timestamp, run_end_time) <= 0)) + + expect_error( + mlflow_get_run(), + "`run_id` must be specified when there is no active run\\." 
+ ) +}) + +test_that("mlflow_log_metric() rounds step and timestamp inputs", { + mlflow_clear_test_dir("mlruns") + mlflow_start_run() + + step_inputs <- runif(10, min = -20, max = 100) + for (step_input in step_inputs) { + mlflow_log_metric(key = "step_metric", + value = runif(1), + step = step_input, + timestamp = 100) + } + expect_setequal( + mlflow_get_metric_history("step_metric")$step, + round(step_inputs) + ) + + timestamp_inputs <- runif(10, 1000, 100000) + for (timestamp_input in timestamp_inputs) { + mlflow_log_metric(key = "timestamp_metric", + value = runif(1), + step = 0, + timestamp = timestamp_input) + } + expect_setequal( + mlflow_get_metric_history("timestamp_metric")$timestamp, + purrr::map(round(timestamp_inputs), mlflow:::milliseconds_to_date) + ) +}) + +test_that("mlflow_log_metric() with step produces expected metric data", { + mlflow_clear_test_dir("mlruns") + mlflow_start_run() + + metric_key_1 <- "test_metric_1" + mlflow_log_metric(key = metric_key_1, + value = 1.2, + step = -2, + timestamp = 300) + mlflow_log_metric(key = metric_key_1, + value = 137.4, + timestamp = 100) + mlflow_log_metric(key = metric_key_1, + value = -20, + timestamp = 200) + + metric_key_2 <- "test_metric_2" + mlflow_log_metric(key = metric_key_2, + value = 14, + step = 120) + mlflow_log_metric(key = metric_key_2, + value = 56, + step = 137) + mlflow_log_metric(key = metric_key_2, + value = 20, + step = -5) + + run <- mlflow_get_run() + metrics <- run$metrics[[1]] + expect_setequal( + metrics$key, + c("test_metric_1", "test_metric_2") + ) + expect_setequal( + metrics$value, + c(-20, 56) + ) + expect_setequal( + metrics$step, + c(0, 137) + ) + + metric_history_1 <- mlflow_get_metric_history("test_metric_1") + expect_setequal( + metric_history_1$value, + c(1.2, 137.4, -20) + ) + expect_setequal( + metric_history_1$timestamp, + purrr::map(c(300, 100, 200), mlflow:::milliseconds_to_date) + ) + expect_setequal( + metric_history_1$step, + c(-2, 0, 0) + ) + + metric_history_2 <- mlflow_get_metric_history("test_metric_2") + expect_setequal( + metric_history_2$value, + c(14, 56, 20) + ) + expect_setequal( + metric_history_2$step, + c(120, 137, -5) + ) +}) + +test_that("mlflow_end_run() behavior", { + mlflow_clear_test_dir("mlruns") + expect_error( + mlflow_end_run(), + "There is no active run to end\\." + ) + + run <- mlflow_start_run() + run_id <- mlflow_id(run) + mlflow_end_run(run_id = run_id) + expect_error( + mlflow_get_run(), + "`run_id` must be specified when there is no active run\\." + ) + + run <- mlflow_start_run() + run_id <- mlflow_id(run) + client <- mlflow_client() + expect_error( + mlflow_end_run(client = client), + "`run_id` must be specified when `client` is specified\\." + ) + mlflow_end_run(client = client, run_id = run_id) + expect_error( + mlflow_get_run(), + "`run_id` must be specified when there is no active run\\." + ) + + mlflow_start_run() + run <- mlflow_end_run(status = "KILLED") + expect_identical( + run$status, + "KILLED" + ) +}) + +test_that("with() errors when not passed active run", { + mlflow_clear_test_dir("mlruns") + client <- mlflow_client() + mlflow_set_experiment("exp1") + run <- mlflow_start_run(client = client) + expect_error( + with(run, { + mlflow_log_metric("mse", 25) + }), + # TODO: somehow this isn't matching "`with()` should only be used with `mlflow_start_run()`\\." 
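+    # Passing NULL as the expected pattern only asserts that some error is raised.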
+ NULL + ) +}) + +test_that("mlflow_search_runs() works", { + mlflow_clear_test_dir("mlruns") + with(mlflow_start_run(), { + mlflow_log_metric("test", 10) + }) + with(mlflow_start_run(), { + mlflow_log_metric("test", 20) + }) + expect_equal(nrow(mlflow_search_runs(experiment_ids = list("0"))), 2) + expect_equal(nrow(mlflow_search_runs(experiment_ids = "0")), 2) + expect_equal(nrow(mlflow_search_runs(filter = "metrics.test > 10", experiment_ids = list("0"))), 1) + expect_equal(nrow(mlflow_search_runs(filter = "metrics.test < 20", experiment_ids = list("0"))), 1) + expect_equal(nrow(mlflow_search_runs(filter = "metrics.test > 20", experiment_ids = list("0"))), 0) + + search <- mlflow_search_runs(order_by = "metrics.test", experiment_ids = list("0")) + expect_equal(search$metrics[[1]]$value[1], 10) + expect_equal(search$metrics[[2]]$value[1], 20) + + search <- mlflow_search_runs(order_by = list("metrics.test DESC"), experiment_ids = list("0")) + expect_equal(search$metrics[[1]]$value[1], 20) + expect_equal(search$metrics[[2]]$value[1], 10) + + mlflow_set_experiment("new-experiment") + expect_equal(nrow(mlflow_search_runs()), 0) + with(mlflow_start_run(), { + mlflow_log_metric("new_experiment_metric", 30) + }) + expect_equal(nrow(mlflow_search_runs(filter = "metrics.new_experiment_metric = 30")), 1) +}) + +test_that("mlflow_list_run_infos() works", { + mlflow_clear_test_dir("mlruns") + expect_equal(nrow(mlflow_list_run_infos(experiment_id = "0")), 0) + with(mlflow_start_run(), { + mlflow_log_metric("test", 10) + }) + expect_equal(nrow(mlflow_list_run_infos(experiment_id = "0")), 1) + mlflow_set_experiment("new-experiment") + expect_equal(nrow(mlflow_list_run_infos()), 0) + with(mlflow_start_run(), { + mlflow_log_metric("new_experiment_metric", 20) + }) + expect_equal(nrow(mlflow_list_run_infos()), 1) +}) + +test_that("mlflow_log_batch() works", { + mlflow_clear_test_dir("mlruns") + mlflow_start_run() + mlflow_log_batch( + metrics = data.frame(key = c("mse", "mse", "rmse", "rmse"), + value = c(21, 23, 42, 36), + timestamp = c(100, 200, 300, 300), + step = c(-4, 1, 7, 3)), + params = data.frame(key = c("l1", "optimizer"), value = c(0.01, "adam")), + tags = data.frame(key = c("model_type", "data_year"), + value = c("regression", "2015")) + ) + + run <- mlflow_get_run() + metrics <- run$metrics[[1]] + params <- run$params[[1]] + tags <- run$tags[[1]] + + expect_setequal( + metrics$key, + c("mse", "rmse") + ) + expect_setequal( + metrics$value, + c(23, 42) + ) + expect_setequal( + metrics$timestamp, + purrr::map(c(200, 300), mlflow:::milliseconds_to_date) + ) + expect_setequal( + metrics$step, + c(1, 7) + ) + + metric_history <- mlflow_get_metric_history("mse") + expect_setequal( + metric_history$value, + c(21, 23) + ) + expect_setequal( + metric_history$timestamp, + purrr::map(c(100, 200), mlflow:::milliseconds_to_date) + ) + expect_setequal( + metric_history$step, + c(-4, 1) + ) + + expect_setequal( + params$key, + c("optimizer", "l1") + ) + + expect_setequal( + params$value, + c("adam", "0.01") + ) + + expect_identical("regression", tags$value[tags$key == "model_type"]) + expect_identical("2015", tags$value[tags$key == "data_year"]) +}) + +test_that("mlflow_log_batch() throws for mismatched input data columns", { + mlflow_clear_test_dir("mlruns") + mlflow_start_run() + error_text_regexp <- "*input dataframe must contain exactly the following columns*" + + expect_error( + mlflow_log_batch( + metrics = data.frame(key = c("mse"), value = c(10)) + ), + regexp = error_text_regexp + ) + expect_error( 
+ mlflow_log_batch( + metrics = data.frame(bad_column = c("bad")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + metrics = data.frame( + key = c("mse"), + value = c(10), + timestamp = c(100), + step = c(1), + bad_column = c("bad") + ) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + params = data.frame(key = c("alpha")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + params = data.frame(bad_column = c("bad")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + params = data.frame( + key = c("alpha"), + value = c(10), + bad_column = c("bad") + ) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + tags = data.frame(key = c("my_tag")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + tags = data.frame(bad_column = c("bad")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + tags = data.frame( + key = c("my_tag"), + value = c("some tag info"), + bad_column = c("bad") + ) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + metrics = data.frame( + bad_column = c("bad1") + ), + params = data.frame( + another_bad_column = c("bad2") + ), + tags = data.frame( + one_more_bad_column = c("bad3") + ) + ), + regexp = error_text_regexp + ) +}) + +test_that("mlflow_log_batch() throws for missing entries", { + mlflow_clear_test_dir("mlruns") + mlflow_start_run() + error_text_regexp <- "*input dataframe contains a missing \\('NA'\\) entry*" + + expect_error( + mlflow_log_batch( + metrics = data.frame(key = c("mse", "rmse", "na_metric"), + value = c(21, 42, NA), + timestamp = c(100, 200, 300), + step = c(-4, 1, 3)) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + metrics = data.frame(key = c("mse", "rmse", "na_metric"), + value = c(21, 42, 9.2), + timestamp = c(NA, 200, 300), + step = c(-4, 1, NA)) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + params = data.frame(key = c(NA, "alpha"), + value = c("0.5", "2")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + params = data.frame(key = c("n_layers", "alpha"), + value = c("4", NA)) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + tags = data.frame(key = c("first_tag", NA), + value = c("some tag info", "more tag info")) + ), + regexp = error_text_regexp + ) + expect_error( + mlflow_log_batch( + tags = data.frame(key = c("first_tag", "second_tag"), + value = c(NA, NA)) + ), + regexp = error_text_regexp + ) + + expect_error( + mlflow_log_batch( + metrics = data.frame(key = c("mse", "rmse", "na_metric"), + value = c(21, 42, 37), + timestamp = c(100, 200, 300), + step = c(-4, NA, 3)), + params = data.frame(key = c("l1", "optimizer"), value = c(NA, "adam")), + tags = data.frame(key = c(NA, "data_year"), + value = c("regression", NA)) + ), + regexp = error_text_regexp + ) +}) diff --git a/mlflow/R/mlflow/tests/testthat/test-tracking.R b/mlflow/R/mlflow/tests/testthat/test-tracking.R deleted file mode 100644 index 705bec2678d1f..0000000000000 --- a/mlflow/R/mlflow/tests/testthat/test-tracking.R +++ /dev/null @@ -1,80 +0,0 @@ -context("Tracking") - -test_that("mlflow_client_create_experiment() works properly", { - mlflow_clear_test_dir("mlruns") - client <- mlflow_client() - experiment_id <- mlflow_client_create_experiment(client = client, "exp_name", "art_loc") - experiment <- mlflow_client_get_experiment(client = client, experiment_id) - 
expect_identical(experiment$experiment$name, "exp_name") - expect_identical(experiment$experiment$artifact_location, "art_loc") -}) - -test_that("mlflow_client_list_experiments() works properly", { - mlflow_clear_test_dir("mlruns") - client <- mlflow_client() - mlflow_client_create_experiment(client = client, "foo1", "art_loc1") - mlflow_client_create_experiment(client = client, "foo2", "art_loc2") - experiments_list <- mlflow_client_list_experiments(client = client) - expect_setequal(experiments_list$experiment_id, c("0", "1", "2")) - expect_setequal(experiments_list$name, c("Default", "foo1", "foo2")) - expect_setequal(experiments_list$artifact_location, c("mlruns/0", "art_loc1", "art_loc2")) -}) - -test_that("mlflow_client_get_experiment() works properly", { - mlflow_clear_test_dir("mlruns") - client <- mlflow_client() - experiment_id <- mlflow_client_create_experiment(client = client, "foo1", "art_loc1") - experiment <- mlflow_client_get_experiment(client = client, experiment_id) - expect_identical(experiment$experiment$experiment_id, experiment_id) - expect_identical(experiment$experiment$name, "foo1") - expect_identical(experiment$experiment$artifact_location, "art_loc1") -}) - -test_that("mlflow_client_create_run()/mlflow_client_get_run() work properly", { - mlflow_clear_test_dir("mlruns") - client <- mlflow_client() - create_run_response <- mlflow_client_create_run( - client = client, - experiment_id = "0", - user_id = "user1", - run_name = "run1", - tags = list(foo = "bar", foz = "baz") - ) - - run <- mlflow_client_get_run(client = client, create_run_response$info$run_uuid) - run_info <- run$info - - expect_identical(run_info$user_id, "user1") - - actual_tags <- run$data$tags %>% - unname() %>% - purrr::transpose() %>% - purrr::map(purrr::flatten_chr) - expected_tags <- list(c("foo", "bar"), c("foz", "baz")) - - expect_true(all(expected_tags %in% actual_tags)) -}) - -test_that("mlflow_client_set_teminated() works properly", { - mlflow_clear_test_dir("mlruns") - mlflow_start_run() - killed_time <- mlflow:::current_time() - client <- mlflow_client() - run_info <- mlflow_client_set_terminated( - client = client, run_id = mlflow_active_run()$info$run_uuid, - status = "KILLED", end_time = killed_time - ) - expect_identical(run_info$status, "KILLED") - expect_identical(run_info$end_time, as.POSIXct(as.double(c(killed_time)) / 1000, origin = "1970-01-01")) -}) - -test_that("mlflow_create_experiment() works properly", { - experiment <- mlflow_create_experiment("test") - expect_gt(as.numeric(experiment), 0) -}) - -test_that("mlflow_set_tag() should return NULL invisibly", { - mlflow_clear_test_dir("mlruns") - value <- mlflow_set_tag("foo", "bar") - expect_null(value) -}) diff --git a/mlflow/R/mlflow/tests/testthat/test_keras_model.R b/mlflow/R/mlflow/tests/testthat/test_keras_model.R index 82e96a4562eab..5e25853d52d77 100644 --- a/mlflow/R/mlflow/tests/testthat/test_keras_model.R +++ b/mlflow/R/mlflow/tests/testthat/test_keras_model.R @@ -3,6 +3,7 @@ context("Model") library("keras") test_that("mlflow can save keras model ", { + PATH <- Sys.getenv("PATH", "") # keras package modifies PATH which breaks other tests mlflow_clear_test_dir("model") model <- keras_model_sequential() %>% layer_dense(units = 8, activation = "relu", input_shape = dim(iris)[2] - 1) %>% @@ -17,14 +18,11 @@ test_that("mlflow can save keras model ", { model %>% fit(train_x, train_y, epochs = 1) model %>% mlflow_save_model("model") expect_true(dir.exists("model")) - detach("package:keras", unload = TRUE) - 
 model_reloaded <- mlflow_load_model("model")
-
 expect_equal(
   predict(model, train_x),
-    predict(model_reloaded, train_x),
-    mlflow_predict_flavor(model_reloaded, iris[, 1:4])
-  )
+    predict(model_reloaded, train_x))
+  expect_equal(
+    predict(model, train_x),
+    mlflow_predict(model_reloaded, iris[, 1:4]))
+  Sys.setenv(PATH = PATH)
 })
diff --git a/mlflow/__init__.py b/mlflow/__init__.py
index 1238d29cbffcb..bcb47b011dc92 100644
--- a/mlflow/__init__.py
+++ b/mlflow/__init__.py
@@ -27,8 +27,6 @@
 from mlflow.version import VERSION as __version__

-import os
-
 # Filter annoying Cython warnings that serve no good purpose, and so before
 # importing other modules.
 # See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7
@@ -40,26 +38,34 @@
 import mlflow.projects as projects  # noqa
 import mlflow.tracking as tracking  # noqa
 import mlflow.tracking.fluent
+from mlflow.utils.logging_utils import _configure_mlflow_loggers
+
+_configure_mlflow_loggers(root_module_name=__name__)

 ActiveRun = mlflow.tracking.fluent.ActiveRun
 log_param = mlflow.tracking.fluent.log_param
 log_metric = mlflow.tracking.fluent.log_metric
 set_tag = mlflow.tracking.fluent.set_tag
+delete_tag = mlflow.tracking.fluent.delete_tag
 log_artifacts = mlflow.tracking.fluent.log_artifacts
 log_artifact = mlflow.tracking.fluent.log_artifact
 active_run = mlflow.tracking.fluent.active_run
 start_run = mlflow.tracking.fluent.start_run
 end_run = mlflow.tracking.fluent.end_run
+search_runs = mlflow.tracking.fluent.search_runs
 get_artifact_uri = mlflow.tracking.fluent.get_artifact_uri
 set_tracking_uri = tracking.set_tracking_uri
 get_tracking_uri = tracking.get_tracking_uri
 create_experiment = mlflow.tracking.fluent.create_experiment
 set_experiment = mlflow.tracking.fluent.set_experiment
+log_params = mlflow.tracking.fluent.log_params
+log_metrics = mlflow.tracking.fluent.log_metrics
+set_tags = mlflow.tracking.fluent.set_tags

 run = projects.run

 __all__ = ["ActiveRun", "log_param", "log_metric", "set_tag", "log_artifacts", "log_artifact",
-           "active_run", "start_run", "end_run", "get_artifact_uri", "set_tracking_uri",
-           "create_experiment", "set_experiment", "run"]
+           "active_run", "start_run", "end_run", "search_runs", "get_artifact_uri",
+           "set_tracking_uri", "create_experiment", "set_experiment", "run"]
diff --git a/mlflow/azureml/__init__.py b/mlflow/azureml/__init__.py
index b8752586e4537..4cb1b042975e8 100644
--- a/mlflow/azureml/__init__.py
+++ b/mlflow/azureml/__init__.py
@@ -4,139 +4,327 @@
 """
 from __future__ import print_function
-
+import sys
 import os
 import shutil
+import tempfile
+import logging
+
+from distutils.version import StrictVersion

 import mlflow
 from mlflow import pyfunc
+from mlflow.exceptions import MlflowException
 from mlflow.models import Model
-from mlflow.tracking.utils import _get_model_log_dir
-from mlflow.utils.logging_utils import eprint
-from mlflow.utils.file_utils import TempDir
+from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
+from mlflow.tracking.artifact_utils import _download_artifact_from_uri
+from mlflow.utils import PYTHON_VERSION, experimental, get_unique_resource_id
+from mlflow.utils.file_utils import TempDir, _copy_file_or_tree, _copy_project
 from mlflow.version import VERSION as mlflow_version

-def deploy(app_name, model_path, run_id=None, mlflow_home=None):
-    """
-    Deploy an MLflow model to Azure Machine Learning.
-
-    NOTE:
+_logger = logging.getLogger(__name__)
- - Azure Machine Learning cannot handle any Conda environment. In particular the Python - version is fixed. If the model contains Conda environment and it has been trained outside - of Azure Machine Learning, the Conda environment might need to be edited to work with - Azure Machine Learning. - - :param app_name: Name of the deployed application. - :param model_path: Local or MLflow-run-relative path to the model to be deployed. - :param run_id: MLflow run ID. - :param mlflow_home: Directory containing checkout of the MLflow GitHub project or - current directory if not specified. +@experimental +def build_image(model_uri, workspace, image_name=None, model_name=None, + mlflow_home=None, description=None, tags=None, synchronous=True): """ - if run_id: - model_path = _get_model_log_dir(model_path, run_id) - model_path = os.path.abspath(model_path) - with TempDir(chdr=True, remove_on_exit=True): - exec_str = _export(app_name, model_path, mlflow_home=mlflow_home) - eprint("executing", '"{}"'.format(exec_str)) - # Use os.system instead of subprocess due to the fact that currently all azureml commands - # have to be called within the same shell (launched from azureml workbench app by the user). - # We can change this once there is a python api (or general cli) available. - os.system(exec_str) - - -def export(output, model_path, run_id=None, mlflow_home=None): + Register an MLflow model with Azure ML and build an Azure ML ContainerImage for deployment. + The resulting image can be deployed as a web service to Azure Container Instances (ACI) or + Azure Kubernetes Service (AKS). + + The resulting Azure ML ContainerImage will contain a webserver that processes model queries. + For information about the input data formats accepted by this webserver, see the + :ref:`MLflow deployment tools documentation `. + + :param model_uri: The location, in URI format, of the MLflow model used to build the Azure + ML deployment image. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :param image_name: The name to assign the Azure Container Image that will be created. If + unspecified, a unique image name will be generated. + :param model_name: The name to assign the Azure Model will be created. If unspecified, + a unique model name will be generated. + :param workspace: The AzureML workspace in which to build the image. This is a + `azureml.core.Workspace` object. + :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the + image will install MLflow from this directory. Otherwise, it will install + MLflow from pip. + :param description: A string description to associate with the Azure Container Image and the + Azure Model that will be created. For more information, see + ``_ and + ``_. + :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to + associate with the Azure Container Image and the Azure Model that will be created. + These tags are added to a set of default tags that include the model uri, + and more. For more information, see + ``_ and + ``_. + :param synchronous: If ``True``, this method blocks until the image creation procedure + terminates before returning. If ``False``, the method returns immediately, + but the returned image will not be available until the asynchronous + creation process completes. 
Use the + ``azureml.core.Image.wait_for_creation()`` function to wait for the creation + process to complete. + :return: A tuple containing the following elements in order: + - An ``azureml.core.image.ContainerImage`` object containing metadata for the new image. + - An ``azureml.core.model.Model`` object containing metadata for the new model. + + >>> import mlflow.azureml + >>> from azureml.core import Workspace + >>> from azureml.core.webservice import AciWebservice, Webservice + >>> + >>> # Load or create an Azure ML Workspace + >>> workspace_name = "" + >>> subscription_id = "" + >>> resource_group = "" + >>> location = "" + >>> azure_workspace = Workspace.create(name=workspace_name, + >>> subscription_id=subscription_id, + >>> resource_group=resource_group, + >>> location=location, + >>> create_resource_group=True, + >>> exist_okay=True) + >>> + >>> # Build an Azure ML Container Image for an MLflow model + >>> azure_image, azure_model = mlflow.azureml.build_image( + >>> model_uri="", + >>> workspace=azure_workspace, + >>> synchronous=True) + >>> # If your image build failed, you can access build logs at the following URI: + >>> print("Access the following URI for build logs: {}".format(azure_image.image_build_log_uri)) + >>> + >>> # Deploy the image to Azure Container Instances (ACI) for real-time serving + >>> webservice_deployment_config = AciWebservice.deploy_configuration() + >>> webservice = Webservice.deploy_from_image( + >>> image=azure_image, workspace=azure_workspace, name="") + >>> webservice.wait_for_deployment() """ - Export an MLflow model with everything needed to deploy on Azure Machine Learning. - Output includes sh script with command to deploy the generated model to Azure Machine Learning. - - NOTE: - - - This command does not need an Azure Machine Learning environment to run. + # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should + # still be accessible for import from Python 2. Therefore, we will only import from the SDK + # upon method invocation. + # pylint: disable=import-error + from azureml.core.image import ContainerImage + from azureml.core.model import Model as AzureModel + + absolute_model_path = _download_artifact_from_uri(model_uri) + + model_pyfunc_conf = _load_pyfunc_conf(model_path=absolute_model_path) + model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None) + if model_python_version is not None and\ + StrictVersion(model_python_version) < StrictVersion("3.0.0"): + raise MlflowException( + message=("Azure ML can only deploy models trained in Python 3 and above. 
See" + " the following MLflow GitHub issue for a thorough explanation of this" + " limitation and a workaround to enable support for deploying models" + " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"), + error_code=INVALID_PARAMETER_VALUE) + + tags = _build_tags(model_uri=model_uri, model_python_version=model_python_version, + user_tags=tags) + + if image_name is None: + image_name = _get_mlflow_azure_resource_name() + if model_name is None: + model_name = _get_mlflow_azure_resource_name() + + with TempDir(chdr=True) as tmp: + model_directory_path = tmp.path("model") + tmp_model_path = os.path.join( + model_directory_path, + _copy_file_or_tree(src=absolute_model_path, dst=model_directory_path)) + + registered_model = AzureModel.register(workspace=workspace, model_path=tmp_model_path, + model_name=model_name, tags=tags, + description=description) + _logger.info("Registered an Azure Model with name: `%s` and version: `%s`", + registered_model.name, registered_model.version) + + # Create an execution script (entry point) for the image's model server. Azure ML requires + # the container's execution script to be located in the current working directory during + # image creation, so we create the execution script as a temporary file in the current + # working directory. + execution_script_path = tmp.path("execution_script.py") + _create_execution_script(output_path=execution_script_path, azure_model=registered_model) + # Azure ML copies the execution script into the image's application root directory by + # prepending "/var/azureml-app" to the specified script path. The script is then executed + # by referencing its path relative to the "/var/azureml-app" directory. Unfortunately, + # if the script path is an absolute path, Azure ML attempts to reference it directly, + # resulting in a failure. To circumvent this problem, we provide Azure ML with the relative + # script path. Because the execution script was created in the current working directory, + # this relative path is the script path's base name. + execution_script_path = os.path.basename(execution_script_path) + + if mlflow_home is not None: + _logger.info( + "Copying the specified mlflow_home directory: `%s` to a temporary location for" + " container creation", + mlflow_home) + mlflow_home = os.path.join(tmp.path(), + _copy_project(src_path=mlflow_home, dst_path=tmp.path())) + image_file_dependencies = [mlflow_home] + else: + image_file_dependencies = None + dockerfile_path = tmp.path("Dockerfile") + _create_dockerfile(output_path=dockerfile_path, mlflow_path=mlflow_home) + + conda_env_path = None + if pyfunc.ENV in model_pyfunc_conf: + conda_env_path = os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV]) + + image_configuration = ContainerImage.image_configuration( + execution_script=execution_script_path, + runtime="python", + docker_file=dockerfile_path, + dependencies=image_file_dependencies, + conda_file=conda_env_path, + description=description, + tags=tags, + ) + image = ContainerImage.create(workspace=workspace, + name=image_name, + image_config=image_configuration, + models=[registered_model]) + _logger.info("Building an Azure Container Image with name: `%s` and version: `%s`", + image.name, image.version) + if synchronous: + image.wait_for_creation(show_output=True) + return image, registered_model + + +def _build_tags(model_uri, model_python_version=None, user_tags=None): + """ + :param model_uri: URI to the MLflow model. 
+    :param model_python_version: The version of Python that was used to train the model, if
+                                 the model was trained in Python.
+    :param user_tags: A collection of user-specified tags to append to the set of default tags.
+    """
+    tags = dict(user_tags) if user_tags is not None else {}
+    tags["model_uri"] = model_uri
+    if model_python_version is not None:
+        tags["python_version"] = model_python_version
+    return tags
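For illustration, the tag merging above behaves roughly as follows (all values are hypothetical):

    # Hypothetical call; user tags are merged with MLflow's default tags.
    tags = _build_tags(model_uri="runs:/abc123/model",
                       model_python_version="3.6.8",
                       user_tags={"team": "forecasting"})
    # tags == {"team": "forecasting", "model_uri": "runs:/abc123/model",
    #          "python_version": "3.6.8"}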
+ """ + docker_cmds = ["RUN pip install azureml-sdk"] + + if mlflow_path is not None: + mlflow_install_cmd = "RUN pip install -e {mlflow_path}".format( + mlflow_path=_get_container_path(mlflow_path)) + elif not mlflow_version.endswith("dev"): + mlflow_install_cmd = "RUN pip install mlflow=={mlflow_version}".format( + mlflow_version=mlflow_version) + else: + raise MlflowException( + "You are running a 'dev' version of MLflow: `{mlflow_version}` that cannot be" + " installed from pip. In order to build a container image, either specify the" + " path to a local copy of the MLflow GitHub repository using the `mlflow_home`" + " parameter or install a release version of MLflow from pip".format( + mlflow_version=mlflow_version)) + docker_cmds.append(mlflow_install_cmd) + + with open(output_path, "w") as f: + f.write("\n".join(docker_cmds)) + + +def _get_container_path(local_path): + """ + Given a local path to a resource, obtains the path at which this resource will exist + when it is copied into the Azure ML container image. + """ + if local_path.startswith("/"): + local_path = local_path[1:] + return os.path.join("/var/azureml-app", local_path) - env = "-c {}".format(os.path.join("model", conf[pyfunc.ENV])) \ - if pyfunc.ENV in conf else "" - cmd = "az ml service create realtime -n {name} " + \ - "--model-file model -f score.py {conda_env} {deps} -r python -p requirements.txt" - return cmd.format(name=app_name, conda_env=env, deps=deps) +def _load_pyfunc_conf(model_path): + """ + Loads the `python_function` flavor configuration for the specified model or throws an exception + if the model does not contain the `python_function` flavor. -def _load_conf(path): - path = os.path.abspath(path) - model = Model.load(os.path.join(path, "MLmodel")) + :param model_path: The absolute path to the model. + :return: The model's `python_function` flavor configuration. + """ + model_path = os.path.abspath(model_path) + model = Model.load(os.path.join(model_path, "MLmodel")) if pyfunc.FLAVOR_NAME not in model.flavors: - raise Exception("Supports only pyfunc format.") + raise MlflowException( + message=("The specified model does not contain the `python_function` flavor. 
This " + " flavor is required for model deployment required for model deployment."), + error_code=INVALID_PARAMETER_VALUE) return model.flavors[pyfunc.FLAVOR_NAME] +def _get_mlflow_azure_resource_name(): + """ + :return: A unique name for an Azure resource indicating that the resource was created by + MLflow + """ + azureml_max_resource_length = 32 + resource_prefix = "mlflow-" + unique_id = get_unique_resource_id( + max_length=(azureml_max_resource_length - len(resource_prefix))) + return resource_prefix + unique_id + + SCORE_SRC = """ import pandas as pd +from azureml.core.model import Model from mlflow.pyfunc import load_pyfunc -from mlflow.utils import get_jsonable_obj +from mlflow.pyfunc.scoring_server import parse_json_input, _get_jsonable_obj def init(): global model - model = load_pyfunc("model") + model_path = Model.get_model_path(model_name="{model_name}", version={model_version}) + model = load_pyfunc(model_path) -def run(s): - input_df = pd.read_json(s, orient="records") - return get_jsonable_obj(model.predict(input_df)) +def run(json_input): + input_df = parse_json_input(json_input=json_input, orient="split") + return _get_jsonable_obj(model.predict(input_df), pandas_orient="records") """ diff --git a/mlflow/azureml/cli.py b/mlflow/azureml/cli.py index 9d327d6931c9f..cde662bddf7af 100644 --- a/mlflow/azureml/cli.py +++ b/mlflow/azureml/cli.py @@ -3,20 +3,18 @@ """ from __future__ import print_function -import os +import json import click -import mlflow import mlflow.azureml - -from mlflow.utils import cli_args +from mlflow.utils import cli_args, experimental @click.group("azureml") def commands(): """ - Serve models on Azure ML. + Serve models on Azure ML. **These commands require that MLflow be installed with Python 3.** To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI environment variable to the URL of the desired server. @@ -24,42 +22,49 @@ def commands(): pass -@commands.command("deploy") -@click.option("--app-name", "-n", default=None, - help="The application name under which should this model be deployed. " - "Translates to service name on Azure ML", required=True) -@cli_args.MODEL_PATH -@cli_args.RUN_ID +@commands.command("build-image") +@cli_args.MODEL_URI +@click.option("--workspace-name", "-w", required=True, + help="The name of the Azure Workspace in which to build the image.") +@click.option("--subscription-id", "-s", default=None, + help=("The subscription id associated with the Azure Workspace in which to build" + " the image")) +@click.option("--image-name", "-i", default=None, + help=("The name to assign the Azure Container Image that is created. If unspecified," + " a unique image name will be generated.")) +@click.option("--model-name", "-n", default=None, + help=("The name to assign the Azure Model that is created. If unspecified," + " a unique image name will be generated.")) @cli_args.MLFLOW_HOME -def deploy(app_name, model_path, run_id, mlflow_home): - """Deploy MLflow model to Azure ML. - - NOTE: This command must be run from console launched from Azure ML Workbench. - Caller is reponsible for setting up Azure ML environment and accounts. - - NOTE: Azure ML cannot handle any Conda environment. In particular the Python version is - fixed. If the model contains Conda environment and it has been trained outside of Azure - ML, the Conda environment might need to be edited to work with Azure ML. 
+@click.option("--description", "-d", default=None, + help=("A string description to associate with the Azure Container Image and the" + " Azure Model that are created.")) +@click.option("--tags", "-t", default=None, + help=("A collection of tags, represented as a JSON-formatted dictionary of string" + " key-value pairs, to associate with the Azure Container Image and the Azure" + " Model that are created. These tags are added to a set of default tags" + " that include the model path, the model run id (if specified), and more.")) +@experimental +def build_image(model_uri, workspace_name, subscription_id, image_name, model_name, + mlflow_home, description, tags): """ - mlflow.azureml.deploy(app_name=app_name, model_path=model_path, run_id=run_id, - mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None) - - -@commands.command("export") -@click.option("--output", "-o", default=None, help="Output directory.", required=True) -@cli_args.MODEL_PATH -@cli_args.RUN_ID -@cli_args.MLFLOW_HOME -def export(output, model_path, run_id, mlflow_home): - """Export MLflow model as Azure ML compatible model ready to be deployed. - - Export MLflow model with everything needed to deploy on Azure ML. - Output includes sh script with command to deploy the generated model to Azure ML. - - NOTE: This command does not need Azure ML environment to run. + Register an MLflow model with Azure ML and build an Azure ML ContainerImage for deployment. + The resulting image can be deployed as a web service to Azure Container Instances (ACI) or + Azure Kubernetes Service (AKS). - NOTE: Azure ML can not handle any Conda environment. If the model contains Conda environment - and it has been trained outside of Azure ML, the Conda environment might need to be edited. + The resulting Azure ML ContainerImage will contain a webserver that processes model queries. + For information about the input data formats accepted by this webserver, see the following + documentation: https://www.mlflow.org/docs/latest/models.html#azureml-deployment. """ - mlflow.azureml.export(output=output, model_path=model_path, run_id=run_id, - mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None) + # The Azure ML SDK is only compatible with Python 3. However, this CLI should still be + # accessible for inspection rom Python 2. Therefore, we will only import from the SDK + # upon command invocation. 
diff --git a/mlflow/cli.py b/mlflow/cli.py
index 7bef5be5849b9..c95959038306c 100644
--- a/mlflow/cli.py
+++ b/mlflow/cli.py
@@ -1,27 +1,36 @@
 from __future__ import print_function
 
+import json
+import os
 import sys
+import logging
 
 import click
 from click import UsageError
 
 import mlflow.azureml.cli
 import mlflow.projects as projects
-import mlflow.sklearn
 import mlflow.data
 import mlflow.experiments
-import mlflow.pyfunc.cli
-import mlflow.rfunc.cli
+import mlflow.models.cli
+
 import mlflow.sagemaker.cli
+import mlflow.runs
+import mlflow.store.db.utils
+import mlflow.db
 
-from mlflow.entities.experiment import Experiment
-from mlflow.utils.process import ShellCommandException
+from mlflow.tracking.utils import _is_local_uri
 from mlflow.utils.logging_utils import eprint
+from mlflow.utils.process import ShellCommandException
 from mlflow.utils import cli_args
 from mlflow.server import _run_server
+from mlflow.server.handlers import _get_store
+from mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
 from mlflow import tracking
 import mlflow.store.cli
 
+_logger = logging.getLogger(__name__)
+
 
 @click.group()
 @click.version_option()
@@ -42,41 +51,46 @@ def cli():
              help="A parameter for the run, of the form -P name=value. Provided parameters that "
                   "are not in the list of parameters for an entry point will be passed to the "
                   "corresponding entry point as command-line arguments in the form `--name value`")
-@click.option("--experiment-id", envvar=tracking._EXPERIMENT_ID_ENV_VAR, type=click.INT,
-              help="ID of the experiment under which to launch the run. Defaults to %s" %
-                   Experiment.DEFAULT_EXPERIMENT_ID)
+@click.option("--experiment-name", envvar=tracking._EXPERIMENT_NAME_ENV_VAR,
+              help="Name of the experiment under which to launch the run. If not "
+                   "specified, 'experiment-id' option will be used to launch run.")
+@click.option("--experiment-id", envvar=tracking._EXPERIMENT_ID_ENV_VAR, type=click.STRING,
+              help="ID of the experiment under which to launch the run.")
 # TODO: Add tracking server argument once we have it working.
-@click.option("--mode", "-m", metavar="MODE",
-              help="Execution mode to use for run. Supported values: 'local' (runs project"
-                   "locally) and 'databricks' (runs project on a Databricks cluster)."
-                   "Defaults to 'local'. If running against Databricks, will run against the "
-                   "Databricks workspace specified in the default Databricks CLI profile. "
-                   "See https://github.com/databricks/databricks-cli for more info on configuring "
-                   "a Databricks CLI profile.")
-@click.option("--cluster-spec", "-c", metavar="FILE",
-              help="Path to JSON file describing the cluster to use when launching a run on "
-                   "Databricks. See "
-                   "https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster for "
-                   "more info. Note that MLflow runs are currently launched against a new cluster.")
-@click.option("--git-username", metavar="USERNAME", envvar="MLFLOW_GIT_USERNAME",
-              help="Username for HTTP(S) Git authentication.")
-@click.option("--git-password", metavar="PASSWORD", envvar="MLFLOW_GIT_PASSWORD",
-              help="Password for HTTP(S) Git authentication.")
+@click.option("--backend", "-b", metavar="BACKEND",
+              help="Execution backend to use for run. Supported values: 'local' (runs project "
+                   "locally) and 'databricks' (runs project on a Databricks cluster). "
+                   "Defaults to 'local'. If running against Databricks, will run against a "
+                   "Databricks workspace determined as follows: if a Databricks tracking URI "
+                   "of the form 'databricks://profile' has been set (e.g. by setting "
+                   "the MLFLOW_TRACKING_URI environment variable), will run against the "
+                   "workspace specified by <profile>. Otherwise, runs against the workspace "
+                   "specified by the default Databricks CLI profile. See "
+                   "https://github.com/databricks/databricks-cli for more info on configuring a "
+                   "Databricks CLI profile.")
+@click.option("--backend-config", "-c", metavar="FILE",
+              help="Path to JSON file (must end in '.json') or JSON string which will be passed "
+                   "as config to the backend. For the Databricks backend, this should be a "
+                   "cluster spec: see "
+                   "https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster "
+                   "for more information. Note that MLflow runs are currently launched against "
+                   "a new cluster.")
 @cli_args.NO_CONDA
 @click.option("--storage-dir", envvar="MLFLOW_TMP_DIR",
-              help="Only valid when `mode` is local."
+              help="Only valid when ``backend`` is local. "
                    "MLflow downloads artifacts from distributed URIs passed to parameters of "
                    "type 'path' to subdirectories of storage_dir.")
 @click.option("--run-id", metavar="RUN_ID",
              help="If specified, the given run ID will be used instead of creating a new run. "
                   "Note: this argument is used internally by the MLflow project APIs "
                   "and should not be specified.")
-def run(uri, entry_point, version, param_list, experiment_id, mode, cluster_spec, git_username,
-        git_password, no_conda, storage_dir, run_id):
+def run(uri, entry_point, version, param_list, experiment_name, experiment_id, backend,
+        backend_config, no_conda, storage_dir, run_id):
     """
     Run an MLflow project from the given URI.
 
-    For local runs, blocks the run completes. Otherwise, runs the project asynchronously.
+    For local runs, the run will block until it completes.
+    Otherwise, the project will run asynchronously.
 
     If running locally (the default), the URI can be either a Git repository URI or a local path.
     If running on Databricks, the URI must be a Git repository.
@@ -84,61 +98,109 @@
     By default, Git projects run in a new working directory with the given parameters, while
    local projects run from the project's root directory.
     """
+    if experiment_id is not None and experiment_name is not None:
+        eprint("Specify only one of 'experiment-name' or 'experiment-id' options.")
+        sys.exit(1)
+
     param_dict = {}
     for s in param_list:
         index = s.find("=")
         if index == -1:
-            print("Invalid format for -P parameter: '%s'. Use -P name=value." % s, file=sys.stderr)
+            eprint("Invalid format for -P parameter: '%s'. Use -P name=value."
                   % s)
            sys.exit(1)
        name = s[:index]
        value = s[index + 1:]
        if name in param_dict:
-            print("Repeated parameter: '%s'" % name, file=sys.stderr)
+            eprint("Repeated parameter: '%s'" % name)
            sys.exit(1)
        param_dict[name] = value
+    if backend_config is not None and os.path.splitext(backend_config)[-1] != ".json":
+        try:
+            backend_config = json.loads(backend_config)
+        except ValueError as e:
+            eprint("Invalid backend config JSON. Parse error: %s" % e)
+            raise
+    if backend == "kubernetes":
+        if backend_config is None:
+            eprint("Specify 'backend_config' when using kubernetes mode.")
+            sys.exit(1)
     try:
         projects.run(
             uri,
             entry_point,
             version,
+            experiment_name=experiment_name,
             experiment_id=experiment_id,
             parameters=param_dict,
-            mode=mode,
-            cluster_spec=cluster_spec,
-            git_username=git_username,
-            git_password=git_password,
+            backend=backend,
+            backend_config=backend_config,
             use_conda=(not no_conda),
             storage_dir=storage_dir,
-            block=mode == "local" or mode is None,
-            run_id=run_id,
+            synchronous=backend in ("local", "kubernetes") or backend is None,
+            run_id=run_id
         )
     except projects.ExecutionException as e:
-        eprint("=== %s ===" % e)
+        _logger.error("=== %s ===", e)
         sys.exit(1)
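For illustration, the reworked options above might be exercised as follows (project URIs and the cluster spec file are placeholders):

    # Run a local project with a parameter override
    mlflow run /path/to/project -P alpha=0.5 --experiment-name my-experiment
    # Run a Git-hosted project on Databricks, passing a cluster spec as backend config
    mlflow run https://github.com/<org>/<project>.git -b databricks -c cluster-spec.json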
+def _validate_server_args(gunicorn_opts=None, workers=None, waitress_opts=None):
+    if sys.platform == "win32":
+        if gunicorn_opts is not None or workers is not None:
+            raise NotImplementedError(
+                "waitress replaces gunicorn on Windows, "
+                "cannot specify --gunicorn-opts or --workers")
+    else:
+        if waitress_opts is not None:
+            raise NotImplementedError(
+                "gunicorn replaces waitress on non-Windows platforms, "
+                "cannot specify --waitress-opts")
+
+
 @cli.command()
-@click.option("--file-store", metavar="PATH", default=None,
-              help="The root of the backing file store for experiment and run data "
-                   "(default: ./mlruns).")
-@click.option("--host", "-h", metavar="HOST", default="127.0.0.1",
-              help="The network address to listen on (default: 127.0.0.1). "
-                   "Use 0.0.0.0 to bind to all addresses if you want to access the UI from "
-                   "other machines.")
+@click.option("--backend-store-uri", metavar="PATH",
+              default=DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH,
+              help="URI to which to persist experiment and run data. Acceptable URIs are "
+                   "SQLAlchemy-compatible database connection strings "
+                   "(e.g. 'sqlite:///path/to/file.db') or local filesystem URIs "
+                   "(e.g. 'file:///absolute/path/to/directory'). By default, data will be logged "
+                   "to the ./mlruns directory.")
+@click.option("--default-artifact-root", metavar="URI", default=None,
+              help="Path to local directory to store artifacts, for new experiments. "
+                   "Note that this flag does not impact already-created experiments. "
+                   "Default: " + DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)
 @click.option("--port", "-p", default=5000,
              help="The port to listen on (default: 5000).")
-def ui(file_store, host, port):
+def ui(backend_store_uri, default_artifact_root, port):
     """
-    Launch the MLflow tracking UI.
+    Launch the MLflow tracking UI for local viewing of run results. To launch a production
+    server, use the "mlflow server" command instead.
 
     The UI will be visible at http://localhost:5000 by default.
     """
+
+    # Ensure that both backend_store_uri and default_artifact_root are set correctly.
+    if not backend_store_uri:
+        backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
+
+    if not default_artifact_root:
+        if _is_local_uri(backend_store_uri):
+            default_artifact_root = backend_store_uri
+        else:
+            default_artifact_root = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
+
+    try:
+        _get_store(backend_store_uri, default_artifact_root)
+    except Exception as e:  # pylint: disable=broad-except
+        _logger.error("Error initializing backend store")
+        _logger.exception(e)
+        sys.exit(1)
+
     # TODO: We eventually want to disable the write path in this version of the server.
     try:
-        _run_server(file_store, file_store, host, port, 1, None)
+        _run_server(backend_store_uri, default_artifact_root, "127.0.0.1", port, None, 1)
     except ShellCommandException:
-        print("Running the mlflow server failed. Please see the logs above for details.",
-              file=sys.stderr)
+        eprint("Running the mlflow server failed. Please see the logs above for details.")
         sys.exit(1)
 
@@ -157,24 +219,29 @@ def _validate_static_prefix(ctx, param, value):  # pylint: disable=unused-argume
 
 
 @cli.command()
-@click.option("--file-store", metavar="PATH", default=None,
-              help="The root of the backing file store for experiment and run data "
-                   "(default: ./mlruns).")
+@click.option("--backend-store-uri", metavar="PATH",
+              default=DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH,
+              help="URI to which to persist experiment and run data. Acceptable URIs are "
+                   "SQLAlchemy-compatible database connection strings "
+                   "(e.g. 'sqlite:///path/to/file.db') or local filesystem URIs "
+                   "(e.g. 'file:///absolute/path/to/directory'). By default, data will be logged "
+                   "to the ./mlruns directory.")
 @click.option("--default-artifact-root", metavar="URI", default=None,
-              help="Local or S3 URI to store artifacts in, for newly created experiments. "
+              help="Local or S3 URI to store artifacts, for new experiments. "
                   "Note that this flag does not impact already-created experiments. "
-                   "Default: inside file store.")
-@click.option("--host", "-h", metavar="HOST", default="127.0.0.1",
-              help="The network address to listen on (default: 127.0.0.1). "
-                   "Use 0.0.0.0 to bind to all addresses if you want to access the tracking "
-                   "server from other machines.")
-@click.option("--port", "-p", default=5000,
-              help="The port to listen on (default: 5000).")
-@click.option("--workers", "-w", default=4,
-              help="Number of gunicorn worker processes to handle requests (default: 4).")
+                   "Default: Within file store, if a file:/ URI is provided. If a sql backend is"
+                   " used, then this option is required.")
+@cli_args.HOST
+@cli_args.PORT
+@cli_args.WORKERS
 @click.option("--static-prefix", default=None, callback=_validate_static_prefix,
              help="A prefix which will be prepended to the path of all static paths.")
-def server(file_store, default_artifact_root, host, port, workers, static_prefix):
+@click.option("--gunicorn-opts", default=None,
+              help="Additional command line options forwarded to gunicorn processes.")
+@click.option("--waitress-opts", default=None,
+              help="Additional command line options for waitress-serve.")
+def server(backend_store_uri, default_artifact_root, host, port,
+           workers, static_prefix, gunicorn_opts, waitress_opts):
     """
     Run the MLflow tracking server.
 
@@ -182,22 +249,43 @@
     the local machine. To let the server accept connections from other machines, you will need to
     pass --host 0.0.0.0 to listen on all network interfaces (or a specific interface address).
""" + + _validate_server_args(gunicorn_opts=gunicorn_opts, workers=workers, waitress_opts=waitress_opts) + + # Ensure that both backend_store_uri and default_artifact_uri are set correctly. + if not backend_store_uri: + backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH + + if not default_artifact_root: + if _is_local_uri(backend_store_uri): + default_artifact_root = backend_store_uri + else: + eprint("Option 'default-artifact-root' is required, when backend store is not " + "local file based.") + sys.exit(1) + try: - _run_server(file_store, default_artifact_root, host, port, workers, static_prefix) + _get_store(backend_store_uri, default_artifact_root) + except Exception as e: # pylint: disable=broad-except + _logger.error("Error initializing backend store") + _logger.exception(e) + sys.exit(1) + + try: + _run_server(backend_store_uri, default_artifact_root, host, port, + static_prefix, workers, gunicorn_opts, waitress_opts) except ShellCommandException: - print("Running the mlflow server failed. Please see the logs above for details.", - file=sys.stderr) + eprint("Running the mlflow server failed. Please see the logs above for details.") sys.exit(1) -cli.add_command(mlflow.sklearn.commands) -cli.add_command(mlflow.data.download) -cli.add_command(mlflow.pyfunc.cli.commands) -cli.add_command(mlflow.rfunc.cli.commands) +cli.add_command(mlflow.models.cli.commands) cli.add_command(mlflow.sagemaker.cli.commands) -cli.add_command(mlflow.azureml.cli.commands) cli.add_command(mlflow.experiments.commands) cli.add_command(mlflow.store.cli.commands) +cli.add_command(mlflow.azureml.cli.commands) +cli.add_command(mlflow.runs.commands) +cli.add_command(mlflow.db.commands) if __name__ == '__main__': cli() diff --git a/mlflow/data.py b/mlflow/data.py index 46debb191b7a1..e7186b3876681 100644 --- a/mlflow/data.py +++ b/mlflow/data.py @@ -3,16 +3,16 @@ import os import re -import boto3 -import click from six.moves import urllib from mlflow.utils import process DBFS_PREFIX = "dbfs:/" S3_PREFIX = "s3://" +GS_PREFIX = "gs://" DBFS_REGEX = re.compile("^%s" % re.escape(DBFS_PREFIX)) S3_REGEX = re.compile("^%s" % re.escape(S3_PREFIX)) +GS_REGEX = re.compile("^%s" % re.escape(GS_PREFIX)) class DownloadException(Exception): @@ -25,11 +25,19 @@ def _fetch_dbfs(uri, local_path): def _fetch_s3(uri, local_path): + import boto3 print("=== Downloading S3 object %s to local path %s ===" % (uri, os.path.abspath(local_path))) (bucket, s3_path) = parse_s3_uri(uri) boto3.client('s3').download_file(bucket, s3_path, local_path) +def _fetch_gs(uri, local_path): + from google.cloud import storage + print("=== Downloading GCS file %s to local path %s ===" % (uri, os.path.abspath(local_path))) + (bucket, gs_path) = parse_gs_uri(uri) + storage.Client().get_bucket(bucket).get_blob(gs_path).download_to_filename(local_path) + + def parse_s3_uri(uri): """Parse an S3 URI, returning (bucket, path)""" parsed = urllib.parse.urlparse(uri) @@ -41,6 +49,17 @@ def parse_s3_uri(uri): return parsed.netloc, path +def parse_gs_uri(uri): + """Parse an GCS URI, returning (bucket, path)""" + parsed = urllib.parse.urlparse(uri) + if parsed.scheme != "gs": + raise Exception("Not a GCS URI: %s" % uri) + path = parsed.path + if path.startswith('/'): + path = path[1:] + return parsed.netloc, path + + def is_uri(string): parsed_uri = urllib.parse.urlparse(string) return len(parsed_uri.scheme) > 0 @@ -51,20 +70,8 @@ def download_uri(uri, output_path): _fetch_dbfs(uri, output_path) elif S3_REGEX.match(uri): _fetch_s3(uri, output_path) + elif 
diff --git a/mlflow/db.py b/mlflow/db.py
new file mode 100644
index 0000000000000..90dbe1ba047ff
--- /dev/null
+++ b/mlflow/db.py
@@ -0,0 +1,24 @@
+import click
+
+import mlflow.store.db.utils
+
+
+@click.group("db")
+def commands():
+    """
+    Commands for managing an MLflow tracking database.
+    """
+    pass
+
+
+@commands.command()
+@click.argument("url")
+def upgrade(url):
+    """
+    Upgrade the schema of an MLflow tracking database to the latest supported version.
+    Note that schema migrations can be slow and are not guaranteed to be transactional -
+    always take a backup of your database before running migrations.
+    """
+    if mlflow.store.db.utils._is_initialized_before_mlflow_1(url):
+        mlflow.store.db.utils._upgrade_db_initialized_before_mlflow_1(url)
+    mlflow.store.db.utils._upgrade_db(url)
diff --git a/mlflow/entities/__init__.py b/mlflow/entities/__init__.py
index 2b152913f20c4..0d63aebf6e3af 100644
--- a/mlflow/entities/__init__.py
+++ b/mlflow/entities/__init__.py
@@ -14,6 +14,7 @@
 from mlflow.entities.run_tag import RunTag
 from mlflow.entities.source_type import SourceType
 from mlflow.entities.view_type import ViewType
+from mlflow.entities.lifecycle_stage import LifecycleStage
 
 __all__ = [
     "Experiment",
@@ -26,5 +27,6 @@
     "RunStatus",
     "RunTag",
     "SourceType",
-    "ViewType"
+    "ViewType",
+    "LifecycleStage"
 ]
diff --git a/mlflow/entities/_mlflow_object.py b/mlflow/entities/_mlflow_object.py
index 7d990db940070..2da7083af1e12 100644
--- a/mlflow/entities/_mlflow_object.py
+++ b/mlflow/entities/_mlflow_object.py
@@ -9,9 +9,8 @@ def __iter__(self):
         yield prop, self.__getattribute__(prop)
 
     @classmethod
-    @abstractmethod
     def _properties(cls):
-        pass
+        return sorted([p for p in cls.__dict__ if isinstance(getattr(cls, p), property)])
 
     @classmethod
     @abstractmethod
@@ -36,7 +35,6 @@ def get_classname(obj):
 
 class _MLflowObjectPrinter(object):
-    _MAX_LIST_LEN = 2
 
     def __init__(self):
         super(_MLflowObjectPrinter, self).__init__()
@@ -45,12 +43,6 @@ def __init__(self):
     def to_string(self, obj):
         if isinstance(obj, _MLflowObject):
             return "<%s: %s>" % (get_classname(obj), self._entity_to_string(obj))
-        # Handle nested lists inside MLflow entities (e.g. lists of metrics/params)
-        if isinstance(obj, list):
-            res = [self.to_string(elem) for elem in obj[:self._MAX_LIST_LEN]]
-            if len(obj) > self._MAX_LIST_LEN:
-                res.append("...")
-            return "[%s]" % ", ".join(res)
         return self.printer.pformat(obj)
 
     def _entity_to_string(self, entity):
diff --git a/mlflow/entities/experiment.py b/mlflow/entities/experiment.py
index 775bce216d084..d1a1953b1489c 100644
--- a/mlflow/entities/experiment.py
+++ b/mlflow/entities/experiment.py
@@ -6,9 +6,7 @@
 class Experiment(_MLflowObject):
     """
     Experiment object.
""" - DEFAULT_EXPERIMENT_ID = 0 - ACTIVE_LIFECYCLE = 'active' - DELETED_LIFECYCLE = 'deleted' + DEFAULT_EXPERIMENT_NAME = "Default" def __init__(self, experiment_id, name, artifact_location, lifecycle_stage): super(Experiment, self).__init__() @@ -19,7 +17,7 @@ def __init__(self, experiment_id, name, artifact_location, lifecycle_stage): @property def experiment_id(self): - """Integer ID of the experiment.""" + """String ID of the experiment.""" return self._experiment_id @property @@ -51,8 +49,3 @@ def to_proto(self): proto.artifact_location = self.artifact_location proto.lifecycle_stage = self.lifecycle_stage return proto - - @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... - return ["experiment_id", "name", "artifact_location", "lifecycle_stage"] diff --git a/mlflow/entities/file_info.py b/mlflow/entities/file_info.py index 068dea93e6c52..77928e95dc3cf 100644 --- a/mlflow/entities/file_info.py +++ b/mlflow/entities/file_info.py @@ -6,11 +6,17 @@ class FileInfo(_MLflowObject): """ Metadata about a file or directory. """ + def __init__(self, path, is_dir, file_size): self._path = path self._is_dir = is_dir self._bytes = file_size + def __eq__(self, other): + if type(other) is type(self): + return self.__dict__ == other.__dict__ + return False + @property def path(self): """String path of the file or directory.""" @@ -37,8 +43,3 @@ def to_proto(self): @classmethod def from_proto(cls, proto): return cls(proto.path, proto.is_dir, proto.file_size) - - @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... - return ["path", "is_dir", "file_size"] diff --git a/mlflow/entities/lifecycle_stage.py b/mlflow/entities/lifecycle_stage.py new file mode 100644 index 0000000000000..e5c6a4b2251c1 --- /dev/null +++ b/mlflow/entities/lifecycle_stage.py @@ -0,0 +1,35 @@ +from mlflow.entities.view_type import ViewType +from mlflow.exceptions import MlflowException + + +class LifecycleStage(object): + ACTIVE = "active" + DELETED = "deleted" + _VALID_STAGES = set([ACTIVE, DELETED]) + + @classmethod + def view_type_to_stages(cls, view_type=ViewType.ALL): + stages = [] + if view_type == ViewType.ACTIVE_ONLY or view_type == ViewType.ALL: + stages.append(cls.ACTIVE) + if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL: + stages.append(cls.DELETED) + return stages + + @classmethod + def is_valid(cls, lifecycle_stage): + return lifecycle_stage in cls._VALID_STAGES + + @classmethod + def matches_view_type(cls, view_type, lifecycle_stage): + if not cls.is_valid(lifecycle_stage): + raise MlflowException("Invalid lifecycle stage '%s'" % str(lifecycle_stage)) + + if view_type == ViewType.ALL: + return True + elif view_type == ViewType.ACTIVE_ONLY: + return lifecycle_stage == LifecycleStage.ACTIVE + elif view_type == ViewType.DELETED_ONLY: + return lifecycle_stage == LifecycleStage.DELETED + else: + raise MlflowException("Invalid view type '%s'" % str(view_type)) diff --git a/mlflow/entities/metric.py b/mlflow/entities/metric.py index f8850d5dc6ce6..e06f9878b9798 100644 --- a/mlflow/entities/metric.py +++ b/mlflow/entities/metric.py @@ -7,10 +7,11 @@ class Metric(_MLflowObject): Metric object. 
""" - def __init__(self, key, value, timestamp): + def __init__(self, key, value, timestamp, step): self._key = key self._value = value self._timestamp = timestamp + self._step = step @property def key(self): @@ -27,18 +28,19 @@ def timestamp(self): """Metric timestamp as an integer (milliseconds since the Unix epoch).""" return self._timestamp + @property + def step(self): + """Integer metric step (x-coordinate).""" + return self._step + def to_proto(self): metric = ProtoMetric() metric.key = self.key metric.value = self.value metric.timestamp = self.timestamp + metric.step = self.step return metric @classmethod def from_proto(cls, proto): - return cls(proto.key, proto.value, proto.timestamp) - - @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... - return ["key", "value", "timestamp"] + return cls(proto.key, proto.value, proto.timestamp, proto.step) diff --git a/mlflow/entities/param.py b/mlflow/entities/param.py index dcb3c90a282b8..fb61e139150f4 100644 --- a/mlflow/entities/param.py +++ b/mlflow/entities/param.py @@ -30,8 +30,3 @@ def to_proto(self): @classmethod def from_proto(cls, proto): return cls(proto.key, proto.value) - - @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... - return ["key", "value"] diff --git a/mlflow/entities/run.py b/mlflow/entities/run.py index 0d128a36a28f2..732af9e2ffdd5 100644 --- a/mlflow/entities/run.py +++ b/mlflow/entities/run.py @@ -1,6 +1,7 @@ from mlflow.entities._mlflow_object import _MLflowObject from mlflow.entities.run_data import RunData from mlflow.entities.run_info import RunInfo +from mlflow.exceptions import MlflowException from mlflow.protos.service_pb2 import Run as ProtoRun @@ -11,7 +12,7 @@ class Run(_MLflowObject): def __init__(self, run_info, run_data): if run_info is None: - raise Exception("run_info cannot be None") + raise MlflowException("run_info cannot be None") self._info = run_info self._data = run_data @@ -44,17 +45,10 @@ def to_proto(self): def from_proto(cls, proto): return cls(RunInfo.from_proto(proto.info), RunData.from_proto(proto.data)) - @classmethod - def from_dictionary(cls, the_dict): - if "info" not in the_dict or "data" not in the_dict: - raise Exception("Malformed input '%s'. Run cannot be constructed." % str(the_dict)) - the_info = RunInfo.from_dictionary(the_dict.get("info")) - the_data = RunData.from_dictionary(the_dict.get("data")) - return cls(the_info, the_data) - def to_dictionary(self): - return {"info": dict(self.info), "data": dict(self.data)} - - @classmethod - def _properties(cls): - return ["info", "data"] + run_dict = { + "info": dict(self.info), + } + if self.data: + run_dict["data"] = self.data.to_dictionary() + return run_dict diff --git a/mlflow/entities/run_data.py b/mlflow/entities/run_data.py index 3b4b3a069a4f9..acf48d9d912a1 100644 --- a/mlflow/entities/run_data.py +++ b/mlflow/entities/run_data.py @@ -2,7 +2,8 @@ from mlflow.entities.metric import Metric from mlflow.entities.param import Param from mlflow.entities.run_tag import RunTag -from mlflow.protos.service_pb2 import RunData as ProtoRunData +from mlflow.protos.service_pb2 import RunData as ProtoRunData, Param as ProtoParam,\ + RunTag as ProtoRunTag class RunData(_MLflowObject): @@ -10,50 +11,62 @@ class RunData(_MLflowObject): Run data (metrics and parameters). 
""" def __init__(self, metrics=None, params=None, tags=None): - self._metrics = [] - self._params = [] - self._tags = [] - if metrics is not None: - for m in metrics: - self._add_metric(m) - if params is not None: - for p in params: - self._add_param(p) - if tags is not None: - for t in tags: - self._add_tag(t) + """ + Construct a new :py:class:`mlflow.entities.RunData` instance. + :param metrics: List of :py:class:`mlflow.entities.Metric`. + :param params: List of :py:class:`mlflow.entities.Param`. + :param tags: List of :py:class:`mlflow.entities.RunTag`. + """ + # Maintain the original list of metrics so that we can easily convert it back to + # protobuf + self._metric_objs = metrics or [] + self._metrics = {metric.key: metric.value for metric in self._metric_objs} + self._params = {param.key: param.value for param in (params or [])} + self._tags = {tag.key: tag.value for tag in (tags or [])} @property def metrics(self): - """List of :py:class:`mlflow.entities.Metric` for the current run.""" + """ + Dictionary of string key -> metric value for the current run. + For each metric key, the metric value with the latest timestamp is returned. In case there + are multiple values with the same latest timestamp, the maximum of these values is returned. + """ return self._metrics @property def params(self): - """List of :py:class:`mlflow.entities.Param` for the current run.""" + """Dictionary of param key (string) -> param value for the current run.""" return self._params @property def tags(self): - """List of :py:class:`mlflow.entities.RunTag` for the current run.""" + """Dictionary of tag key (string) -> tag value for the current run.""" return self._tags def _add_metric(self, metric): - self._metrics.append(metric) + self._metrics[metric.key] = metric.value + self._metric_objs.append(metric) def _add_param(self, param): - self._params.append(param) + self._params[param.key] = param.value def _add_tag(self, tag): - self._tags.append(tag) + self._tags[tag.key] = tag.value def to_proto(self): run_data = ProtoRunData() - run_data.metrics.extend([m.to_proto() for m in self.metrics]) - run_data.params.extend([p.to_proto() for p in self.params]) - run_data.tags.extend([t.to_proto() for t in self.tags]) + run_data.metrics.extend([m.to_proto() for m in self._metric_objs]) + run_data.params.extend([ProtoParam(key=key, value=val) for key, val in self.params.items()]) + run_data.tags.extend([ProtoRunTag(key=key, value=val) for key, val in self.tags.items()]) return run_data + def to_dictionary(self): + return { + "metrics": self.metrics, + "params": self.params, + "tags": self.tags, + } + @classmethod def from_proto(cls, proto): run_data = cls() @@ -64,21 +77,4 @@ def from_proto(cls, proto): run_data._add_param(Param.from_proto(proto_param)) for proto_tag in proto.tags: run_data._add_tag(RunTag.from_proto(proto_tag)) - - return run_data - - @classmethod - def from_dictionary(cls, the_dict): - run_data = cls() - for p in the_dict.get("metrics", []): - run_data._add_metric(p) - for p in the_dict.get("params", []): - run_data._add_param(p) - for t in the_dict.get("tags", []): - run_data._add_tag(t) return run_data - - @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... 
- return ["metrics", "params", "tags"] diff --git a/mlflow/entities/run_info.py b/mlflow/entities/run_info.py index 38816655ef3e9..950f22b22afc5 100644 --- a/mlflow/entities/run_info.py +++ b/mlflow/entities/run_info.py @@ -1,58 +1,62 @@ +from mlflow.entities.run_status import RunStatus from mlflow.entities._mlflow_object import _MLflowObject +from mlflow.entities.lifecycle_stage import LifecycleStage from mlflow.exceptions import MlflowException from mlflow.protos.service_pb2 import RunInfo as ProtoRunInfo def check_run_is_active(run_info): - if run_info.lifecycle_stage != RunInfo.ACTIVE_LIFECYCLE: - raise MlflowException('The run {} must be in an active lifecycle_stage.' - .format(run_info.run_uuid)) + if run_info.lifecycle_stage != LifecycleStage.ACTIVE: + raise MlflowException("The run {} must be in 'active' lifecycle_stage." + .format(run_info.run_id)) def check_run_is_deleted(run_info): - if run_info.lifecycle_stage != RunInfo.DELETED_LIFECYCLE: - raise MlflowException('The run {} must be in an deleted lifecycle_stage.' - .format(run_info.run_uuid)) + if run_info.lifecycle_stage != LifecycleStage.DELETED: + raise MlflowException("The run {} must be in 'deleted' lifecycle_stage." + .format(run_info.run_id)) + + +class searchable_attribute(property): + # Wrapper class over property to designate some of the properties as searchable + # run attributes + pass + + +class orderable_attribute(property): + # Wrapper class over property to designate some of the properties as orderable + # run attributes + pass class RunInfo(_MLflowObject): """ Metadata about a run. """ - ACTIVE_LIFECYCLE = "active" - DELETED_LIFECYCLE = "deleted" - def __init__(self, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, lifecycle_stage, - artifact_uri=None): + def __init__(self, run_uuid, experiment_id, user_id, status, start_time, end_time, + lifecycle_stage, artifact_uri=None, run_id=None): if run_uuid is None: raise Exception("run_uuid cannot be None") if experiment_id is None: raise Exception("experiment_id cannot be None") - if name is None: - raise Exception("name cannot be None") - if source_type is None: - raise Exception("source_type cannot be None") - if source_name is None: - raise Exception("source_name cannot be None") if user_id is None: raise Exception("user_id cannot be None") if status is None: raise Exception("status cannot be None") if start_time is None: raise Exception("start_time cannot be None") - self._run_uuid = run_uuid + actual_run_id = run_id or run_uuid + if actual_run_id is None: + raise Exception("run_id and run_uuid cannot both be None") + self._run_uuid = actual_run_id + self._run_id = actual_run_id self._experiment_id = experiment_id - self._name = name - self._source_type = source_type - self._source_name = source_name - self._entry_point_name = entry_point_name self._user_id = user_id self._status = status self._start_time = start_time self._end_time = end_time - self._source_version = source_version self._lifecycle_stage = lifecycle_stage self._artifact_uri = artifact_uri @@ -75,45 +79,25 @@ def _copy_with_overrides(self, status=None, end_time=None, lifecycle_stage=None) @property def run_uuid(self): - """String containing run UUID.""" + """[Deprecated, use run_id instead] String containing run UUID.""" return self._run_uuid @property - def experiment_id(self): - """Integer ID of the experiment for the current run.""" - return self._experiment_id + def run_id(self): + """String containing run 
id.""" + return self._run_id @property - def name(self): - """String name of the run.""" - return self._name - - @property - def source_type(self): - """ - :py:class:`mlflow.entities.SourceType` describing the source of the run. - """ - return self._source_type - - @property - def source_name(self): - """ - String name of the source of the run (GitHub URI of the project corresponding to the run, - etc). - """ - return self._source_name - - @property - def entry_point_name(self): - """String name of the entry point for the run.""" - return self._entry_point_name + def experiment_id(self): + """String ID of the experiment for the current run.""" + return self._experiment_id @property def user_id(self): """String ID of the user who initiated this run.""" return self._user_id - @property + @searchable_attribute def status(self): """ One of the values in :py:class:`mlflow.entities.RunStatus` @@ -121,7 +105,7 @@ def status(self): """ return self._status - @property + @orderable_attribute def start_time(self): """Start time of the run, in number of milliseconds since the UNIX epoch.""" return self._start_time @@ -131,12 +115,7 @@ def end_time(self): """End time of the run, in number of milliseconds since the UNIX epoch.""" return self._end_time - @property - def source_version(self): - """String Git commit hash of the code used for the run, if available.""" - return self._source_version - - @property + @searchable_attribute def artifact_uri(self): """String root artifact URI of the run.""" return self._artifact_uri @@ -148,19 +127,13 @@ def lifecycle_stage(self): def to_proto(self): proto = ProtoRunInfo() proto.run_uuid = self.run_uuid + proto.run_id = self.run_id proto.experiment_id = self.experiment_id - proto.name = self.name - proto.source_type = self.source_type - proto.source_name = self.source_name - if self.entry_point_name: - proto.entry_point_name = self.entry_point_name proto.user_id = self.user_id - proto.status = self.status + proto.status = RunStatus.from_string(self.status) proto.start_time = self.start_time if self.end_time: proto.end_time = self.end_time - if self.source_version: - proto.source_version = self.source_version if self.artifact_uri: proto.artifact_uri = self.artifact_uri proto.lifecycle_stage = self.lifecycle_stage @@ -168,14 +141,24 @@ def to_proto(self): @classmethod def from_proto(cls, proto): - return cls(proto.run_uuid, proto.experiment_id, proto.name, proto.source_type, - proto.source_name, proto.entry_point_name, proto.user_id, proto.status, - proto.start_time, proto.end_time, proto.source_version, proto.lifecycle_stage, - proto.artifact_uri) + end_time = proto.end_time + # The proto2 default scalar value of zero indicates that the run's end time is absent. + # An absent end time is represented with a NoneType in the `RunInfo` class + if end_time == 0: + end_time = None + return cls(run_uuid=proto.run_uuid, run_id=proto.run_id, experiment_id=proto.experiment_id, + user_id=proto.user_id, status=RunStatus.to_string(proto.status), + start_time=proto.start_time, end_time=end_time, + lifecycle_stage=proto.lifecycle_stage, artifact_uri=proto.artifact_uri) + + @classmethod + def get_searchable_attributes(cls): + return sorted([p for p in cls.__dict__ + if isinstance(getattr(cls, p), searchable_attribute)]) @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... 
- return ["run_uuid", "experiment_id", "name", "source_type", "source_name", - "entry_point_name", "user_id", "status", "start_time", "end_time", - "source_version", "lifecycle_stage", "artifact_uri"] + def get_orderable_attributes(cls): + # Note that all searchable attributes are also orderable. + return sorted([p for p in cls.__dict__ + if isinstance(getattr(cls, p), searchable_attribute) or + isinstance(getattr(cls, p), orderable_attribute)]) diff --git a/mlflow/entities/run_status.py b/mlflow/entities/run_status.py index 3a7c6a85a25b9..bbe311ea3abc5 100644 --- a/mlflow/entities/run_status.py +++ b/mlflow/entities/run_status.py @@ -1,14 +1,17 @@ +from mlflow.protos.service_pb2 import RunStatus as ProtoRunStatus + + class RunStatus(object): """Enum for status of an :py:class:`mlflow.entities.Run`.""" - RUNNING, SCHEDULED, FINISHED, FAILED = range(1, 5) - _STRING_TO_STATUS = { - "RUNNING": RUNNING, - "SCHEDULED": SCHEDULED, - "FINISHED": FINISHED, - "FAILED": FAILED, - } + RUNNING = ProtoRunStatus.Value('RUNNING') + SCHEDULED = ProtoRunStatus.Value('SCHEDULED') + FINISHED = ProtoRunStatus.Value('FINISHED') + FAILED = ProtoRunStatus.Value('FAILED') + KILLED = ProtoRunStatus.Value('KILLED') + + _STRING_TO_STATUS = {k: ProtoRunStatus.Value(k) for k in ProtoRunStatus.keys()} _STATUS_TO_STRING = {value: key for key, value in _STRING_TO_STATUS.items()} - _TERMINATED_STATUSES = set([FINISHED, FAILED]) + _TERMINATED_STATUSES = set([FINISHED, FAILED, KILLED]) @staticmethod def from_string(status_str): @@ -28,3 +31,7 @@ def to_string(status): @staticmethod def is_terminated(status): return status in RunStatus._TERMINATED_STATUSES + + @staticmethod + def all_status(): + return list(RunStatus._STATUS_TO_STRING.keys()) diff --git a/mlflow/entities/run_tag.py b/mlflow/entities/run_tag.py index 5615b134ca851..aa7b734c94d37 100644 --- a/mlflow/entities/run_tag.py +++ b/mlflow/entities/run_tag.py @@ -33,8 +33,3 @@ def to_proto(self): @classmethod def from_proto(cls, proto): return cls(proto.key, proto.value) - - @classmethod - def _properties(cls): - # TODO: Hard coding this list of props for now. There has to be a clearer way... - return ["key", "value"] diff --git a/mlflow/entities/source_type.py b/mlflow/entities/source_type.py index f99bf6cac4e56..cd42aad8cfa8b 100644 --- a/mlflow/entities/source_type.py +++ b/mlflow/entities/source_type.py @@ -1,3 +1,27 @@ class SourceType(object): """Enum for originating source of a :py:class:`mlflow.entities.Run`.""" NOTEBOOK, JOB, PROJECT, LOCAL, UNKNOWN = range(1, 6) + + _STRING_TO_SOURCETYPE = { + "NOTEBOOK": NOTEBOOK, + "JOB": JOB, + "PROJECT": PROJECT, + "LOCAL": LOCAL, + "UNKNOWN": UNKNOWN + } + SOURCETYPE_TO_STRING = {value: key for key, value in _STRING_TO_SOURCETYPE.items()} + + @staticmethod + def from_string(status_str): + if status_str not in SourceType._STRING_TO_SOURCETYPE: + raise Exception( + "Could not get run status corresponding to string %s. Valid run " + "status strings: %s" % (status_str, list(SourceType._STRING_TO_SOURCETYPE.keys()))) + return SourceType._STRING_TO_SOURCETYPE[status_str] + + @staticmethod + def to_string(status): + if status not in SourceType.SOURCETYPE_TO_STRING: + raise Exception("Could not get string corresponding to run status %s. 
Valid run " + "statuses: %s" % (status, list(SourceType.SOURCETYPE_TO_STRING.keys()))) + return SourceType.SOURCETYPE_TO_STRING[status] diff --git a/mlflow/exceptions.py b/mlflow/exceptions.py index 689431f810ccd..6f7b0167117fd 100644 --- a/mlflow/exceptions.py +++ b/mlflow/exceptions.py @@ -1,6 +1,22 @@ import json -from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, ErrorCode +from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, TEMPORARILY_UNAVAILABLE, \ + ENDPOINT_NOT_FOUND, PERMISSION_DENIED, REQUEST_LIMIT_EXCEEDED, BAD_REQUEST, \ + INVALID_PARAMETER_VALUE, RESOURCE_DOES_NOT_EXIST, INVALID_STATE, RESOURCE_ALREADY_EXISTS, \ + ErrorCode + +ERROR_CODE_TO_HTTP_STATUS = { + ErrorCode.Name(INTERNAL_ERROR): 500, + ErrorCode.Name(INVALID_STATE): 500, + ErrorCode.Name(TEMPORARILY_UNAVAILABLE): 503, + ErrorCode.Name(REQUEST_LIMIT_EXCEEDED): 429, + ErrorCode.Name(ENDPOINT_NOT_FOUND): 404, + ErrorCode.Name(RESOURCE_DOES_NOT_EXIST): 404, + ErrorCode.Name(PERMISSION_DENIED): 403, + ErrorCode.Name(BAD_REQUEST): 400, + ErrorCode.Name(RESOURCE_ALREADY_EXISTS): 400, + ErrorCode.Name(INVALID_PARAMETER_VALUE): 400 +} class MlflowException(Exception): @@ -10,33 +26,49 @@ class MlflowException(Exception): for debugging purposes. If the error text is sensitive, raise a generic `Exception` object instead. """ - def __init__(self, message, error_code=INTERNAL_ERROR): + def __init__(self, message, error_code=INTERNAL_ERROR, **kwargs): + """ + :param message: The message describing the error that occured. This will be included in the + exception's serialized JSON representation. + :param error_code: An appropriate error code for the error that occured; it will be included + in the exception's serialized JSON representation. This should be one of + the codes listed in the `mlflow.protos.databricks_pb2` proto. + :param kwargs: Additional key-value pairs to include in the serialized JSON representation + of the MlflowException. 
+ """ try: self.error_code = ErrorCode.Name(error_code) except (ValueError, TypeError): self.error_code = ErrorCode.Name(INTERNAL_ERROR) self.message = message + self.json_kwargs = kwargs super(MlflowException, self).__init__(message) def serialize_as_json(self): - return json.dumps({'error_code': self.error_code, 'message': self.message}) + exception_dict = {'error_code': self.error_code, 'message': self.message} + exception_dict.update(self.json_kwargs) + return json.dumps(exception_dict) + + def get_http_status_code(self): + return ERROR_CODE_TO_HTTP_STATUS.get(self.error_code, 500) class RestException(MlflowException): """Exception thrown on non 200-level responses from the REST API""" def __init__(self, json): - error_code = json['error_code'] - message = error_code - if 'message' in json: - message = "%s: %s" % (error_code, json['message']) + error_code = json.get('error_code', INTERNAL_ERROR) + message = "%s: %s" % (error_code, + json['message'] if 'message' in json else "Response: " + str(json)) + super(RestException, self).__init__(message, error_code=error_code) self.json = json -class IllegalArtifactPathError(MlflowException): - """The artifact_path parameter was invalid.""" - - class ExecutionException(MlflowException): """Exception thrown when executing a project fails.""" pass + + +class MissingConfigException(MlflowException): + """Exception thrown when expected configuration file/directory not found""" + pass diff --git a/mlflow/experiments.py b/mlflow/experiments.py index 4be42b7f5e3cf..55edff5d63cf3 100644 --- a/mlflow/experiments.py +++ b/mlflow/experiments.py @@ -9,6 +9,8 @@ from mlflow.entities import ViewType from mlflow.tracking import _get_store +EXPERIMENT_ID = click.option("--experiment-id", "-x", type=click.STRING, required=True) + @click.group("experiments") def commands(): @@ -20,7 +22,7 @@ def commands(): @commands.command() -@click.argument("experiment_name") +@click.option("--experiment-name", "-n", type=click.STRING, required=True) @click.option("--artifact-location", "-l", help="Base location for runs to store artifact results. Artifacts will be stored " "at $artifact_location/$run_id/artifacts. See " @@ -29,11 +31,18 @@ def commands(): "If no location is provided, the tracking server will pick a default.") def create(experiment_name, artifact_location): """ - Create an experiment in the configured tracking server. + Create an experiment. + + All artifacts generated by runs related to this experiment will be stored under artifact + location, organized under specific run_id sub-directories. + + Implementation of experiment and metadata store is dependent on backend storage. ``FileStore`` + creates a folder for each experiment ID and stores metadata in ``meta.yaml``. Runs are stored + as subfolders. """ store = _get_store() exp_id = store.create_experiment(experiment_name, artifact_location) - print("Created experiment '%s' with id %d" % (experiment_name, exp_id)) + print("Created experiment '%s' with id %s" % (experiment_name, exp_id)) @commands.command("list") @@ -53,12 +62,22 @@ def list_experiments(view): @commands.command("delete") -@click.argument("experiment_id") +@EXPERIMENT_ID def delete_experiment(experiment_id): """ - Mark an experiment for deletion. Return an error if the experiment does not exist or - is already marked. You can restore a marked experiment with ``restore_experiment``, - or permanently delete an experiment in the backend store. + Mark an active experiment for deletion. 
This also applies to the experiment's metadata, runs and + associated data, and artifacts if they are stored in the default location. Use the ``list`` command to + view the artifact location. The command throws an error if the experiment is not found or is already + marked for deletion. + + Experiments marked for deletion can be restored using the ``restore`` command, unless they are + permanently deleted. + + The specific implementation of deletion depends on the backend store. ``FileStore`` moves + experiments marked for deletion under a ``.trash`` folder under the main folder used to + instantiate ``FileStore``. Experiments marked for deletion can be permanently deleted by + clearing the ``.trash`` folder. It is recommended to use a ``cron`` job or an alternate + workflow mechanism to clear the ``.trash`` folder. """ store = _get_store() store.delete_experiment(experiment_id) @@ -66,11 +85,12 @@ def delete_experiment(experiment_id): @commands.command("restore") -@click.argument("experiment_id") +@EXPERIMENT_ID def restore_experiment(experiment_id): """ - Restore a deleted experiment. - Returns an error if the experiment is active or has been permanently deleted. + Restore a deleted experiment. This also applies to the experiment's metadata, runs and + associated data. The command throws an error if the experiment is already active, cannot be + found, or has been permanently deleted. """ store = _get_store() store.restore_experiment(experiment_id) @@ -78,8 +98,8 @@ def restore_experiment(experiment_id): @commands.command("rename") -@click.argument("experiment_id") -@click.argument("new_name") +@EXPERIMENT_ID +@click.option("--new-name", type=click.STRING, required=True) def rename_experiment(experiment_id, new_name): """ Renames an active experiment. diff --git a/mlflow/h2o.py b/mlflow/h2o.py index 59a7dc54efaa7..567cdd2d3e495 100644 --- a/mlflow/h2o.py +++ b/mlflow/h2o.py @@ -13,9 +13,29 @@ import os import yaml +import mlflow from mlflow import pyfunc from mlflow.models import Model -import mlflow.tracking +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration + +FLAVOR_NAME = "h2o" + + +def get_default_conda_env(): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. + """ + import h2o + + return _mlflow_conda_env( + additional_conda_deps=None, + additional_pip_deps=[ + "h2o=={}".format(h2o.__version__), + ], + additional_conda_channels=None) def save_model(h2o_model, path, conda_env=None, mlflow_model=Model(), settings=None): @@ -24,6 +44,25 @@ :param h2o_model: H2O model to be saved. :param path: Local path where the model is to be saved. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this describes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If ``None``, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + {'pip': [ + 'h2o==3.20.0.8' + ]} + ] + } + :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to. 
""" import h2o @@ -31,11 +70,12 @@ def save_model(h2o_model, path, conda_env=None, mlflow_model=Model(), settings=N path = os.path.abspath(path) if os.path.exists(path): raise Exception("Path '{}' already exists".format(path)) - model_dir = os.path.join(path, "model.h2o") - os.makedirs(model_dir) + model_data_subpath = "model.h2o" + model_data_path = os.path.join(path, model_data_subpath) + os.makedirs(model_data_path) # Save h2o-model - h2o_save_location = h2o.save_model(model=h2o_model, path=model_dir, force=True) + h2o_save_location = h2o.save_model(model=h2o_model, path=model_data_path, force=True) model_file = os.path.basename(h2o_save_location) # Save h2o-settings @@ -43,30 +83,59 @@ def save_model(h2o_model, path, conda_env=None, mlflow_model=Model(), settings=N settings = {} settings['full_file'] = h2o_save_location settings['model_file'] = model_file - settings['model_dir'] = model_dir - with open(os.path.join(model_dir, "h2o.yaml"), 'w') as settings_file: + settings['model_dir'] = model_data_path + with open(os.path.join(model_data_path, "h2o.yaml"), 'w') as settings_file: yaml.safe_dump(settings, stream=settings_file) + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env() + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + pyfunc.add_to_model(mlflow_model, loader_module="mlflow.h2o", - data="model.h2o", env=conda_env) - mlflow_model.add_flavor("h2o", saved_model=model_file, h2o_version=h2o.__version__) + data=model_data_subpath, env=conda_env_subpath) + mlflow_model.add_flavor(FLAVOR_NAME, h2o_version=h2o.__version__, data=model_data_subpath) mlflow_model.save(os.path.join(path, "MLmodel")) -def log_model(h2o_model, artifact_path, **kwargs): +def log_model(h2o_model, artifact_path, conda_env=None, **kwargs): """ Log an H2O model as an MLflow artifact for the current run. :param h2o_model: H2O model to be saved. :param artifact_path: Run-relative artifact path. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this decribes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If ``None``, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'pip': [ + 'h2o==3.20.0.8' + ] + ] + } + :param kwargs: kwargs to pass to ``h2o.save_model`` method. """ Model.log(artifact_path=artifact_path, flavor=mlflow.h2o, - h2o_model=h2o_model, **kwargs) + h2o_model=h2o_model, conda_env=conda_env, **kwargs) def _load_model(path, init=False): import h2o + path = os.path.abspath(path) with open(os.path.join(path, "h2o.yaml")) as f: params = yaml.safe_load(f.read()) @@ -82,6 +151,7 @@ def __init__(self, h2o_model): def predict(self, dataframe): import h2o + predicted = self.h2o_model.predict(h2o.H2OFrame(dataframe)).as_data_frame() predicted.index = dataframe.index return predicted @@ -90,19 +160,34 @@ def predict(self, dataframe): def _load_pyfunc(path): """ Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + + :param path: Local filesystem path to the MLflow Model with the ``h2o`` flavor. 
""" return _H2OModelWrapper(_load_model(path, init=True)) -def load_model(path, run_id=None): +def load_model(model_uri): """ Load an H2O model from a local file (if ``run_id`` is ``None``) or a run. This function expects there is an H2O instance initialised with ``h2o.init``. - :param path: Local filesystem path or run-relative artifact path to the model saved - by :py:func:`mlflow.h2o.save_model`. - :param run_id: Run ID. If provided, combined with ``path`` to identify the model. + :param model_uri: The location, in URI format, of the MLflow model. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :return: An `H2OEstimator model object + `_. """ - if run_id is not None: - path = mlflow.tracking.utils._get_model_log_dir(model_name=path, run_id=run_id) - return _load_model(os.path.join(path, "model.h2o")) + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) + # Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a + # `data` key; in this case, we assume the model artifact path to be `model.h2o` + h2o_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.h2o")) + return _load_model(path=h2o_model_file_path) diff --git a/mlflow/java/client/README.md b/mlflow/java/client/README.md index c32d473666b31..6392e7a2bbf09 100644 --- a/mlflow/java/client/README.md +++ b/mlflow/java/client/README.md @@ -2,7 +2,7 @@ Java client for [MLflow](https://mlflow.org) REST API. See also the MLflow [Python API](https://mlflow.org/docs/latest/python_api/index.html) -and [REST API](https://mlflow.org/docs/latest/rest_api.html). +and [REST API](https://mlflow.org/docs/latest/rest-api.html). ## Requirements @@ -46,33 +46,33 @@ See [ApiClient.java](src/main/java/org/mlflow/client/ApiClient.java) and [Service.java domain objects](src/main/java/org/mlflow/api/proto/mlflow/Service.java). 
``` -Run getRun(String runUuid) +Run getRun(String runId) RunInfo createRun() -RunInfo createRun(long experimentId) -RunInfo createRun(long experimentId, String appName) +RunInfo createRun(String experimentId) +RunInfo createRun(String experimentId, String appName) -List<RunInfo> listRunInfos(long experimentId) +List<RunInfo> listRunInfos(String experimentId) List<Experiment> listExperiments() -GetExperiment.Response getExperiment(long experimentId) +GetExperiment.Response getExperiment(String experimentId) Optional<Experiment> getExperimentByName(String experimentName) long createExperiment(String experimentName) -void logParam(String runUuid, String key, String value) -void logMetric(String runUuid, String key, float value) -void setTerminated(String runUuid) -void setTerminated(String runUuid, RunStatus status) -void setTerminated(String runUuid, RunStatus status, long endTime) -ListArtifacts.Response listArtifacts(String runUuid, String path) +void logParam(String runId, String key, String value) +void logMetric(String runId, String key, float value) +void setTerminated(String runId) +void setTerminated(String runId, RunStatus status) +void setTerminated(String runId, RunStatus status, long endTime) +ListArtifacts.Response listArtifacts(String runId, String path) ``` ## Usage ### Java Usage -For a simple example see [QuickStartDriver.java](src/main/java/org/mlflow/client/samples/QuickStartDriver.java). -For full examples of API coverage see the [tests](src/test/java/org/mlflow/client) such as [ApiClientTest.java](src/test/java/org/mlflow/client/ApiClientTest.java). +For a simple example see [QuickStartDriver.java](src/main/java/org/mlflow/tracking/samples/QuickStartDriver.java). +For full examples of API coverage see the [tests](src/test/java/org/mlflow/tracking) such as [MlflowClientTest.java](src/test/java/org/mlflow/tracking/MlflowClientTest.java). 
``` package org.mlflow.tracking.samples; @@ -110,7 +110,7 @@ public class QuickStartDriver { System.out.println("====== createExperiment"); String expName = "Exp_" + System.currentTimeMillis(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); System.out.println("createExperiment: expId=" + expId); System.out.println("====== getExperiment"); @@ -133,7 +133,7 @@ public class QuickStartDriver { System.out.println("getExperimentByName: " + exp3); } - void createRun(MlflowClient client, long expId) { + void createRun(MlflowClient client, String expId) { System.out.println("====== createRun"); // Create run diff --git a/mlflow/java/client/pom.xml b/mlflow/java/client/pom.xml index 3cd6d4fefc0fd..b05d5aec20832 100644 --- a/mlflow/java/client/pom.xml +++ b/mlflow/java/client/pom.xml @@ -5,13 +5,12 @@ org.mlflow mlflow-parent - 0.7.1 + 1.0.0 ../pom.xml mlflow-client jar - 0.7.1 MLflow Tracking API http://mlflow.org @@ -63,6 +62,11 @@ slf4j-jdk14 test + + org.mockito + mockito-core + test + @@ -137,6 +141,15 @@ ${mlflow.shade.packageName}.ini4j + + + + public-suffix-list.txt + log4j.properties + .proto + + + diff --git a/mlflow/java/client/src/main/java/com/databricks/api/proto/databricks/Databricks.java b/mlflow/java/client/src/main/java/com/databricks/api/proto/databricks/Databricks.java index 2589673c304af..cc408b6a0b332 100644 --- a/mlflow/java/client/src/main/java/com/databricks/api/proto/databricks/Databricks.java +++ b/mlflow/java/client/src/main/java/com/databricks/api/proto/databricks/Databricks.java @@ -31,7 +31,7 @@ public static void registerAllExtensions( * Visibility defines who is allowed to use the RPC. * * - * Protobuf enum {@code Visibility} + * Protobuf enum {@code mlflow.Visibility} */ public enum Visibility implements com.google.protobuf.ProtocolMessageEnum { @@ -152,11 +152,11 @@ private Visibility(int value) { this.value = value; } - // @@protoc_insertion_point(enum_scope:Visibility) + // @@protoc_insertion_point(enum_scope:mlflow.Visibility) } /** - * Protobuf enum {@code ErrorCode} + * Protobuf enum {@code mlflow.ErrorCode} */ public enum ErrorCode implements com.google.protobuf.ProtocolMessageEnum { @@ -584,33 +584,33 @@ private ErrorCode(int value) { this.value = value; } - // @@protoc_insertion_point(enum_scope:ErrorCode) + // @@protoc_insertion_point(enum_scope:mlflow.ErrorCode) } public interface DatabricksRpcOptionsOrBuilder extends - // @@protoc_insertion_point(interface_extends:DatabricksRpcOptions) + // @@protoc_insertion_point(interface_extends:mlflow.DatabricksRpcOptions) com.google.protobuf.MessageOrBuilder { /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ java.util.List getEndpointsList(); /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ com.databricks.api.proto.databricks.Databricks.HttpEndpoint getEndpoints(int index); /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ int getEndpointsCount(); /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ java.util.List getEndpointsOrBuilderList(); /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpointsOrBuilder( int index); @@ -620,7 +620,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * Indicates which users are allowed 
to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ boolean hasVisibility(); /** @@ -628,7 +628,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ com.databricks.api.proto.databricks.Databricks.Visibility getVisibility(); @@ -638,7 +638,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ java.util.List getErrorCodesList(); /** @@ -647,7 +647,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ int getErrorCodesCount(); /** @@ -656,7 +656,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCodes(int index); @@ -665,7 +665,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ boolean hasRateLimit(); /** @@ -673,7 +673,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ com.databricks.api.proto.databricks.Databricks.RateLimit getRateLimit(); /** @@ -681,7 +681,7 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ com.databricks.api.proto.databricks.Databricks.RateLimitOrBuilder getRateLimitOrBuilder(); @@ -720,11 +720,11 @@ com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpoint * direct RPCs to endpoints, as well as other metadata about the RPC. * * - * Protobuf type {@code DatabricksRpcOptions} + * Protobuf type {@code mlflow.DatabricksRpcOptions} */ public static final class DatabricksRpcOptions extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:DatabricksRpcOptions) + // @@protoc_insertion_point(message_implements:mlflow.DatabricksRpcOptions) DatabricksRpcOptionsOrBuilder { private static final long serialVersionUID = 0L; // Use DatabricksRpcOptions.newBuilder() to construct. 
@@ -864,13 +864,13 @@ private DatabricksRpcOptions( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksRpcOptions_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksRpcOptions_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksRpcOptions_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksRpcOptions_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions.class, com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions.Builder.class); } @@ -879,32 +879,32 @@ private DatabricksRpcOptions( public static final int ENDPOINTS_FIELD_NUMBER = 1; private java.util.List endpoints_; /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public java.util.List getEndpointsList() { return endpoints_; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public java.util.List getEndpointsOrBuilderList() { return endpoints_; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public int getEndpointsCount() { return endpoints_.size(); } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint getEndpoints(int index) { return endpoints_.get(index); } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpointsOrBuilder( int index) { @@ -918,7 +918,7 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getE * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ public boolean hasVisibility() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -928,7 +928,7 @@ public boolean hasVisibility() { * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ public com.databricks.api.proto.databricks.Databricks.Visibility getVisibility() { @SuppressWarnings("deprecation") @@ -954,7 +954,7 @@ public com.databricks.api.proto.databricks.Databricks.ErrorCode convert(java.lan * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public java.util.List getErrorCodesList() { return new com.google.protobuf.Internal.ListAdapter< @@ -966,7 +966,7 @@ public java.util.List * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public int getErrorCodesCount() { return errorCodes_.size(); @@ -977,7 +977,7 @@ public int getErrorCodesCount() { * may return. 
* * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCodes(int index) { return errorCodes_converter_.convert(errorCodes_.get(index)); @@ -990,7 +990,7 @@ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCodes(in * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public boolean hasRateLimit() { return ((bitField0_ & 0x00000002) == 0x00000002); @@ -1000,7 +1000,7 @@ public boolean hasRateLimit() { * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public com.databricks.api.proto.databricks.Databricks.RateLimit getRateLimit() { return rateLimit_ == null ? com.databricks.api.proto.databricks.Databricks.RateLimit.getDefaultInstance() : rateLimit_; @@ -1010,7 +1010,7 @@ public com.databricks.api.proto.databricks.Databricks.RateLimit getRateLimit() { * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public com.databricks.api.proto.databricks.Databricks.RateLimitOrBuilder getRateLimitOrBuilder() { return rateLimit_ == null ? com.databricks.api.proto.databricks.Databricks.RateLimit.getDefaultInstance() : rateLimit_; @@ -1300,21 +1300,21 @@ protected Builder newBuilderForType( * direct RPCs to endpoints, as well as other metadata about the RPC. * * - * Protobuf type {@code DatabricksRpcOptions} + * Protobuf type {@code mlflow.DatabricksRpcOptions} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:DatabricksRpcOptions) + // @@protoc_insertion_point(builder_implements:mlflow.DatabricksRpcOptions) com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptionsOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksRpcOptions_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksRpcOptions_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksRpcOptions_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksRpcOptions_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions.class, com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions.Builder.class); } @@ -1363,7 +1363,7 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksRpcOptions_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksRpcOptions_descriptor; } @java.lang.Override @@ -1554,7 +1554,7 @@ private void ensureEndpointsIsMutable() { com.databricks.api.proto.databricks.Databricks.HttpEndpoint, 
com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder, com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder> endpointsBuilder_; /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public java.util.List getEndpointsList() { if (endpointsBuilder_ == null) { @@ -1564,7 +1564,7 @@ public java.util.Listrepeated .HttpEndpoint endpoints = 1;
+ * repeated .mlflow.HttpEndpoint endpoints = 1; */ public int getEndpointsCount() { if (endpointsBuilder_ == null) { @@ -1574,7 +1574,7 @@ public int getEndpointsCount() { } } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint getEndpoints(int index) { if (endpointsBuilder_ == null) { @@ -1584,7 +1584,7 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint getEndpoints( } } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder setEndpoints( int index, com.databricks.api.proto.databricks.Databricks.HttpEndpoint value) { @@ -1601,7 +1601,7 @@ public Builder setEndpoints( return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder setEndpoints( int index, com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder builderForValue) { @@ -1615,7 +1615,7 @@ public Builder setEndpoints( return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder addEndpoints(com.databricks.api.proto.databricks.Databricks.HttpEndpoint value) { if (endpointsBuilder_ == null) { @@ -1631,7 +1631,7 @@ public Builder addEndpoints(com.databricks.api.proto.databricks.Databricks.HttpE return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder addEndpoints( int index, com.databricks.api.proto.databricks.Databricks.HttpEndpoint value) { @@ -1648,7 +1648,7 @@ public Builder addEndpoints( return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder addEndpoints( com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder builderForValue) { @@ -1662,7 +1662,7 @@ public Builder addEndpoints( return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder addEndpoints( int index, com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder builderForValue) { @@ -1676,7 +1676,7 @@ public Builder addEndpoints( return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder addAllEndpoints( java.lang.Iterable values) { @@ -1691,7 +1691,7 @@ public Builder addAllEndpoints( return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder clearEndpoints() { if (endpointsBuilder_ == null) { @@ -1704,7 +1704,7 @@ public Builder clearEndpoints() { return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public Builder removeEndpoints(int index) { if (endpointsBuilder_ == null) { @@ -1717,14 +1717,14 @@ public Builder removeEndpoints(int index) { return this; } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder getEndpointsBuilder( int index) { return getEndpointsFieldBuilder().getBuilder(index); } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getEndpointsOrBuilder( int index) { @@ -1734,7 +1734,7 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder 
getE } } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public java.util.List getEndpointsOrBuilderList() { @@ -1745,14 +1745,14 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder getE } } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder addEndpointsBuilder() { return getEndpointsFieldBuilder().addBuilder( com.databricks.api.proto.databricks.Databricks.HttpEndpoint.getDefaultInstance()); } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder addEndpointsBuilder( int index) { @@ -1760,7 +1760,7 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder addEn index, com.databricks.api.proto.databricks.Databricks.HttpEndpoint.getDefaultInstance()); } /** - * repeated .HttpEndpoint endpoints = 1; + * repeated .mlflow.HttpEndpoint endpoints = 1; */ public java.util.List getEndpointsBuilderList() { @@ -1787,7 +1787,7 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder addEn * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ public boolean hasVisibility() { return ((bitField0_ & 0x00000002) == 0x00000002); @@ -1797,7 +1797,7 @@ public boolean hasVisibility() { * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ public com.databricks.api.proto.databricks.Databricks.Visibility getVisibility() { @SuppressWarnings("deprecation") @@ -1809,7 +1809,7 @@ public com.databricks.api.proto.databricks.Databricks.Visibility getVisibility() * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ public Builder setVisibility(com.databricks.api.proto.databricks.Databricks.Visibility value) { if (value == null) { @@ -1825,7 +1825,7 @@ public Builder setVisibility(com.databricks.api.proto.databricks.Databricks.Visi * Indicates which users are allowed to initiate this RPC. * * - * optional .Visibility visibility = 2; + * optional .mlflow.Visibility visibility = 2; */ public Builder clearVisibility() { bitField0_ = (bitField0_ & ~0x00000002); @@ -1848,7 +1848,7 @@ private void ensureErrorCodesIsMutable() { * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public java.util.List getErrorCodesList() { return new com.google.protobuf.Internal.ListAdapter< @@ -1860,7 +1860,7 @@ public java.util.List * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public int getErrorCodesCount() { return errorCodes_.size(); @@ -1871,7 +1871,7 @@ public int getErrorCodesCount() { * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCodes(int index) { return errorCodes_converter_.convert(errorCodes_.get(index)); @@ -1882,7 +1882,7 @@ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCodes(in * may return. 
* * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public Builder setErrorCodes( int index, com.databricks.api.proto.databricks.Databricks.ErrorCode value) { @@ -1900,7 +1900,7 @@ public Builder setErrorCodes( * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public Builder addErrorCodes(com.databricks.api.proto.databricks.Databricks.ErrorCode value) { if (value == null) { @@ -1917,7 +1917,7 @@ public Builder addErrorCodes(com.databricks.api.proto.databricks.Databricks.Erro * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public Builder addAllErrorCodes( java.lang.Iterable values) { @@ -1934,7 +1934,7 @@ public Builder addAllErrorCodes( * may return. * * - * repeated .ErrorCode error_codes = 3; + * repeated .mlflow.ErrorCode error_codes = 3; */ public Builder clearErrorCodes() { errorCodes_ = java.util.Collections.emptyList(); @@ -1951,7 +1951,7 @@ public Builder clearErrorCodes() { * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public boolean hasRateLimit() { return ((bitField0_ & 0x00000008) == 0x00000008); @@ -1961,7 +1961,7 @@ public boolean hasRateLimit() { * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public com.databricks.api.proto.databricks.Databricks.RateLimit getRateLimit() { if (rateLimitBuilder_ == null) { @@ -1975,7 +1975,7 @@ public com.databricks.api.proto.databricks.Databricks.RateLimit getRateLimit() { * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public Builder setRateLimit(com.databricks.api.proto.databricks.Databricks.RateLimit value) { if (rateLimitBuilder_ == null) { @@ -1995,7 +1995,7 @@ public Builder setRateLimit(com.databricks.api.proto.databricks.Databricks.RateL * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public Builder setRateLimit( com.databricks.api.proto.databricks.Databricks.RateLimit.Builder builderForValue) { @@ -2013,7 +2013,7 @@ public Builder setRateLimit( * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public Builder mergeRateLimit(com.databricks.api.proto.databricks.Databricks.RateLimit value) { if (rateLimitBuilder_ == null) { @@ -2037,7 +2037,7 @@ public Builder mergeRateLimit(com.databricks.api.proto.databricks.Databricks.Rat * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public Builder clearRateLimit() { if (rateLimitBuilder_ == null) { @@ -2054,7 +2054,7 @@ public Builder clearRateLimit() { * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. 
* * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public com.databricks.api.proto.databricks.Databricks.RateLimit.Builder getRateLimitBuilder() { bitField0_ |= 0x00000008; @@ -2066,7 +2066,7 @@ public com.databricks.api.proto.databricks.Databricks.RateLimit.Builder getRateL * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ public com.databricks.api.proto.databricks.Databricks.RateLimitOrBuilder getRateLimitOrBuilder() { if (rateLimitBuilder_ != null) { @@ -2081,7 +2081,7 @@ public com.databricks.api.proto.databricks.Databricks.RateLimitOrBuilder getRate * If defined, a rate limit will be applied to this RPC for all requests from the API proxy. * * - * optional .RateLimit rate_limit = 4; + * optional .mlflow.RateLimit rate_limit = 4; */ private com.google.protobuf.SingleFieldBuilderV3< com.databricks.api.proto.databricks.Databricks.RateLimit, com.databricks.api.proto.databricks.Databricks.RateLimit.Builder, com.databricks.api.proto.databricks.Databricks.RateLimitOrBuilder> @@ -2215,10 +2215,10 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:DatabricksRpcOptions) + // @@protoc_insertion_point(builder_scope:mlflow.DatabricksRpcOptions) } - // @@protoc_insertion_point(class_scope:DatabricksRpcOptions) + // @@protoc_insertion_point(class_scope:mlflow.DatabricksRpcOptions) private static final com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions(); @@ -2256,7 +2256,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions getDe } public interface HttpEndpointOrBuilder extends - // @@protoc_insertion_point(interface_extends:HttpEndpoint) + // @@protoc_insertion_point(interface_extends:mlflow.HttpEndpoint) com.google.protobuf.MessageOrBuilder { /** @@ -2317,7 +2317,7 @@ public interface HttpEndpointOrBuilder extends * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ boolean hasSince(); /** @@ -2326,7 +2326,7 @@ public interface HttpEndpointOrBuilder extends * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ com.databricks.api.proto.databricks.Databricks.ApiVersion getSince(); /** @@ -2335,16 +2335,16 @@ public interface HttpEndpointOrBuilder extends * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ com.databricks.api.proto.databricks.Databricks.ApiVersionOrBuilder getSinceOrBuilder(); } /** - * Protobuf type {@code HttpEndpoint} + * Protobuf type {@code mlflow.HttpEndpoint} */ public static final class HttpEndpoint extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:HttpEndpoint) + // @@protoc_insertion_point(message_implements:mlflow.HttpEndpoint) HttpEndpointOrBuilder { private static final long serialVersionUID = 0L; // Use HttpEndpoint.newBuilder() to construct. 
@@ -2426,13 +2426,13 @@ private HttpEndpoint( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_HttpEndpoint_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_HttpEndpoint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_HttpEndpoint_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_HttpEndpoint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.HttpEndpoint.class, com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder.class); } @@ -2554,7 +2554,7 @@ public java.lang.String getPath() { * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public boolean hasSince() { return ((bitField0_ & 0x00000004) == 0x00000004); @@ -2565,7 +2565,7 @@ public boolean hasSince() { * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public com.databricks.api.proto.databricks.Databricks.ApiVersion getSince() { return since_ == null ? com.databricks.api.proto.databricks.Databricks.ApiVersion.getDefaultInstance() : since_; @@ -2576,7 +2576,7 @@ public com.databricks.api.proto.databricks.Databricks.ApiVersion getSince() { * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public com.databricks.api.proto.databricks.Databricks.ApiVersionOrBuilder getSinceOrBuilder() { return since_ == null ? 
com.databricks.api.proto.databricks.Databricks.ApiVersion.getDefaultInstance() : since_; @@ -2774,21 +2774,21 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code HttpEndpoint} + * Protobuf type {@code mlflow.HttpEndpoint} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:HttpEndpoint) + // @@protoc_insertion_point(builder_implements:mlflow.HttpEndpoint) com.databricks.api.proto.databricks.Databricks.HttpEndpointOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_HttpEndpoint_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_HttpEndpoint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_HttpEndpoint_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_HttpEndpoint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.HttpEndpoint.class, com.databricks.api.proto.databricks.Databricks.HttpEndpoint.Builder.class); } @@ -2828,7 +2828,7 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.databricks.api.proto.databricks.Databricks.internal_static_HttpEndpoint_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_HttpEndpoint_descriptor; } @java.lang.Override @@ -3167,7 +3167,7 @@ public Builder setPathBytes( * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public boolean hasSince() { return ((bitField0_ & 0x00000004) == 0x00000004); @@ -3178,7 +3178,7 @@ public boolean hasSince() { * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public com.databricks.api.proto.databricks.Databricks.ApiVersion getSince() { if (sinceBuilder_ == null) { @@ -3193,7 +3193,7 @@ public com.databricks.api.proto.databricks.Databricks.ApiVersion getSince() { * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public Builder setSince(com.databricks.api.proto.databricks.Databricks.ApiVersion value) { if (sinceBuilder_ == null) { @@ -3214,7 +3214,7 @@ public Builder setSince(com.databricks.api.proto.databricks.Databricks.ApiVersio * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public Builder setSince( com.databricks.api.proto.databricks.Databricks.ApiVersion.Builder builderForValue) { @@ -3233,7 +3233,7 @@ public Builder setSince( * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public Builder mergeSince(com.databricks.api.proto.databricks.Databricks.ApiVersion value) { if (sinceBuilder_ == null) { @@ -3258,7 +3258,7 @@ public Builder mergeSince(com.databricks.api.proto.databricks.Databricks.ApiVers * Breaking changes to an RPC must use a different version number. 
* * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public Builder clearSince() { if (sinceBuilder_ == null) { @@ -3276,7 +3276,7 @@ public Builder clearSince() { * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public com.databricks.api.proto.databricks.Databricks.ApiVersion.Builder getSinceBuilder() { bitField0_ |= 0x00000004; @@ -3289,7 +3289,7 @@ public com.databricks.api.proto.databricks.Databricks.ApiVersion.Builder getSinc * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ public com.databricks.api.proto.databricks.Databricks.ApiVersionOrBuilder getSinceOrBuilder() { if (sinceBuilder_ != null) { @@ -3305,7 +3305,7 @@ public com.databricks.api.proto.databricks.Databricks.ApiVersionOrBuilder getSin * Breaking changes to an RPC must use a different version number. * * - * optional .ApiVersion since = 3; + * optional .mlflow.ApiVersion since = 3; */ private com.google.protobuf.SingleFieldBuilderV3< com.databricks.api.proto.databricks.Databricks.ApiVersion, com.databricks.api.proto.databricks.Databricks.ApiVersion.Builder, com.databricks.api.proto.databricks.Databricks.ApiVersionOrBuilder> @@ -3333,10 +3333,10 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:HttpEndpoint) + // @@protoc_insertion_point(builder_scope:mlflow.HttpEndpoint) } - // @@protoc_insertion_point(class_scope:HttpEndpoint) + // @@protoc_insertion_point(class_scope:mlflow.HttpEndpoint) private static final com.databricks.api.proto.databricks.Databricks.HttpEndpoint DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.databricks.api.proto.databricks.Databricks.HttpEndpoint(); @@ -3374,7 +3374,7 @@ public com.databricks.api.proto.databricks.Databricks.HttpEndpoint getDefaultIns } public interface ApiVersionOrBuilder extends - // @@protoc_insertion_point(interface_extends:ApiVersion) + // @@protoc_insertion_point(interface_extends:mlflow.ApiVersion) com.google.protobuf.MessageOrBuilder { /** @@ -3396,11 +3396,11 @@ public interface ApiVersionOrBuilder extends int getMinor(); } /** - * Protobuf type {@code ApiVersion} + * Protobuf type {@code mlflow.ApiVersion} */ public static final class ApiVersion extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:ApiVersion) + // @@protoc_insertion_point(message_implements:mlflow.ApiVersion) ApiVersionOrBuilder { private static final long serialVersionUID = 0L; // Use ApiVersion.newBuilder() to construct. 
@@ -3467,13 +3467,13 @@ private ApiVersion( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_ApiVersion_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_ApiVersion_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_ApiVersion_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_ApiVersion_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.ApiVersion.class, com.databricks.api.proto.databricks.Databricks.ApiVersion.Builder.class); } @@ -3687,21 +3687,21 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code ApiVersion} + * Protobuf type {@code mlflow.ApiVersion} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:ApiVersion) + // @@protoc_insertion_point(builder_implements:mlflow.ApiVersion) com.databricks.api.proto.databricks.Databricks.ApiVersionOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_ApiVersion_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_ApiVersion_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_ApiVersion_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_ApiVersion_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.ApiVersion.class, com.databricks.api.proto.databricks.Databricks.ApiVersion.Builder.class); } @@ -3734,7 +3734,7 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.databricks.api.proto.databricks.Databricks.internal_static_ApiVersion_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_ApiVersion_descriptor; } @java.lang.Override @@ -3925,10 +3925,10 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:ApiVersion) + // @@protoc_insertion_point(builder_scope:mlflow.ApiVersion) } - // @@protoc_insertion_point(class_scope:ApiVersion) + // @@protoc_insertion_point(class_scope:mlflow.ApiVersion) private static final com.databricks.api.proto.databricks.Databricks.ApiVersion DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.databricks.api.proto.databricks.Databricks.ApiVersion(); @@ -3966,7 +3966,7 @@ public com.databricks.api.proto.databricks.Databricks.ApiVersion getDefaultInsta } public interface RateLimitOrBuilder extends - // @@protoc_insertion_point(interface_extends:RateLimit) + // @@protoc_insertion_point(interface_extends:mlflow.RateLimit) com.google.protobuf.MessageOrBuilder { /** @@ -4013,11 +4013,11 @@ public interface RateLimitOrBuilder extends * per organization basis. 
* * - * Protobuf type {@code RateLimit} + * Protobuf type {@code mlflow.RateLimit} */ public static final class RateLimit extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:RateLimit) + // @@protoc_insertion_point(message_implements:mlflow.RateLimit) RateLimitOrBuilder { private static final long serialVersionUID = 0L; // Use RateLimit.newBuilder() to construct. @@ -4084,13 +4084,13 @@ private RateLimit( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_RateLimit_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_RateLimit_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_RateLimit_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_RateLimit_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.RateLimit.class, com.databricks.api.proto.databricks.Databricks.RateLimit.Builder.class); } @@ -4331,21 +4331,21 @@ protected Builder newBuilderForType( * per organization basis. * * - * Protobuf type {@code RateLimit} + * Protobuf type {@code mlflow.RateLimit} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:RateLimit) + // @@protoc_insertion_point(builder_implements:mlflow.RateLimit) com.databricks.api.proto.databricks.Databricks.RateLimitOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_RateLimit_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_RateLimit_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_RateLimit_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_RateLimit_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.RateLimit.class, com.databricks.api.proto.databricks.Databricks.RateLimit.Builder.class); } @@ -4378,7 +4378,7 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.databricks.api.proto.databricks.Databricks.internal_static_RateLimit_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_RateLimit_descriptor; } @java.lang.Override @@ -4609,10 +4609,10 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:RateLimit) + // @@protoc_insertion_point(builder_scope:mlflow.RateLimit) } - // @@protoc_insertion_point(class_scope:RateLimit) + // @@protoc_insertion_point(class_scope:mlflow.RateLimit) private static final com.databricks.api.proto.databricks.Databricks.RateLimit DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.databricks.api.proto.databricks.Databricks.RateLimit(); @@ -4650,7 +4650,7 @@ public com.databricks.api.proto.databricks.Databricks.RateLimit getDefaultInstan } public interface DocumentationMetadataOrBuilder extends - // 
@@protoc_insertion_point(interface_extends:DocumentationMetadata) + // @@protoc_insertion_point(interface_extends:mlflow.DocumentationMetadata) com.google.protobuf.MessageOrBuilder { /** @@ -4718,7 +4718,7 @@ public interface DocumentationMetadataOrBuilder extends * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ boolean hasVisibility(); /** @@ -4728,7 +4728,7 @@ public interface DocumentationMetadataOrBuilder extends * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ com.databricks.api.proto.databricks.Databricks.Visibility getVisibility(); @@ -4803,11 +4803,11 @@ public interface DocumentationMetadataOrBuilder extends * A block of documentation that is added to the AST after parsing the original protocol buffer. * * - * Protobuf type {@code DocumentationMetadata} + * Protobuf type {@code mlflow.DocumentationMetadata} */ public static final class DocumentationMetadata extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:DocumentationMetadata) + // @@protoc_insertion_point(message_implements:mlflow.DocumentationMetadata) DocumentationMetadataOrBuilder { private static final long serialVersionUID = 0L; // Use DocumentationMetadata.newBuilder() to construct. @@ -4908,13 +4908,13 @@ private DocumentationMetadata( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DocumentationMetadata_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DocumentationMetadata_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DocumentationMetadata_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DocumentationMetadata_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class, com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.Builder.class); } @@ -5043,7 +5043,7 @@ public java.lang.String getLeadDoc() { * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ public boolean hasVisibility() { return ((bitField0_ & 0x00000004) == 0x00000004); @@ -5055,7 +5055,7 @@ public boolean hasVisibility() { * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ public com.databricks.api.proto.databricks.Databricks.Visibility getVisibility() { @SuppressWarnings("deprecation") @@ -5373,21 +5373,21 @@ protected Builder newBuilderForType( * A block of documentation that is added to the AST after parsing the original protocol buffer. 
* * - * Protobuf type {@code DocumentationMetadata} + * Protobuf type {@code mlflow.DocumentationMetadata} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:DocumentationMetadata) + // @@protoc_insertion_point(builder_implements:mlflow.DocumentationMetadata) com.databricks.api.proto.databricks.Databricks.DocumentationMetadataOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DocumentationMetadata_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DocumentationMetadata_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DocumentationMetadata_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DocumentationMetadata_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class, com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.Builder.class); } @@ -5426,7 +5426,7 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DocumentationMetadata_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DocumentationMetadata_descriptor; } @java.lang.Override @@ -5794,7 +5794,7 @@ public Builder setLeadDocBytes( * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ public boolean hasVisibility() { return ((bitField0_ & 0x00000004) == 0x00000004); @@ -5806,7 +5806,7 @@ public boolean hasVisibility() { * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ public com.databricks.api.proto.databricks.Databricks.Visibility getVisibility() { @SuppressWarnings("deprecation") @@ -5820,7 +5820,7 @@ public com.databricks.api.proto.databricks.Databricks.Visibility getVisibility() * visibility level. The documentation is then generated for each visibility level. * * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ public Builder setVisibility(com.databricks.api.proto.databricks.Databricks.Visibility value) { if (value == null) { @@ -5838,7 +5838,7 @@ public Builder setVisibility(com.databricks.api.proto.databricks.Databricks.Visi * visibility level. The documentation is then generated for each visibility level. 
* * - * optional .Visibility visibility = 3; + * optional .mlflow.Visibility visibility = 3; */ public Builder clearVisibility() { bitField0_ = (bitField0_ & ~0x00000004); @@ -6067,10 +6067,10 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:DocumentationMetadata) + // @@protoc_insertion_point(builder_scope:mlflow.DocumentationMetadata) } - // @@protoc_insertion_point(class_scope:DocumentationMetadata) + // @@protoc_insertion_point(class_scope:mlflow.DocumentationMetadata) private static final com.databricks.api.proto.databricks.Databricks.DocumentationMetadata DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.databricks.api.proto.databricks.Databricks.DocumentationMetadata(); @@ -6108,15 +6108,15 @@ public com.databricks.api.proto.databricks.Databricks.DocumentationMetadata getD } public interface DatabricksServiceExceptionProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:DatabricksServiceExceptionProto) + // @@protoc_insertion_point(interface_extends:mlflow.DatabricksServiceExceptionProto) com.google.protobuf.MessageOrBuilder { /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ boolean hasErrorCode(); /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCode(); @@ -6153,11 +6153,11 @@ public interface DatabricksServiceExceptionProtoOrBuilder extends * Serialization format for DatabricksServiceException. * * - * Protobuf type {@code DatabricksServiceExceptionProto} + * Protobuf type {@code mlflow.DatabricksServiceExceptionProto} */ public static final class DatabricksServiceExceptionProto extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:DatabricksServiceExceptionProto) + // @@protoc_insertion_point(message_implements:mlflow.DatabricksServiceExceptionProto) DatabricksServiceExceptionProtoOrBuilder { private static final long serialVersionUID = 0L; // Use DatabricksServiceExceptionProto.newBuilder() to construct. 
@@ -6239,13 +6239,13 @@ private DatabricksServiceExceptionProto( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksServiceExceptionProto_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksServiceExceptionProto_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksServiceExceptionProto_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksServiceExceptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProto.class, com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProto.Builder.class); } @@ -6254,13 +6254,13 @@ private DatabricksServiceExceptionProto( public static final int ERROR_CODE_FIELD_NUMBER = 1; private int errorCode_; /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ public boolean hasErrorCode() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCode() { @SuppressWarnings("deprecation") @@ -6547,21 +6547,21 @@ protected Builder newBuilderForType( * Serialization format for DatabricksServiceException. * * - * Protobuf type {@code DatabricksServiceExceptionProto} + * Protobuf type {@code mlflow.DatabricksServiceExceptionProto} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:DatabricksServiceExceptionProto) + // @@protoc_insertion_point(builder_implements:mlflow.DatabricksServiceExceptionProto) com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksServiceExceptionProto_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksServiceExceptionProto_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksServiceExceptionProto_fieldAccessorTable + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksServiceExceptionProto_fieldAccessorTable .ensureFieldAccessorsInitialized( com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProto.class, com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProto.Builder.class); } @@ -6596,7 +6596,7 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.databricks.api.proto.databricks.Databricks.internal_static_DatabricksServiceExceptionProto_descriptor; + return com.databricks.api.proto.databricks.Databricks.internal_static_mlflow_DatabricksServiceExceptionProto_descriptor; } @java.lang.Override @@ -6724,13 +6724,13 @@ public Builder mergeFrom( private int errorCode_ = 1; /** - * optional .ErrorCode error_code = 1; 
+ * optional .mlflow.ErrorCode error_code = 1; */ public boolean hasErrorCode() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCode() { @SuppressWarnings("deprecation") @@ -6738,7 +6738,7 @@ public com.databricks.api.proto.databricks.Databricks.ErrorCode getErrorCode() { return result == null ? com.databricks.api.proto.databricks.Databricks.ErrorCode.INTERNAL_ERROR : result; } /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ public Builder setErrorCode(com.databricks.api.proto.databricks.Databricks.ErrorCode value) { if (value == null) { @@ -6750,7 +6750,7 @@ public Builder setErrorCode(com.databricks.api.proto.databricks.Databricks.Error return this; } /** - * optional .ErrorCode error_code = 1; + * optional .mlflow.ErrorCode error_code = 1; */ public Builder clearErrorCode() { bitField0_ = (bitField0_ & ~0x00000001); @@ -6923,10 +6923,10 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:DatabricksServiceExceptionProto) + // @@protoc_insertion_point(builder_scope:mlflow.DatabricksServiceExceptionProto) } - // @@protoc_insertion_point(class_scope:DatabricksServiceExceptionProto) + // @@protoc_insertion_point(class_scope:mlflow.DatabricksServiceExceptionProto) private static final com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProto DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.databricks.api.proto.databricks.Databricks.DatabricksServiceExceptionProto(); @@ -6963,7 +6963,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException } - public static final int VISIBILITY_FIELD_NUMBER = 50000; + public static final int VISIBILITY_FIELD_NUMBER = 51310; /** *
    * Indicates an overriding visibility for this field. This can only reduce the visibility;
@@ -6979,7 +6979,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.Visibility.class,
         null);
-  public static final int VALIDATE_REQUIRED_FIELD_NUMBER = 50001;
+  public static final int VALIDATE_REQUIRED_FIELD_NUMBER = 51311;
   /**
    * 
    * This annotation indicates that certain fields must be supplied for the request to be carried
@@ -6999,7 +6999,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         java.lang.Boolean.class,
         null);
-  public static final int JSON_INLINE_FIELD_NUMBER = 50002;
+  public static final int JSON_INLINE_FIELD_NUMBER = 51312;
   /**
    * 
    * Causes the fields within the tagged Message to be inlined into this Message, for the purposes
@@ -7026,7 +7026,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         java.lang.Boolean.class,
         null);
-  public static final int JSON_MAP_FIELD_NUMBER = 50003;
+  public static final int JSON_MAP_FIELD_NUMBER = 51313;
   /**
    * 
    * Causes a field which conceptually represents a Map to be serialized as a JSON Map.
@@ -7047,7 +7047,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         java.lang.Boolean.class,
         null);
-  public static final int FIELD_DOC_FIELD_NUMBER = 50004;
+  public static final int FIELD_DOC_FIELD_NUMBER = 51314;
   /**
    * 
    * The documentation meta data for this field. This gets added automatically when the proto is
@@ -7066,7 +7066,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class,
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.getDefaultInstance());
-  public static final int RPC_FIELD_NUMBER = 50000;
+  public static final int RPC_FIELD_NUMBER = 51310;
   /**
    * extend .google.protobuf.MethodOptions { ... }
    */
@@ -7077,7 +7077,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions.class,
         com.databricks.api.proto.databricks.Databricks.DatabricksRpcOptions.getDefaultInstance());
-  public static final int METHOD_DOC_FIELD_NUMBER = 50004;
+  public static final int METHOD_DOC_FIELD_NUMBER = 51314;
   /**
    * 
    * The documentation metadata.
@@ -7094,7 +7094,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class,
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.getDefaultInstance());
-  public static final int MESSAGE_DOC_FIELD_NUMBER = 50004;
+  public static final int MESSAGE_DOC_FIELD_NUMBER = 51314;
   /**
    * 
    * The documentation metadata.
@@ -7111,7 +7111,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class,
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.getDefaultInstance());
-  public static final int SERVICE_DOC_FIELD_NUMBER = 50004;
+  public static final int SERVICE_DOC_FIELD_NUMBER = 51314;
   /**
    * 
    * The documentation metadata.
@@ -7128,7 +7128,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class,
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.getDefaultInstance());
-  public static final int ENUM_DOC_FIELD_NUMBER = 50004;
+  public static final int ENUM_DOC_FIELD_NUMBER = 51314;
   /**
    * 
    * The documentation metadata.
@@ -7145,7 +7145,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class,
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.getDefaultInstance());
-  public static final int ENUM_VALUE_VISIBILITY_FIELD_NUMBER = 50000;
+  public static final int ENUM_VALUE_VISIBILITY_FIELD_NUMBER = 51310;
   /**
    * 
    * Indicates an overriding visibility for this field. This can only reduce the visibility;
@@ -7161,7 +7161,7 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
           .newFileScopedGeneratedExtension(
         com.databricks.api.proto.databricks.Databricks.Visibility.class,
         null);
-  public static final int ENUM_VALUE_DOC_FIELD_NUMBER = 50004;
+  public static final int ENUM_VALUE_DOC_FIELD_NUMBER = 51314;
   /**
    * 
    * The documentation metadata.
@@ -7179,35 +7179,35 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.class,
         com.databricks.api.proto.databricks.Databricks.DocumentationMetadata.getDefaultInstance());
   private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_DatabricksRpcOptions_descriptor;
+    internal_static_mlflow_DatabricksRpcOptions_descriptor;
   private static final 
     com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_DatabricksRpcOptions_fieldAccessorTable;
+      internal_static_mlflow_DatabricksRpcOptions_fieldAccessorTable;
   private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_HttpEndpoint_descriptor;
+    internal_static_mlflow_HttpEndpoint_descriptor;
   private static final 
     com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_HttpEndpoint_fieldAccessorTable;
+      internal_static_mlflow_HttpEndpoint_fieldAccessorTable;
   private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_ApiVersion_descriptor;
+    internal_static_mlflow_ApiVersion_descriptor;
   private static final 
     com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_ApiVersion_fieldAccessorTable;
+      internal_static_mlflow_ApiVersion_fieldAccessorTable;
   private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_RateLimit_descriptor;
+    internal_static_mlflow_RateLimit_descriptor;
   private static final 
     com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_RateLimit_fieldAccessorTable;
+      internal_static_mlflow_RateLimit_fieldAccessorTable;
   private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_DocumentationMetadata_descriptor;
+    internal_static_mlflow_DocumentationMetadata_descriptor;
   private static final 
     com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_DocumentationMetadata_fieldAccessorTable;
+      internal_static_mlflow_DocumentationMetadata_fieldAccessorTable;
   private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_DatabricksServiceExceptionProto_descriptor;
+    internal_static_mlflow_DatabricksServiceExceptionProto_descriptor;
   private static final 
     com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_DatabricksServiceExceptionProto_fieldAccessorTable;
+      internal_static_mlflow_DatabricksServiceExceptionProto_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -7217,62 +7217,65 @@ public com.databricks.api.proto.databricks.Databricks.DatabricksServiceException
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\020databricks.proto\032 google/protobuf/desc" +
-      "riptor.proto\032\025scalapb/scalapb.proto\"\261\001\n\024" +
-      "DatabricksRpcOptions\022 \n\tendpoints\030\001 \003(\0132" +
-      "\r.HttpEndpoint\022\037\n\nvisibility\030\002 \001(\0162\013.Vis" +
-      "ibility\022\037\n\013error_codes\030\003 \003(\0162\n.ErrorCode" +
-      "\022\036\n\nrate_limit\030\004 \001(\0132\n.RateLimit\022\025\n\rrpc_" +
-      "doc_title\030\005 \001(\t\"N\n\014HttpEndpoint\022\024\n\006metho" +
-      "d\030\001 \001(\t:\004POST\022\014\n\004path\030\002 \001(\t\022\032\n\005since\030\003 \001" +
-      "(\0132\013.ApiVersion\"*\n\nApiVersion\022\r\n\005major\030\001" +
-      " \001(\005\022\r\n\005minor\030\002 \001(\005\"@\n\tRateLimit\022\021\n\tmax_" +
-      "burst\030\001 \001(\003\022 \n\030max_sustained_per_second\030" +
-      "\002 \001(\003\"\214\001\n\025DocumentationMetadata\022\021\n\tdocst" +
-      "ring\030\001 \001(\t\022\020\n\010lead_doc\030\002 \001(\t\022\037\n\nvisibili" +
-      "ty\030\003 \001(\0162\013.Visibility\022\033\n\023original_proto_" +
-      "path\030\004 \003(\t\022\020\n\010position\030\005 \001(\005\"g\n\037Databric" +
-      "ksServiceExceptionProto\022\036\n\nerror_code\030\001 " +
-      "\001(\0162\n.ErrorCode\022\017\n\007message\030\002 \001(\t\022\023\n\013stac" +
-      "k_trace\030\003 \001(\t*?\n\nVisibility\022\n\n\006PUBLIC\020\001\022" +
-      "\014\n\010INTERNAL\020\002\022\027\n\023PUBLIC_UNDOCUMENTED\020\003*\366" +
-      "\004\n\tErrorCode\022\022\n\016INTERNAL_ERROR\020\001\022\033\n\027TEMP" +
-      "ORARILY_UNAVAILABLE\020\002\022\014\n\010IO_ERROR\020\003\022\017\n\013B" +
-      "AD_REQUEST\020\004\022\034\n\027INVALID_PARAMETER_VALUE\020" +
-      "\350\007\022\027\n\022ENDPOINT_NOT_FOUND\020\351\007\022\026\n\021MALFORMED" +
-      "_REQUEST\020\352\007\022\022\n\rINVALID_STATE\020\353\007\022\026\n\021PERMI" +
-      "SSION_DENIED\020\354\007\022\025\n\020FEATURE_DISABLED\020\355\007\022\032" +
-      "\n\025CUSTOMER_UNAUTHORIZED\020\356\007\022\033\n\026REQUEST_LI" +
-      "MIT_EXCEEDED\020\357\007\022\035\n\030INVALID_STATE_TRANSIT" +
-      "ION\020\321\017\022\033\n\026COULD_NOT_ACQUIRE_LOCK\020\322\017\022\034\n\027R" +
-      "ESOURCE_ALREADY_EXISTS\020\271\027\022\034\n\027RESOURCE_DO" +
-      "ES_NOT_EXIST\020\272\027\022\023\n\016QUOTA_EXCEEDED\020\241\037\022\034\n\027" +
-      "MAX_BLOCK_SIZE_EXCEEDED\020\242\037\022\033\n\026MAX_READ_S" +
-      "IZE_EXCEEDED\020\243\037\022\023\n\016DRY_RUN_FAILED\020\211\'\022\034\n\027" +
-      "RESOURCE_LIMIT_EXCEEDED\020\212\'\022\030\n\023DIRECTORY_" +
-      "NOT_EMPTY\020\361.\022\030\n\023DIRECTORY_PROTECTED\020\362.\022\037" +
-      "\n\032MAX_NOTEBOOK_SIZE_EXCEEDED\020\363.:@\n\nvisib" +
-      "ility\022\035.google.protobuf.FieldOptions\030\320\206\003" +
-      " \001(\0162\013.Visibility::\n\021validate_required\022\035" +
-      ".google.protobuf.FieldOptions\030\321\206\003 \001(\010:4\n" +
-      "\013json_inline\022\035.google.protobuf.FieldOpti" +
-      "ons\030\322\206\003 \001(\010:1\n\010json_map\022\035.google.protobu" +
-      "f.FieldOptions\030\323\206\003 \001(\010:J\n\tfield_doc\022\035.go" +
-      "ogle.protobuf.FieldOptions\030\324\206\003 \003(\0132\026.Doc" +
-      "umentationMetadata:D\n\003rpc\022\036.google.proto" +
-      "buf.MethodOptions\030\320\206\003 \001(\0132\025.DatabricksRp" +
-      "cOptions:L\n\nmethod_doc\022\036.google.protobuf" +
-      ".MethodOptions\030\324\206\003 \003(\0132\026.DocumentationMe" +
-      "tadata:N\n\013message_doc\022\037.google.protobuf." +
-      "MessageOptions\030\324\206\003 \003(\0132\026.DocumentationMe" +
-      "tadata:N\n\013service_doc\022\037.google.protobuf." +
-      "ServiceOptions\030\324\206\003 \003(\0132\026.DocumentationMe" +
-      "tadata:H\n\010enum_doc\022\034.google.protobuf.Enu" +
-      "mOptions\030\324\206\003 \003(\0132\026.DocumentationMetadata" +
-      ":O\n\025enum_value_visibility\022!.google.proto" +
-      "buf.EnumValueOptions\030\320\206\003 \001(\0162\013.Visibilit" +
-      "y:S\n\016enum_value_doc\022!.google.protobuf.En" +
-      "umValueOptions\030\324\206\003 \003(\0132\026.DocumentationMe" +
+      "\n\020databricks.proto\022\006mlflow\032 google/proto" +
+      "buf/descriptor.proto\032\025scalapb/scalapb.pr" +
+      "oto\"\315\001\n\024DatabricksRpcOptions\022\'\n\tendpoint" +
+      "s\030\001 \003(\0132\024.mlflow.HttpEndpoint\022&\n\nvisibil" +
+      "ity\030\002 \001(\0162\022.mlflow.Visibility\022&\n\013error_c" +
+      "odes\030\003 \003(\0162\021.mlflow.ErrorCode\022%\n\nrate_li" +
+      "mit\030\004 \001(\0132\021.mlflow.RateLimit\022\025\n\rrpc_doc_" +
+      "title\030\005 \001(\t\"U\n\014HttpEndpoint\022\024\n\006method\030\001 " +
+      "\001(\t:\004POST\022\014\n\004path\030\002 \001(\t\022!\n\005since\030\003 \001(\0132\022" +
+      ".mlflow.ApiVersion\"*\n\nApiVersion\022\r\n\005majo" +
+      "r\030\001 \001(\005\022\r\n\005minor\030\002 \001(\005\"@\n\tRateLimit\022\021\n\tm" +
+      "ax_burst\030\001 \001(\003\022 \n\030max_sustained_per_seco" +
+      "nd\030\002 \001(\003\"\223\001\n\025DocumentationMetadata\022\021\n\tdo" +
+      "cstring\030\001 \001(\t\022\020\n\010lead_doc\030\002 \001(\t\022&\n\nvisib" +
+      "ility\030\003 \001(\0162\022.mlflow.Visibility\022\033\n\023origi" +
+      "nal_proto_path\030\004 \003(\t\022\020\n\010position\030\005 \001(\005\"n" +
+      "\n\037DatabricksServiceExceptionProto\022%\n\nerr" +
+      "or_code\030\001 \001(\0162\021.mlflow.ErrorCode\022\017\n\007mess" +
+      "age\030\002 \001(\t\022\023\n\013stack_trace\030\003 \001(\t*?\n\nVisibi" +
+      "lity\022\n\n\006PUBLIC\020\001\022\014\n\010INTERNAL\020\002\022\027\n\023PUBLIC" +
+      "_UNDOCUMENTED\020\003*\366\004\n\tErrorCode\022\022\n\016INTERNA" +
+      "L_ERROR\020\001\022\033\n\027TEMPORARILY_UNAVAILABLE\020\002\022\014" +
+      "\n\010IO_ERROR\020\003\022\017\n\013BAD_REQUEST\020\004\022\034\n\027INVALID" +
+      "_PARAMETER_VALUE\020\350\007\022\027\n\022ENDPOINT_NOT_FOUN" +
+      "D\020\351\007\022\026\n\021MALFORMED_REQUEST\020\352\007\022\022\n\rINVALID_" +
+      "STATE\020\353\007\022\026\n\021PERMISSION_DENIED\020\354\007\022\025\n\020FEAT" +
+      "URE_DISABLED\020\355\007\022\032\n\025CUSTOMER_UNAUTHORIZED" +
+      "\020\356\007\022\033\n\026REQUEST_LIMIT_EXCEEDED\020\357\007\022\035\n\030INVA" +
+      "LID_STATE_TRANSITION\020\321\017\022\033\n\026COULD_NOT_ACQ" +
+      "UIRE_LOCK\020\322\017\022\034\n\027RESOURCE_ALREADY_EXISTS\020" +
+      "\271\027\022\034\n\027RESOURCE_DOES_NOT_EXIST\020\272\027\022\023\n\016QUOT" +
+      "A_EXCEEDED\020\241\037\022\034\n\027MAX_BLOCK_SIZE_EXCEEDED" +
+      "\020\242\037\022\033\n\026MAX_READ_SIZE_EXCEEDED\020\243\037\022\023\n\016DRY_" +
+      "RUN_FAILED\020\211\'\022\034\n\027RESOURCE_LIMIT_EXCEEDED" +
+      "\020\212\'\022\030\n\023DIRECTORY_NOT_EMPTY\020\361.\022\030\n\023DIRECTO" +
+      "RY_PROTECTED\020\362.\022\037\n\032MAX_NOTEBOOK_SIZE_EXC" +
+      "EEDED\020\363.:G\n\nvisibility\022\035.google.protobuf" +
+      ".FieldOptions\030\356\220\003 \001(\0162\022.mlflow.Visibilit" +
+      "y::\n\021validate_required\022\035.google.protobuf" +
+      ".FieldOptions\030\357\220\003 \001(\010:4\n\013json_inline\022\035.g" +
+      "oogle.protobuf.FieldOptions\030\360\220\003 \001(\010:1\n\010j" +
+      "son_map\022\035.google.protobuf.FieldOptions\030\361" +
+      "\220\003 \001(\010:Q\n\tfield_doc\022\035.google.protobuf.Fi" +
+      "eldOptions\030\362\220\003 \003(\0132\035.mlflow.Documentatio" +
+      "nMetadata:K\n\003rpc\022\036.google.protobuf.Metho" +
+      "dOptions\030\356\220\003 \001(\0132\034.mlflow.DatabricksRpcO" +
+      "ptions:S\n\nmethod_doc\022\036.google.protobuf.M" +
+      "ethodOptions\030\362\220\003 \003(\0132\035.mlflow.Documentat" +
+      "ionMetadata:U\n\013message_doc\022\037.google.prot" +
+      "obuf.MessageOptions\030\362\220\003 \003(\0132\035.mlflow.Doc" +
+      "umentationMetadata:U\n\013service_doc\022\037.goog" +
+      "le.protobuf.ServiceOptions\030\362\220\003 \003(\0132\035.mlf" +
+      "low.DocumentationMetadata:O\n\010enum_doc\022\034." +
+      "google.protobuf.EnumOptions\030\362\220\003 \003(\0132\035.ml" +
+      "flow.DocumentationMetadata:V\n\025enum_value" +
+      "_visibility\022!.google.protobuf.EnumValueO" +
+      "ptions\030\356\220\003 \001(\0162\022.mlflow.Visibility:Z\n\016en" +
+      "um_value_doc\022!.google.protobuf.EnumValue" +
+      "Options\030\362\220\003 \003(\0132\035.mlflow.DocumentationMe" +
       "tadataB*\n#com.databricks.api.proto.datab" +
       "ricks\342?\002\020\001"
     };
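Two byte sequences in the regenerated descriptorData carry the substance of this change: "\022\006mlflow" near the start is the serialized package field of the FileDescriptorProto (tag \022 is field 2 with wire type 2, followed by length 6 and the ASCII bytes of "mlflow"), and each custom option's extension number is now encoded by "\356\220\003"-style varints where "\320\206\003"-style ones used to appear. A small, hand-written sketch (not part of the generated file) that reproduces those varints:

public class OptionNumberVarints {
  // Encodes a non-negative int as a protobuf base-128 varint and renders it
  // with C-style octal escapes, matching the string literals above.
  static String octalVarint(int n) {
    StringBuilder sb = new StringBuilder();
    while ((n & ~0x7F) != 0) {
      sb.append(String.format("\\%03o", (n & 0x7F) | 0x80));
      n >>>= 7;
    }
    return sb.append(String.format("\\%03o", n)).toString();
  }

  public static void main(String[] args) {
    System.out.println(octalVarint(50000)); // \320\206\003, the old option numbers
    System.out.println(octalVarint(51310)); // \356\220\003, the new option numbers
  }
}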
@@ -7290,41 +7293,41 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
           com.google.protobuf.DescriptorProtos.getDescriptor(),
           org.mlflow.scalapb_interface.Scalapb.getDescriptor(),
         }, assigner);
-    internal_static_DatabricksRpcOptions_descriptor =
+    internal_static_mlflow_DatabricksRpcOptions_descriptor =
       getDescriptor().getMessageTypes().get(0);
-    internal_static_DatabricksRpcOptions_fieldAccessorTable = new
+    internal_static_mlflow_DatabricksRpcOptions_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_DatabricksRpcOptions_descriptor,
+        internal_static_mlflow_DatabricksRpcOptions_descriptor,
         new java.lang.String[] { "Endpoints", "Visibility", "ErrorCodes", "RateLimit", "RpcDocTitle", });
-    internal_static_HttpEndpoint_descriptor =
+    internal_static_mlflow_HttpEndpoint_descriptor =
       getDescriptor().getMessageTypes().get(1);
-    internal_static_HttpEndpoint_fieldAccessorTable = new
+    internal_static_mlflow_HttpEndpoint_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_HttpEndpoint_descriptor,
+        internal_static_mlflow_HttpEndpoint_descriptor,
         new java.lang.String[] { "Method", "Path", "Since", });
-    internal_static_ApiVersion_descriptor =
+    internal_static_mlflow_ApiVersion_descriptor =
       getDescriptor().getMessageTypes().get(2);
-    internal_static_ApiVersion_fieldAccessorTable = new
+    internal_static_mlflow_ApiVersion_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_ApiVersion_descriptor,
+        internal_static_mlflow_ApiVersion_descriptor,
         new java.lang.String[] { "Major", "Minor", });
-    internal_static_RateLimit_descriptor =
+    internal_static_mlflow_RateLimit_descriptor =
       getDescriptor().getMessageTypes().get(3);
-    internal_static_RateLimit_fieldAccessorTable = new
+    internal_static_mlflow_RateLimit_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_RateLimit_descriptor,
+        internal_static_mlflow_RateLimit_descriptor,
         new java.lang.String[] { "MaxBurst", "MaxSustainedPerSecond", });
-    internal_static_DocumentationMetadata_descriptor =
+    internal_static_mlflow_DocumentationMetadata_descriptor =
       getDescriptor().getMessageTypes().get(4);
-    internal_static_DocumentationMetadata_fieldAccessorTable = new
+    internal_static_mlflow_DocumentationMetadata_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_DocumentationMetadata_descriptor,
+        internal_static_mlflow_DocumentationMetadata_descriptor,
         new java.lang.String[] { "Docstring", "LeadDoc", "Visibility", "OriginalProtoPath", "Position", });
-    internal_static_DatabricksServiceExceptionProto_descriptor =
+    internal_static_mlflow_DatabricksServiceExceptionProto_descriptor =
       getDescriptor().getMessageTypes().get(5);
-    internal_static_DatabricksServiceExceptionProto_fieldAccessorTable = new
+    internal_static_mlflow_DatabricksServiceExceptionProto_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_DatabricksServiceExceptionProto_descriptor,
+        internal_static_mlflow_DatabricksServiceExceptionProto_descriptor,
         new java.lang.String[] { "ErrorCode", "Message", "StackTrace", });
     visibility.internalInit(descriptor.getExtensions().get(0));
     validateRequired.internalInit(descriptor.getExtensions().get(1));
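Every hunk in Databricks.java above stems from a single regeneration: databricks.proto now declares package mlflow, so each message name, @@protoc_insertion_point marker, and internal_static_* symbol gains an mlflow. prefix, and the custom options move from field numbers 50000-50004 to 51310-51314. A hand-written sanity check, using only the constants and accessors visible in this file plus protobuf-java's standard Descriptor API:

import com.databricks.api.proto.databricks.Databricks;

public class DescriptorRenameCheck {
  public static void main(String[] args) {
    // Message descriptors now report the proto package prefix in their full name.
    System.out.println(Databricks.ApiVersion.getDescriptor().getFullName());
    // -> "mlflow.ApiVersion" (previously just "ApiVersion")

    // The custom option numbers moved out of the 50000 block.
    System.out.println(Databricks.VISIBILITY_FIELD_NUMBER); // 51310
    System.out.println(Databricks.FIELD_DOC_FIELD_NUMBER);  // 51314
  }
}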
diff --git a/mlflow/java/client/src/main/java/org/mlflow/api/proto/Service.java b/mlflow/java/client/src/main/java/org/mlflow/api/proto/Service.java
index 31379a3fcef8e..05ddd18b7ae7a 100644
--- a/mlflow/java/client/src/main/java/org/mlflow/api/proto/Service.java
+++ b/mlflow/java/client/src/main/java/org/mlflow/api/proto/Service.java
@@ -526,6 +526,23 @@ public interface MetricOrBuilder extends
      * optional int64 timestamp = 3;
      */
     long getTimestamp();
+
+    /**
+     * 
+     * Step at which to log the metric.
+     * 
+     *
+     * optional int64 step = 4 [default = 0];
+     */
+    boolean hasStep();
+    /**
+     * 
+     * Step at which to log the metric.
+     * 
+     *
+     * optional int64 step = 4 [default = 0];
+     */
+    long getStep();
   }
   /**
    * 
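The two declarations above are backed further down this file by the matching accessors, builder methods, and wiring through writeTo, getSerializedSize, equals, and hashCode. A minimal usage sketch, assuming only the generated API added in this patch; the metric name and values are invented for illustration:

import org.mlflow.api.proto.Service;

public class MetricStepExample {
  public static void main(String[] args) {
    Service.Metric metric = Service.Metric.newBuilder()
        .setKey("loss")                           // hypothetical metric name
        .setValue(0.42)
        .setTimestamp(System.currentTimeMillis())
        .setStep(7)                               // the new field; 0 when unset
        .build();
    System.out.println(metric.getKey() + " at step " + metric.getStep());
  }
}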
@@ -547,6 +564,7 @@ private Metric() {
       key_ = "";
       value_ = 0D;
       timestamp_ = 0L;
+      step_ = 0L;
     }
 
     @java.lang.Override
@@ -589,6 +607,11 @@ private Metric(
               timestamp_ = input.readInt64();
               break;
             }
+            case 32: {
+              bitField0_ |= 0x00000008;
+              step_ = input.readInt64();
+              break;
+            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
@@ -722,6 +745,29 @@ public long getTimestamp() {
       return timestamp_;
     }
 
+    public static final int STEP_FIELD_NUMBER = 4;
+    private long step_;
+    /**
+     * 
+     * Step at which to log the metric.
+     * 
+     *
+     * optional int64 step = 4 [default = 0];
+     */
+    public boolean hasStep() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * 
+     * Step at which to log the metric.
+     * 
+     *
+     * optional int64 step = 4 [default = 0];
+     */
+    public long getStep() {
+      return step_;
+    }
+
     private byte memoizedIsInitialized = -1;
     @java.lang.Override
     public final boolean isInitialized() {
@@ -745,6 +791,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeInt64(3, timestamp_);
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeInt64(4, step_);
+      }
       unknownFields.writeTo(output);
     }
 
@@ -765,6 +814,10 @@ public int getSerializedSize() {
         size += com.google.protobuf.CodedOutputStream
           .computeInt64Size(3, timestamp_);
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(4, step_);
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -798,6 +851,11 @@ public boolean equals(final java.lang.Object obj) {
         result = result && (getTimestamp()
             == other.getTimestamp());
       }
+      result = result && (hasStep() == other.hasStep());
+      if (hasStep()) {
+        result = result && (getStep()
+            == other.getStep());
+      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -823,6 +881,11 @@ public int hashCode() {
         hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
             getTimestamp());
       }
+      if (hasStep()) {
+        hash = (37 * hash) + STEP_FIELD_NUMBER;
+        hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
+            getStep());
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -966,6 +1029,8 @@ public Builder clear() {
         bitField0_ = (bitField0_ & ~0x00000002);
         timestamp_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000004);
+        step_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
 
@@ -1006,6 +1071,10 @@ public org.mlflow.api.proto.Service.Metric buildPartial() {
           to_bitField0_ |= 0x00000004;
         }
         result.timestamp_ = timestamp_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.step_ = step_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1066,6 +1135,9 @@ public Builder mergeFrom(org.mlflow.api.proto.Service.Metric other) {
         if (other.hasTimestamp()) {
           setTimestamp(other.getTimestamp());
         }
+        if (other.hasStep()) {
+          setStep(other.getStep());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -1291,6 +1363,54 @@ public Builder clearTimestamp() {
         onChanged();
         return this;
       }
+
+      private long step_ ;
+      /**
+       * 
+       * Step at which to log the metric.
+       * 
+       *
+       * optional int64 step = 4 [default = 0];
+       */
+      public boolean hasStep() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * 
+       * Step at which to log the metric.
+       * 
+       *
+       * optional int64 step = 4 [default = 0];
+       */
+      public long getStep() {
+        return step_;
+      }
+      /**
+       * 
+       * Step at which to log the metric.
+       * 
+       *
+       * optional int64 step = 4 [default = 0];
+       */
+      public Builder setStep(long value) {
+        bitField0_ |= 0x00000008;
+        step_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * 
+       * Step at which to log the metric.
+       * 
+       *
+       * optional int64 step = 4 [default = 0];
+       */
+      public Builder clearStep() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        step_ = 0L;
+        onChanged();
+        return this;
+      }
       @java.lang.Override
       public final Builder setUnknownFields(
           final com.google.protobuf.UnknownFieldSet unknownFields) {
@@ -3278,7 +3398,7 @@ org.mlflow.api.proto.Service.RunTagOrBuilder getTagsOrBuilder(
   }
   /**
    * 
-   * Run data (metrics, params, etc).
+   * Run data (metrics, params, and tags).
    * 
* * Protobuf type {@code mlflow.RunData} @@ -3741,7 +3861,7 @@ protected Builder newBuilderForType( } /** *
-     * Run data (metrics, params, etc).
+     * Run data (metrics, params, and tags).
      * 
* * Protobuf type {@code mlflow.RunData} @@ -5861,116 +5981,87 @@ public interface RunInfoOrBuilder extends * Unique identifier for the run. *
* - * optional string run_uuid = 1; + * optional string run_id = 15; */ - boolean hasRunUuid(); + boolean hasRunId(); /** *
      * Unique identifier for the run.
      * 
* - * optional string run_uuid = 1; + * optional string run_id = 15; */ - java.lang.String getRunUuid(); + java.lang.String getRunId(); /** *
      * Unique identifier for the run.
      * 
* - * optional string run_uuid = 1; + * optional string run_id = 15; */ com.google.protobuf.ByteString - getRunUuidBytes(); - - /** - *
-     * The experiment ID.
-     * 
- * - * optional int64 experiment_id = 2; - */ - boolean hasExperimentId(); - /** - *
-     * The experiment ID.
-     * 
- * - * optional int64 experiment_id = 2; - */ - long getExperimentId(); + getRunIdBytes(); /** *
-     * Human readable name that identifies this run.
+     * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ - boolean hasName(); + boolean hasRunUuid(); /** *
-     * Human readable name that identifies this run.
+     * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ - java.lang.String getName(); + java.lang.String getRunUuid(); /** *
-     * Human readable name that identifies this run.
+     * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ com.google.protobuf.ByteString - getNameBytes(); - - /** - *
-     * Source type.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - boolean hasSourceType(); - /** - *
-     * Source type.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - org.mlflow.api.proto.Service.SourceType getSourceType(); + getRunUuidBytes(); /** *
-     * Source identifier: GitHub URL, name of notebook, name of job, etc.
+     * The experiment ID.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - boolean hasSourceName(); + boolean hasExperimentId(); /** *
-     * Source identifier: GitHub URL, name of notebook, name of job, etc.
+     * The experiment ID.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - java.lang.String getSourceName(); + java.lang.String getExperimentId(); /** *
-     * Source identifier: GitHub URL, name of notebook, name of job, etc.
+     * The experiment ID.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ com.google.protobuf.ByteString - getSourceNameBytes(); + getExperimentIdBytes(); /** *
      * User who initiated the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* * optional string user_id = 6; @@ -5979,6 +6070,8 @@ public interface RunInfoOrBuilder extends /** *
      * User who initiated the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* * optional string user_id = 6; @@ -5987,6 +6080,8 @@ public interface RunInfoOrBuilder extends /** *
      * User who initiated the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* * optional string user_id = 6; @@ -6045,58 +6140,6 @@ public interface RunInfoOrBuilder extends */ long getEndTime(); - /** - *
-     * Git commit hash of the code used for the run.
-     * 
- * - * optional string source_version = 10; - */ - boolean hasSourceVersion(); - /** - *
-     * Git commit hash of the code used for the run.
-     * 
- * - * optional string source_version = 10; - */ - java.lang.String getSourceVersion(); - /** - *
-     * Git commit hash of the code used for the run.
-     * 
- * - * optional string source_version = 10; - */ - com.google.protobuf.ByteString - getSourceVersionBytes(); - - /** - *
-     * Name of the entry point for the run.
-     * 
- * - * optional string entry_point_name = 11; - */ - boolean hasEntryPointName(); - /** - *
-     * Name of the entry point for the run.
-     * 
- * - * optional string entry_point_name = 11; - */ - java.lang.String getEntryPointName(); - /** - *
-     * Name of the entry point for the run.
-     * 
- * - * optional string entry_point_name = 11; - */ - com.google.protobuf.ByteString - getEntryPointNameBytes(); - /** *
      * URI of the directory where artifacts should be uploaded.
@@ -6175,17 +6218,13 @@ private RunInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) {
       super(builder);
     }
     private RunInfo() {
+      runId_ = "";
       runUuid_ = "";
-      experimentId_ = 0L;
-      name_ = "";
-      sourceType_ = 1;
-      sourceName_ = "";
+      experimentId_ = "";
       userId_ = "";
       status_ = 1;
       startTime_ = 0L;
       endTime_ = 0L;
-      sourceVersion_ = "";
-      entryPointName_ = "";
       artifactUri_ = "";
       lifecycleStage_ = "";
     }
@@ -6216,42 +6255,19 @@ private RunInfo(
               break;
             case 10: {
               com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000001;
-              runUuid_ = bs;
-              break;
-            }
-            case 16: {
               bitField0_ |= 0x00000002;
-              experimentId_ = input.readInt64();
+              runUuid_ = bs;
               break;
             }
-            case 26: {
+            case 18: {
               com.google.protobuf.ByteString bs = input.readBytes();
               bitField0_ |= 0x00000004;
-              name_ = bs;
-              break;
-            }
-            case 32: {
-              int rawValue = input.readEnum();
-                @SuppressWarnings("deprecation")
-              org.mlflow.api.proto.Service.SourceType value = org.mlflow.api.proto.Service.SourceType.valueOf(rawValue);
-              if (value == null) {
-                unknownFields.mergeVarintField(4, rawValue);
-              } else {
-                bitField0_ |= 0x00000008;
-                sourceType_ = rawValue;
-              }
-              break;
-            }
-            case 42: {
-              com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000010;
-              sourceName_ = bs;
+              experimentId_ = bs;
               break;
             }
             case 50: {
               com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000020;
+              bitField0_ |= 0x00000008;
               userId_ = bs;
               break;
             }
@@ -6262,45 +6278,39 @@ private RunInfo(
               if (value == null) {
                 unknownFields.mergeVarintField(7, rawValue);
               } else {
-                bitField0_ |= 0x00000040;
+                bitField0_ |= 0x00000010;
                 status_ = rawValue;
               }
               break;
             }
             case 64: {
-              bitField0_ |= 0x00000080;
+              bitField0_ |= 0x00000020;
               startTime_ = input.readInt64();
               break;
             }
             case 72: {
-              bitField0_ |= 0x00000100;
+              bitField0_ |= 0x00000040;
               endTime_ = input.readInt64();
               break;
             }
-            case 82: {
-              com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000200;
-              sourceVersion_ = bs;
-              break;
-            }
-            case 90: {
-              com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000400;
-              entryPointName_ = bs;
-              break;
-            }
             case 106: {
               com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000800;
+              bitField0_ |= 0x00000080;
               artifactUri_ = bs;
               break;
             }
             case 114: {
               com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00001000;
+              bitField0_ |= 0x00000100;
               lifecycleStage_ = bs;
               break;
             }
+            case 122: {
+              com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              runId_ = bs;
+              break;
+            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
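The reworked parsing loop above summarizes the migration: run_uuid stays parseable on field 1 (wire tag 10) as a deprecated alias, experiment_id switches from a varint (tag 16) to a length-delimited string (tag 18), and the canonical run_id arrives as field 15 (tag 122). A sketch of forward-compatible client code, assuming the builder setters mirror the getters shown in this patch; the identifier values are invented:

import org.mlflow.api.proto.Service;

public class RunIdMigrationExample {
  public static void main(String[] args) {
    // A hand-built RunInfo stands in for one returned by a tracking server.
    Service.RunInfo info = Service.RunInfo.newBuilder()
        .setRunId("0f1e2d3c")    // canonical identifier, field 15
        .setRunUuid("0f1e2d3c")  // deprecated alias, field 1, still populated
        .setExperimentId("12")   // now a string rather than an int64
        .build();
    // Prefer run_id, falling back to the deprecated run_uuid for older servers.
    String id = info.hasRunId() ? info.getRunId() : info.getRunUuid();
    System.out.println(id + " in experiment " + info.getExperimentId());
  }
}

The removed name, source_type, source_name, source_version, and entry_point_name members follow the same pattern as the deprecated user_id comment: as of MLflow 1.0 that metadata travels as run tags (the patch itself points at the 'mlflow.user' tag) rather than as RunInfo fields.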
@@ -6334,16 +6344,16 @@ private RunInfo(
     }
 
     private int bitField0_;
-    public static final int RUN_UUID_FIELD_NUMBER = 1;
-    private volatile java.lang.Object runUuid_;
+    public static final int RUN_ID_FIELD_NUMBER = 15;
+    private volatile java.lang.Object runId_;
     /**
      * 
      * Unique identifier for the run.
      * 
* - * optional string run_uuid = 1; + * optional string run_id = 15; */ - public boolean hasRunUuid() { + public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** @@ -6351,10 +6361,10 @@ public boolean hasRunUuid() { * Unique identifier for the run. *
* - * optional string run_uuid = 1; + * optional string run_id = 15; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; + public java.lang.String getRunId() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -6362,7 +6372,7 @@ public java.lang.String getRunUuid() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - runUuid_ = s; + runId_ = s; } return s; } @@ -6372,66 +6382,45 @@ public java.lang.String getRunUuid() { * Unique identifier for the run. *
* - * optional string run_uuid = 1; + * optional string run_id = 15; */ public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - runUuid_ = b; + runId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int EXPERIMENT_ID_FIELD_NUMBER = 2; - private long experimentId_; + public static final int RUN_UUID_FIELD_NUMBER = 1; + private volatile java.lang.Object runUuid_; /** *
-     * The experiment ID.
+     * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional int64 experiment_id = 2; + * optional string run_uuid = 1; */ - public boolean hasExperimentId() { + public boolean hasRunUuid() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * The experiment ID.
+     * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional int64 experiment_id = 2; - */ - public long getExperimentId() { - return experimentId_; - } - - public static final int NAME_FIELD_NUMBER = 3; - private volatile java.lang.Object name_; - /** - *
-     * Human readable name that identifies this run.
-     * 
- * - * optional string name = 3; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - *
-     * Human readable name that identifies this run.
-     * 
- * - * optional string name = 3; + * optional string run_uuid = 1; */ - public java.lang.String getName() { - java.lang.Object ref = name_; + public java.lang.String getRunUuid() { + java.lang.Object ref = runUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -6439,78 +6428,54 @@ public java.lang.String getName() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - name_ = s; + runUuid_ = s; } return s; } } /** *
-     * Human readable name that identifies this run.
+     * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; + getRunUuidBytes() { + java.lang.Object ref = runUuid_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - name_ = b; + runUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int SOURCE_TYPE_FIELD_NUMBER = 4; - private int sourceType_; - /** - *
-     * Source type.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public boolean hasSourceType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - *
-     * Source type.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public org.mlflow.api.proto.Service.SourceType getSourceType() { - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.SourceType result = org.mlflow.api.proto.Service.SourceType.valueOf(sourceType_); - return result == null ? org.mlflow.api.proto.Service.SourceType.NOTEBOOK : result; - } - - public static final int SOURCE_NAME_FIELD_NUMBER = 5; - private volatile java.lang.Object sourceName_; + public static final int EXPERIMENT_ID_FIELD_NUMBER = 2; + private volatile java.lang.Object experimentId_; /** *
-     * Source identifier: GitHub URL, name of notebook, name of job, etc.
+     * The experiment ID.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public boolean hasSourceName() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public boolean hasExperimentId() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
-     * Source identifier: GitHub URL, name of notebook, name of job, etc.
+     * The experiment ID.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public java.lang.String getSourceName() { - java.lang.Object ref = sourceName_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -6518,26 +6483,26 @@ public java.lang.String getSourceName() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - sourceName_ = s; + experimentId_ = s; } return s; } } /** *
-     * Source identifier: GitHub URL, name of notebook, name of job, etc.
+     * The experiment ID.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ public com.google.protobuf.ByteString - getSourceNameBytes() { - java.lang.Object ref = sourceName_; + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - sourceName_ = b; + experimentId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -6549,16 +6514,20 @@ public java.lang.String getSourceName() { /** *
      * User who initiated the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* * optional string user_id = 6; */ public boolean hasUserId() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
      * User who initiated the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* * optional string user_id = 6; @@ -6580,6 +6549,8 @@ public java.lang.String getUserId() { /** *
      * User who initiated the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* * optional string user_id = 6; @@ -6608,7 +6579,7 @@ public java.lang.String getUserId() { * optional .mlflow.RunStatus status = 7; */ public boolean hasStatus() { - return ((bitField0_ & 0x00000040) == 0x00000040); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** *
@@ -6633,7 +6604,7 @@ public org.mlflow.api.proto.Service.RunStatus getStatus() {
      * optional int64 start_time = 8;
      */
     public boolean hasStartTime() {
-      return ((bitField0_ & 0x00000080) == 0x00000080);
+      return ((bitField0_ & 0x00000020) == 0x00000020);
     }
     /**
      * 
@@ -6656,7 +6627,7 @@ public long getStartTime() {
      * optional int64 end_time = 9;
      */
     public boolean hasEndTime() {
-      return ((bitField0_ & 0x00000100) == 0x00000100);
+      return ((bitField0_ & 0x00000040) == 0x00000040);
     }
     /**
      * 
@@ -6669,114 +6640,6 @@ public long getEndTime() {
       return endTime_;
     }
 
-    public static final int SOURCE_VERSION_FIELD_NUMBER = 10;
-    private volatile java.lang.Object sourceVersion_;
-    /**
-     * <pre>
-     * Git commit hash of the code used for the run.
-     * </pre>
-     *
-     * <code>optional string source_version = 10;</code>
-     */
-    public boolean hasSourceVersion() {
-      return ((bitField0_ & 0x00000200) == 0x00000200);
-    }
-    /**
-     * <pre>
-     * Git commit hash of the code used for the run.
-     * </pre>
-     *
-     * <code>optional string source_version = 10;</code>
-     */
-    public java.lang.String getSourceVersion() {
-      java.lang.Object ref = sourceVersion_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs =
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          sourceVersion_ = s;
-        }
-        return s;
-      }
-    }
-    /**
-     * <pre>
-     * Git commit hash of the code used for the run.
-     * </pre>
-     *
-     * <code>optional string source_version = 10;</code>
-     */
-    public com.google.protobuf.ByteString
-        getSourceVersionBytes() {
-      java.lang.Object ref = sourceVersion_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        sourceVersion_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
-    }
-
-    public static final int ENTRY_POINT_NAME_FIELD_NUMBER = 11;
-    private volatile java.lang.Object entryPointName_;
-    /**
-     * <pre>
-     * Name of the entry point for the run.
-     * </pre>
-     *
-     * <code>optional string entry_point_name = 11;</code>
-     */
-    public boolean hasEntryPointName() {
-      return ((bitField0_ & 0x00000400) == 0x00000400);
-    }
-    /**
-     * <pre>
-     * Name of the entry point for the run.
-     * </pre>
-     *
-     * <code>optional string entry_point_name = 11;</code>
-     */
-    public java.lang.String getEntryPointName() {
-      java.lang.Object ref = entryPointName_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs =
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          entryPointName_ = s;
-        }
-        return s;
-      }
-    }
-    /**
-     * <pre>
-     * Name of the entry point for the run.
-     * </pre>
-     *
-     * <code>optional string entry_point_name = 11;</code>
-     */
-    public com.google.protobuf.ByteString
-        getEntryPointNameBytes() {
-      java.lang.Object ref = entryPointName_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        entryPointName_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
-    }
-
     public static final int ARTIFACT_URI_FIELD_NUMBER = 13;
     private volatile java.lang.Object artifactUri_;
     /**
@@ -6790,7 +6653,7 @@ public java.lang.String getEntryPointName() {
      * <code>optional string artifact_uri = 13;</code>
      */
     public boolean hasArtifactUri() {
-      return ((bitField0_ & 0x00000800) == 0x00000800);
+      return ((bitField0_ & 0x00000080) == 0x00000080);
     }
     /**
      * <pre>
@@ -6850,7 +6713,7 @@ public java.lang.String getArtifactUri() {
      * optional string lifecycle_stage = 14;
      */
     public boolean hasLifecycleStage() {
-      return ((bitField0_ & 0x00001000) == 0x00001000);
+      return ((bitField0_ & 0x00000100) == 0x00000100);
     }
     /**
      * 
@@ -6908,45 +6771,33 @@ public final boolean isInitialized() {
     @java.lang.Override
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_);
-      }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeInt64(2, experimentId_);
+        com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 3, name_);
+        com.google.protobuf.GeneratedMessageV3.writeString(output, 2, experimentId_);
       }
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeEnum(4, sourceType_);
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 5, sourceName_);
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
         com.google.protobuf.GeneratedMessageV3.writeString(output, 6, userId_);
       }
-      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
         output.writeEnum(7, status_);
       }
-      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
         output.writeInt64(8, startTime_);
       }
-      if (((bitField0_ & 0x00000100) == 0x00000100)) {
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
         output.writeInt64(9, endTime_);
       }
-      if (((bitField0_ & 0x00000200) == 0x00000200)) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 10, sourceVersion_);
-      }
-      if (((bitField0_ & 0x00000400) == 0x00000400)) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 11, entryPointName_);
-      }
-      if (((bitField0_ & 0x00000800) == 0x00000800)) {
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
         com.google.protobuf.GeneratedMessageV3.writeString(output, 13, artifactUri_);
       }
-      if (((bitField0_ & 0x00001000) == 0x00001000)) {
+      if (((bitField0_ & 0x00000100) == 0x00000100)) {
         com.google.protobuf.GeneratedMessageV3.writeString(output, 14, lifecycleStage_);
       }
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        com.google.protobuf.GeneratedMessageV3.writeString(output, 15, runId_);
+      }
       unknownFields.writeTo(output);
     }
 
@@ -6956,50 +6807,36 @@ public int getSerializedSize() {
       if (size != -1) return size;
 
       size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_);
-      }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeInt64Size(2, experimentId_);
+        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, name_);
+        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, experimentId_);
       }
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeEnumSize(4, sourceType_);
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, sourceName_);
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
         size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, userId_);
       }
-      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
         size += com.google.protobuf.CodedOutputStream
           .computeEnumSize(7, status_);
       }
-      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
         size += com.google.protobuf.CodedOutputStream
           .computeInt64Size(8, startTime_);
       }
-      if (((bitField0_ & 0x00000100) == 0x00000100)) {
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
         size += com.google.protobuf.CodedOutputStream
           .computeInt64Size(9, endTime_);
       }
-      if (((bitField0_ & 0x00000200) == 0x00000200)) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, sourceVersion_);
-      }
-      if (((bitField0_ & 0x00000400) == 0x00000400)) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(11, entryPointName_);
-      }
-      if (((bitField0_ & 0x00000800) == 0x00000800)) {
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
         size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, artifactUri_);
       }
-      if (((bitField0_ & 0x00001000) == 0x00001000)) {
+      if (((bitField0_ & 0x00000100) == 0x00000100)) {
         size += com.google.protobuf.GeneratedMessageV3.computeStringSize(14, lifecycleStage_);
       }
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(15, runId_);
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -7016,6 +6853,11 @@ public boolean equals(final java.lang.Object obj) {
       org.mlflow.api.proto.Service.RunInfo other = (org.mlflow.api.proto.Service.RunInfo) obj;
 
       boolean result = true;
+      result = result && (hasRunId() == other.hasRunId());
+      if (hasRunId()) {
+        result = result && getRunId()
+            .equals(other.getRunId());
+      }
       result = result && (hasRunUuid() == other.hasRunUuid());
       if (hasRunUuid()) {
         result = result && getRunUuid()
@@ -7023,22 +6865,8 @@ public boolean equals(final java.lang.Object obj) {
       }
       result = result && (hasExperimentId() == other.hasExperimentId());
       if (hasExperimentId()) {
-        result = result && (getExperimentId()
-            == other.getExperimentId());
-      }
-      result = result && (hasName() == other.hasName());
-      if (hasName()) {
-        result = result && getName()
-            .equals(other.getName());
-      }
-      result = result && (hasSourceType() == other.hasSourceType());
-      if (hasSourceType()) {
-        result = result && sourceType_ == other.sourceType_;
-      }
-      result = result && (hasSourceName() == other.hasSourceName());
-      if (hasSourceName()) {
-        result = result && getSourceName()
-            .equals(other.getSourceName());
+        result = result && getExperimentId()
+            .equals(other.getExperimentId());
       }
       result = result && (hasUserId() == other.hasUserId());
       if (hasUserId()) {
@@ -7059,16 +6887,6 @@ public boolean equals(final java.lang.Object obj) {
         result = result && (getEndTime()
             == other.getEndTime());
       }
-      result = result && (hasSourceVersion() == other.hasSourceVersion());
-      if (hasSourceVersion()) {
-        result = result && getSourceVersion()
-            .equals(other.getSourceVersion());
-      }
-      result = result && (hasEntryPointName() == other.hasEntryPointName());
-      if (hasEntryPointName()) {
-        result = result && getEntryPointName()
-            .equals(other.getEntryPointName());
-      }
       result = result && (hasArtifactUri() == other.hasArtifactUri());
       if (hasArtifactUri()) {
         result = result && getArtifactUri()
@@ -7090,26 +6908,17 @@ public int hashCode() {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptor().hashCode();
+      if (hasRunId()) {
+        hash = (37 * hash) + RUN_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getRunId().hashCode();
+      }
       if (hasRunUuid()) {
         hash = (37 * hash) + RUN_UUID_FIELD_NUMBER;
         hash = (53 * hash) + getRunUuid().hashCode();
       }
       if (hasExperimentId()) {
         hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER;
-        hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
-            getExperimentId());
-      }
-      if (hasName()) {
-        hash = (37 * hash) + NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getName().hashCode();
-      }
-      if (hasSourceType()) {
-        hash = (37 * hash) + SOURCE_TYPE_FIELD_NUMBER;
-        hash = (53 * hash) + sourceType_;
-      }
-      if (hasSourceName()) {
-        hash = (37 * hash) + SOURCE_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getSourceName().hashCode();
+        hash = (53 * hash) + getExperimentId().hashCode();
       }
       if (hasUserId()) {
         hash = (37 * hash) + USER_ID_FIELD_NUMBER;
@@ -7129,14 +6938,6 @@ public int hashCode() {
         hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
             getEndTime());
       }
-      if (hasSourceVersion()) {
-        hash = (37 * hash) + SOURCE_VERSION_FIELD_NUMBER;
-        hash = (53 * hash) + getSourceVersion().hashCode();
-      }
-      if (hasEntryPointName()) {
-        hash = (37 * hash) + ENTRY_POINT_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getEntryPointName().hashCode();
-      }
       if (hasArtifactUri()) {
         hash = (37 * hash) + ARTIFACT_URI_FIELD_NUMBER;
         hash = (53 * hash) + getArtifactUri().hashCode();
@@ -7282,32 +7083,24 @@ private void maybeForceBuilderInitialization() {
       @java.lang.Override
       public Builder clear() {
         super.clear();
-        runUuid_ = "";
+        runId_ = "";
         bitField0_ = (bitField0_ & ~0x00000001);
-        experimentId_ = 0L;
+        runUuid_ = "";
         bitField0_ = (bitField0_ & ~0x00000002);
-        name_ = "";
+        experimentId_ = "";
         bitField0_ = (bitField0_ & ~0x00000004);
-        sourceType_ = 1;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        sourceName_ = "";
-        bitField0_ = (bitField0_ & ~0x00000010);
         userId_ = "";
-        bitField0_ = (bitField0_ & ~0x00000020);
+        bitField0_ = (bitField0_ & ~0x00000008);
         status_ = 1;
-        bitField0_ = (bitField0_ & ~0x00000040);
+        bitField0_ = (bitField0_ & ~0x00000010);
         startTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000080);
+        bitField0_ = (bitField0_ & ~0x00000020);
         endTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000100);
-        sourceVersion_ = "";
-        bitField0_ = (bitField0_ & ~0x00000200);
-        entryPointName_ = "";
-        bitField0_ = (bitField0_ & ~0x00000400);
+        bitField0_ = (bitField0_ & ~0x00000040);
         artifactUri_ = "";
-        bitField0_ = (bitField0_ & ~0x00000800);
+        bitField0_ = (bitField0_ & ~0x00000080);
         lifecycleStage_ = "";
-        bitField0_ = (bitField0_ & ~0x00001000);
+        bitField0_ = (bitField0_ & ~0x00000100);
         return this;
       }
 
@@ -7339,54 +7132,38 @@ public org.mlflow.api.proto.Service.RunInfo buildPartial() {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.runUuid_ = runUuid_;
+        result.runId_ = runId_;
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        result.experimentId_ = experimentId_;
+        result.runUuid_ = runUuid_;
         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
           to_bitField0_ |= 0x00000004;
         }
-        result.name_ = name_;
+        result.experimentId_ = experimentId_;
         if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
           to_bitField0_ |= 0x00000008;
         }
-        result.sourceType_ = sourceType_;
+        result.userId_ = userId_;
         if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
           to_bitField0_ |= 0x00000010;
         }
-        result.sourceName_ = sourceName_;
+        result.status_ = status_;
         if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
           to_bitField0_ |= 0x00000020;
         }
-        result.userId_ = userId_;
+        result.startTime_ = startTime_;
         if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
           to_bitField0_ |= 0x00000040;
         }
-        result.status_ = status_;
+        result.endTime_ = endTime_;
         if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
           to_bitField0_ |= 0x00000080;
         }
-        result.startTime_ = startTime_;
+        result.artifactUri_ = artifactUri_;
         if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
           to_bitField0_ |= 0x00000100;
         }
-        result.endTime_ = endTime_;
-        if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
-          to_bitField0_ |= 0x00000200;
-        }
-        result.sourceVersion_ = sourceVersion_;
-        if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
-          to_bitField0_ |= 0x00000400;
-        }
-        result.entryPointName_ = entryPointName_;
-        if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
-          to_bitField0_ |= 0x00000800;
-        }
-        result.artifactUri_ = artifactUri_;
-        if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
-          to_bitField0_ |= 0x00001000;
-        }
         result.lifecycleStage_ = lifecycleStage_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
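Note that buildPartial() above copies every from_bitField0_ bit across unchanged even though each field now occupies a lower slot; the masks are an internal detail that shifts whenever fields are removed, so callers should go through the generated has*() accessors. A small sketch of the migration-friendly read path this change suggests, preferring run_id with a fallback to the deprecated run_uuid:

static String runIdentifier(org.mlflow.api.proto.Service.RunInfo info) {
  // hasRunId()/hasRunUuid() are the stable presence API; the bit masks are not.
  if (info.hasRunId()) {
    return info.getRunId();
  }
  return info.getRunUuid();  // deprecated field 1, still set by older writers
}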
@@ -7437,29 +7214,23 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
 
       public Builder mergeFrom(org.mlflow.api.proto.Service.RunInfo other) {
         if (other == org.mlflow.api.proto.Service.RunInfo.getDefaultInstance()) return this;
-        if (other.hasRunUuid()) {
+        if (other.hasRunId()) {
           bitField0_ |= 0x00000001;
+          runId_ = other.runId_;
+          onChanged();
+        }
+        if (other.hasRunUuid()) {
+          bitField0_ |= 0x00000002;
           runUuid_ = other.runUuid_;
           onChanged();
         }
         if (other.hasExperimentId()) {
-          setExperimentId(other.getExperimentId());
-        }
-        if (other.hasName()) {
           bitField0_ |= 0x00000004;
-          name_ = other.name_;
-          onChanged();
-        }
-        if (other.hasSourceType()) {
-          setSourceType(other.getSourceType());
-        }
-        if (other.hasSourceName()) {
-          bitField0_ |= 0x00000010;
-          sourceName_ = other.sourceName_;
+          experimentId_ = other.experimentId_;
           onChanged();
         }
         if (other.hasUserId()) {
-          bitField0_ |= 0x00000020;
+          bitField0_ |= 0x00000008;
           userId_ = other.userId_;
           onChanged();
         }
@@ -7472,23 +7243,13 @@ public Builder mergeFrom(org.mlflow.api.proto.Service.RunInfo other) {
         if (other.hasEndTime()) {
           setEndTime(other.getEndTime());
         }
-        if (other.hasSourceVersion()) {
-          bitField0_ |= 0x00000200;
-          sourceVersion_ = other.sourceVersion_;
-          onChanged();
-        }
-        if (other.hasEntryPointName()) {
-          bitField0_ |= 0x00000400;
-          entryPointName_ = other.entryPointName_;
-          onChanged();
-        }
         if (other.hasArtifactUri()) {
-          bitField0_ |= 0x00000800;
+          bitField0_ |= 0x00000080;
           artifactUri_ = other.artifactUri_;
           onChanged();
         }
         if (other.hasLifecycleStage()) {
-          bitField0_ |= 0x00001000;
+          bitField0_ |= 0x00000100;
           lifecycleStage_ = other.lifecycleStage_;
           onChanged();
         }
@@ -7522,15 +7283,15 @@ public Builder mergeFrom(
       }
       private int bitField0_;
 
-      private java.lang.Object runUuid_ = "";
+      private java.lang.Object runId_ = "";
      /**
       * <pre>
       * Unique identifier for the run.
       * </pre>
       *
-      * <code>optional string run_uuid = 1;</code>
+      * <code>optional string run_id = 15;</code>
       */
-      public boolean hasRunUuid() {
+      public boolean hasRunId() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
@@ -7538,16 +7299,16 @@ public boolean hasRunUuid() {
       * Unique identifier for the run.
       * </pre>
       *
-      * <code>optional string run_uuid = 1;</code>
+      * <code>optional string run_id = 15;</code>
       */
-      public java.lang.String getRunUuid() {
-        java.lang.Object ref = runUuid_;
+      public java.lang.String getRunId() {
+        java.lang.Object ref = runId_;
         if (!(ref instanceof java.lang.String)) {
           com.google.protobuf.ByteString bs =
               (com.google.protobuf.ByteString) ref;
           java.lang.String s = bs.toStringUtf8();
           if (bs.isValidUtf8()) {
-            runUuid_ = s;
+            runId_ = s;
           }
           return s;
         } else {
@@ -7559,16 +7320,16 @@ public java.lang.String getRunUuid() {
       * Unique identifier for the run.
       * </pre>
       *
-      * <code>optional string run_uuid = 1;</code>
+      * <code>optional string run_id = 15;</code>
       */
       public com.google.protobuf.ByteString
-          getRunUuidBytes() {
-        java.lang.Object ref = runUuid_;
+          getRunIdBytes() {
+        java.lang.Object ref = runId_;
         if (ref instanceof String) {
           com.google.protobuf.ByteString b =
               com.google.protobuf.ByteString.copyFromUtf8(
                   (java.lang.String) ref);
-          runUuid_ = b;
+          runId_ = b;
           return b;
         } else {
           return (com.google.protobuf.ByteString) ref;
         }
@@ -7579,15 +7340,15 @@ public java.lang.String getRunUuid() {
       * Unique identifier for the run.
       * </pre>
       *
-      * <code>optional string run_uuid = 1;</code>
+      * <code>optional string run_id = 15;</code>
       */
-      public Builder setRunUuid(
+      public Builder setRunId(
           java.lang.String value) {
         if (value == null) {
     throw new NullPointerException();
   }
         bitField0_ |= 0x00000001;
-        runUuid_ = value;
+        runId_ = value;
         onChanged();
         return this;
       }
@@ -7596,11 +7357,11 @@ public Builder setRunUuid(
       * Unique identifier for the run.
       * </pre>
       *
-      * <code>optional string run_uuid = 1;</code>
+      * <code>optional string run_id = 15;</code>
       */
-      public Builder clearRunUuid() {
+      public Builder clearRunId() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        runUuid_ = getDefaultInstance().getRunUuid();
+        runId_ = getDefaultInstance().getRunId();
         onChanged();
         return this;
       }
@@ -7609,93 +7370,47 @@ public Builder clearRunUuid() {
       * Unique identifier for the run.
       * </pre>
       *
-      * <code>optional string run_uuid = 1;</code>
+      * <code>optional string run_id = 15;</code>
       */
-      public Builder setRunUuidBytes(
+      public Builder setRunIdBytes(
          com.google.protobuf.ByteString value) {
         if (value == null) {
     throw new NullPointerException();
   }
         bitField0_ |= 0x00000001;
-        runUuid_ = value;
+        runId_ = value;
         onChanged();
         return this;
       }
 
-      private long experimentId_ ;
+      private java.lang.Object runUuid_ = "";
       /**
       * <pre>
-       * The experiment ID.
+       * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional int64 experiment_id = 2; + * optional string run_uuid = 1; */ - public boolean hasExperimentId() { + public boolean hasRunUuid() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * The experiment ID.
-       * 
- * - * optional int64 experiment_id = 2; - */ - public long getExperimentId() { - return experimentId_; - } - /** - *
-       * The experiment ID.
-       * 
- * - * optional int64 experiment_id = 2; - */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000002; - experimentId_ = value; - onChanged(); - return this; - } - /** - *
-       * The experiment ID.
-       * 
- * - * optional int64 experiment_id = 2; - */ - public Builder clearExperimentId() { - bitField0_ = (bitField0_ & ~0x00000002); - experimentId_ = 0L; - onChanged(); - return this; - } - - private java.lang.Object name_ = ""; - /** - *
-       * Human readable name that identifies this run.
-       * 
- * - * optional string name = 3; - */ - public boolean hasName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - *
-       * Human readable name that identifies this run.
+       * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ - public java.lang.String getName() { - java.lang.Object ref = name_; + public java.lang.String getRunUuid() { + java.lang.Object ref = runUuid_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - name_ = s; + runUuid_ = s; } return s; } else { @@ -7704,19 +7419,20 @@ public java.lang.String getName() { } /** *
-       * Human readable name that identifies this run.
+       * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString - getNameBytes() { - java.lang.Object ref = name_; + getRunUuidBytes() { + java.lang.Object ref = runUuid_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - name_ = b; + runUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -7724,131 +7440,81 @@ public java.lang.String getName() { } /** *
-       * Human readable name that identifies this run.
+       * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ - public Builder setName( + public Builder setRunUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - name_ = value; + bitField0_ |= 0x00000002; + runUuid_ = value; onChanged(); return this; } /** *
-       * Human readable name that identifies this run.
+       * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000004); - name_ = getDefaultInstance().getName(); + public Builder clearRunUuid() { + bitField0_ = (bitField0_ & ~0x00000002); + runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * Human readable name that identifies this run.
+       * [Deprecated, use run_id instead] Unique identifier for the run. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string name = 3; + * optional string run_uuid = 1; */ - public Builder setNameBytes( + public Builder setRunUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - name_ = value; - onChanged(); - return this; - } - - private int sourceType_ = 1; - /** - *
-       * Source type.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public boolean hasSourceType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - *
-       * Source type.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public org.mlflow.api.proto.Service.SourceType getSourceType() { - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.SourceType result = org.mlflow.api.proto.Service.SourceType.valueOf(sourceType_); - return result == null ? org.mlflow.api.proto.Service.SourceType.NOTEBOOK : result; - } - /** - *
-       * Source type.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public Builder setSourceType(org.mlflow.api.proto.Service.SourceType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - sourceType_ = value.getNumber(); - onChanged(); - return this; - } - /** - *
-       * Source type.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public Builder clearSourceType() { - bitField0_ = (bitField0_ & ~0x00000008); - sourceType_ = 1; + bitField0_ |= 0x00000002; + runUuid_ = value; onChanged(); return this; } - private java.lang.Object sourceName_ = ""; + private java.lang.Object experimentId_ = ""; /** *
-       * Source identifier: GitHub URL, name of notebook, name of job, etc.
+       * The experiment ID.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public boolean hasSourceName() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public boolean hasExperimentId() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
-       * Source identifier: GitHub URL, name of notebook, name of job, etc.
+       * The experiment ID.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public java.lang.String getSourceName() { - java.lang.Object ref = sourceName_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - sourceName_ = s; + experimentId_ = s; } return s; } else { @@ -7857,19 +7523,19 @@ public java.lang.String getSourceName() { } /** *
-       * Source identifier: GitHub URL, name of notebook, name of job, etc.
+       * The experiment ID.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ public com.google.protobuf.ByteString - getSourceNameBytes() { - java.lang.Object ref = sourceName_; + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - sourceName_ = b; + experimentId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -7877,48 +7543,48 @@ public java.lang.String getSourceName() { } /** *
-       * Source identifier: GitHub URL, name of notebook, name of job, etc.
+       * The experiment ID.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public Builder setSourceName( + public Builder setExperimentId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000010; - sourceName_ = value; + bitField0_ |= 0x00000004; + experimentId_ = value; onChanged(); return this; } /** *
-       * Source identifier: GitHub URL, name of notebook, name of job, etc.
+       * The experiment ID.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public Builder clearSourceName() { - bitField0_ = (bitField0_ & ~0x00000010); - sourceName_ = getDefaultInstance().getSourceName(); + public Builder clearExperimentId() { + bitField0_ = (bitField0_ & ~0x00000004); + experimentId_ = getDefaultInstance().getExperimentId(); onChanged(); return this; } /** *
-       * Source identifier: GitHub URL, name of notebook, name of job, etc.
+       * The experiment ID.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 2; */ - public Builder setSourceNameBytes( + public Builder setExperimentIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000010; - sourceName_ = value; + bitField0_ |= 0x00000004; + experimentId_ = value; onChanged(); return this; } @@ -7927,16 +7593,20 @@ public Builder setSourceNameBytes( /** *
        * User who initiated the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* * optional string user_id = 6; */ public boolean hasUserId() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
        * User who initiated the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* * optional string user_id = 6; @@ -7958,6 +7628,8 @@ public java.lang.String getUserId() { /** *
        * User who initiated the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* * optional string user_id = 6; @@ -7978,6 +7650,8 @@ public java.lang.String getUserId() { /** *
        * User who initiated the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* * optional string user_id = 6; @@ -7987,7 +7661,7 @@ public Builder setUserId( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000008; userId_ = value; onChanged(); return this; @@ -7995,12 +7669,14 @@ public Builder setUserId( /** *
        * User who initiated the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* * optional string user_id = 6; */ public Builder clearUserId() { - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000008); userId_ = getDefaultInstance().getUserId(); onChanged(); return this; @@ -8008,6 +7684,8 @@ public Builder clearUserId() { /** *
        * User who initiated the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* * optional string user_id = 6; @@ -8017,7 +7695,7 @@ public Builder setUserIdBytes( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000008; userId_ = value; onChanged(); return this; @@ -8032,7 +7710,7 @@ public Builder setUserIdBytes( * optional .mlflow.RunStatus status = 7; */ public boolean hasStatus() { - return ((bitField0_ & 0x00000040) == 0x00000040); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** *
@@ -8057,7 +7735,7 @@ public Builder setStatus(org.mlflow.api.proto.Service.RunStatus value) {
         if (value == null) {
           throw new NullPointerException();
         }
-        bitField0_ |= 0x00000040;
+        bitField0_ |= 0x00000010;
         status_ = value.getNumber();
         onChanged();
         return this;
@@ -8070,7 +7748,7 @@ public Builder setStatus(org.mlflow.api.proto.Service.RunStatus value) {
        * optional .mlflow.RunStatus status = 7;
        */
       public Builder clearStatus() {
-        bitField0_ = (bitField0_ & ~0x00000040);
+        bitField0_ = (bitField0_ & ~0x00000010);
         status_ = 1;
         onChanged();
         return this;
@@ -8085,7 +7763,7 @@ public Builder clearStatus() {
        * optional int64 start_time = 8;
        */
       public boolean hasStartTime() {
-        return ((bitField0_ & 0x00000080) == 0x00000080);
+        return ((bitField0_ & 0x00000020) == 0x00000020);
       }
       /**
        * 
@@ -8105,7 +7783,7 @@ public long getStartTime() {
        * optional int64 start_time = 8;
        */
       public Builder setStartTime(long value) {
-        bitField0_ |= 0x00000080;
+        bitField0_ |= 0x00000020;
         startTime_ = value;
         onChanged();
         return this;
@@ -8118,7 +7796,7 @@ public Builder setStartTime(long value) {
        * optional int64 start_time = 8;
        */
       public Builder clearStartTime() {
-        bitField0_ = (bitField0_ & ~0x00000080);
+        bitField0_ = (bitField0_ & ~0x00000020);
         startTime_ = 0L;
         onChanged();
         return this;
@@ -8133,7 +7811,7 @@ public Builder clearStartTime() {
        * optional int64 end_time = 9;
        */
       public boolean hasEndTime() {
-        return ((bitField0_ & 0x00000100) == 0x00000100);
+        return ((bitField0_ & 0x00000040) == 0x00000040);
       }
       /**
        * 
@@ -8153,7 +7831,7 @@ public long getEndTime() {
        * optional int64 end_time = 9;
        */
       public Builder setEndTime(long value) {
-        bitField0_ |= 0x00000100;
+        bitField0_ |= 0x00000040;
         endTime_ = value;
         onChanged();
         return this;
@@ -8166,212 +7844,12 @@ public Builder setEndTime(long value) {
        * optional int64 end_time = 9;
        */
       public Builder clearEndTime() {
-        bitField0_ = (bitField0_ & ~0x00000100);
+        bitField0_ = (bitField0_ & ~0x00000040);
         endTime_ = 0L;
         onChanged();
         return this;
       }
 
-      private java.lang.Object sourceVersion_ = "";
-      /**
-       * <pre>
-       * Git commit hash of the code used for the run.
-       * </pre>
-       *
-       * <code>optional string source_version = 10;</code>
-       */
-      public boolean hasSourceVersion() {
-        return ((bitField0_ & 0x00000200) == 0x00000200);
-      }
-      /**
-       * <pre>
-       * Git commit hash of the code used for the run.
-       * </pre>
-       *
-       * <code>optional string source_version = 10;</code>
-       */
-      public java.lang.String getSourceVersion() {
-        java.lang.Object ref = sourceVersion_;
-        if (!(ref instanceof java.lang.String)) {
-          com.google.protobuf.ByteString bs =
-              (com.google.protobuf.ByteString) ref;
-          java.lang.String s = bs.toStringUtf8();
-          if (bs.isValidUtf8()) {
-            sourceVersion_ = s;
-          }
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <pre>
-       * Git commit hash of the code used for the run.
-       * </pre>
-       *
-       * <code>optional string source_version = 10;</code>
-       */
-      public com.google.protobuf.ByteString
-          getSourceVersionBytes() {
-        java.lang.Object ref = sourceVersion_;
-        if (ref instanceof String) {
-          com.google.protobuf.ByteString b =
-              com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          sourceVersion_ = b;
-          return b;
-        } else {
-          return (com.google.protobuf.ByteString) ref;
-        }
-      }
-      /**
-       * <pre>
-       * Git commit hash of the code used for the run.
-       * </pre>
-       *
-       * <code>optional string source_version = 10;</code>
-       */
-      public Builder setSourceVersion(
-          java.lang.String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-        bitField0_ |= 0x00000200;
-        sourceVersion_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * Git commit hash of the code used for the run.
-       * </pre>
-       *
-       * <code>optional string source_version = 10;</code>
-       */
-      public Builder clearSourceVersion() {
-        bitField0_ = (bitField0_ & ~0x00000200);
-        sourceVersion_ = getDefaultInstance().getSourceVersion();
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * Git commit hash of the code used for the run.
-       * </pre>
-       *
-       * <code>optional string source_version = 10;</code>
-       */
-      public Builder setSourceVersionBytes(
-          com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-        bitField0_ |= 0x00000200;
-        sourceVersion_ = value;
-        onChanged();
-        return this;
-      }
-
-      private java.lang.Object entryPointName_ = "";
-      /**
-       * <pre>
-       * Name of the entry point for the run.
-       * </pre>
-       *
-       * <code>optional string entry_point_name = 11;</code>
-       */
-      public boolean hasEntryPointName() {
-        return ((bitField0_ & 0x00000400) == 0x00000400);
-      }
-      /**
-       * <pre>
-       * Name of the entry point for the run.
-       * </pre>
-       *
-       * <code>optional string entry_point_name = 11;</code>
-       */
-      public java.lang.String getEntryPointName() {
-        java.lang.Object ref = entryPointName_;
-        if (!(ref instanceof java.lang.String)) {
-          com.google.protobuf.ByteString bs =
-              (com.google.protobuf.ByteString) ref;
-          java.lang.String s = bs.toStringUtf8();
-          if (bs.isValidUtf8()) {
-            entryPointName_ = s;
-          }
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <pre>
-       * Name of the entry point for the run.
-       * </pre>
-       *
-       * <code>optional string entry_point_name = 11;</code>
-       */
-      public com.google.protobuf.ByteString
-          getEntryPointNameBytes() {
-        java.lang.Object ref = entryPointName_;
-        if (ref instanceof String) {
-          com.google.protobuf.ByteString b =
-              com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          entryPointName_ = b;
-          return b;
-        } else {
-          return (com.google.protobuf.ByteString) ref;
-        }
-      }
-      /**
-       * <pre>
-       * Name of the entry point for the run.
-       * </pre>
-       *
-       * <code>optional string entry_point_name = 11;</code>
-       */
-      public Builder setEntryPointName(
-          java.lang.String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-        bitField0_ |= 0x00000400;
-        entryPointName_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * Name of the entry point for the run.
-       * </pre>
-       *
-       * <code>optional string entry_point_name = 11;</code>
-       */
-      public Builder clearEntryPointName() {
-        bitField0_ = (bitField0_ & ~0x00000400);
-        entryPointName_ = getDefaultInstance().getEntryPointName();
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * Name of the entry point for the run.
-       * </pre>
-       *
-       * <code>optional string entry_point_name = 11;</code>
-       */
-      public Builder setEntryPointNameBytes(
-          com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-        bitField0_ |= 0x00000400;
-        entryPointName_ = value;
-        onChanged();
-        return this;
-      }
-
       private java.lang.Object artifactUri_ = "";
       /**
        * <pre>
@@ -8384,7 +7862,7 @@ public Builder setEntryPointNameBytes(
        * optional string artifact_uri = 13;
        */
       public boolean hasArtifactUri() {
-        return ((bitField0_ & 0x00000800) == 0x00000800);
+        return ((bitField0_ & 0x00000080) == 0x00000080);
       }
       /**
        * 
@@ -8448,7 +7926,7 @@ public Builder setArtifactUri(
         if (value == null) {
     throw new NullPointerException();
   }
-  bitField0_ |= 0x00000800;
+  bitField0_ |= 0x00000080;
         artifactUri_ = value;
         onChanged();
         return this;
@@ -8464,7 +7942,7 @@ public Builder setArtifactUri(
        * optional string artifact_uri = 13;
        */
       public Builder clearArtifactUri() {
-        bitField0_ = (bitField0_ & ~0x00000800);
+        bitField0_ = (bitField0_ & ~0x00000080);
         artifactUri_ = getDefaultInstance().getArtifactUri();
         onChanged();
         return this;
@@ -8484,7 +7962,7 @@ public Builder setArtifactUriBytes(
         if (value == null) {
     throw new NullPointerException();
   }
-  bitField0_ |= 0x00000800;
+  bitField0_ |= 0x00000080;
         artifactUri_ = value;
         onChanged();
         return this;
@@ -8499,7 +7977,7 @@ public Builder setArtifactUriBytes(
        * optional string lifecycle_stage = 14;
        */
       public boolean hasLifecycleStage() {
-        return ((bitField0_ & 0x00001000) == 0x00001000);
+        return ((bitField0_ & 0x00000100) == 0x00000100);
       }
       /**
        * 
@@ -8554,7 +8032,7 @@ public Builder setLifecycleStage(
         if (value == null) {
     throw new NullPointerException();
   }
-  bitField0_ |= 0x00001000;
+  bitField0_ |= 0x00000100;
         lifecycleStage_ = value;
         onChanged();
         return this;
@@ -8567,7 +8045,7 @@ public Builder setLifecycleStage(
        * optional string lifecycle_stage = 14;
        */
       public Builder clearLifecycleStage() {
-        bitField0_ = (bitField0_ & ~0x00001000);
+        bitField0_ = (bitField0_ & ~0x00000100);
         lifecycleStage_ = getDefaultInstance().getLifecycleStage();
         onChanged();
         return this;
@@ -8584,7 +8062,7 @@ public Builder setLifecycleStageBytes(
         if (value == null) {
     throw new NullPointerException();
   }
-  bitField0_ |= 0x00001000;
+  bitField0_ |= 0x00000100;
         lifecycleStage_ = value;
         onChanged();
         return this;
@@ -8651,7 +8129,7 @@ public interface ExperimentOrBuilder extends
      * Unique identifier for the experiment.
      * </pre>
      *
-     * <code>optional int64 experiment_id = 1;</code>
+     * <code>optional string experiment_id = 1;</code>
      */
     boolean hasExperimentId();
     /**
@@ -8659,9 +8137,18 @@ public interface ExperimentOrBuilder extends
      * Unique identifier for the experiment.
      * </pre>
      *
-     * <code>optional int64 experiment_id = 1;</code>
+     * <code>optional string experiment_id = 1;</code>
      */
-    long getExperimentId();
+    java.lang.String getExperimentId();
+    /**
+     * <pre>
+     * Unique identifier for the experiment.
+     * </pre>
+     *
+     * <code>optional string experiment_id = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getExperimentIdBytes();
     /**
      * <pre>
@@ -8795,7 +8282,7 @@ private Experiment(com.google.protobuf.GeneratedMessageV3.Builder builder) {
       super(builder);
     }
     private Experiment() {
-      experimentId_ = 0L;
+      experimentId_ = "";
       name_ = "";
       artifactLocation_ = "";
       lifecycleStage_ = "";
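Experiment.experiment_id makes the same int64-to-string move: the parsing switch below now reads the field as a length-delimited string (wire tag 10, the `case 10` branch) rather than a varint (`case 8`). A hypothetical round-trip check against the regenerated class; the id and name values are made up:

// (parseFrom declares com.google.protobuf.InvalidProtocolBufferException)
Service.Experiment exp = Service.Experiment.newBuilder()
    .setExperimentId("12")          // was setExperimentId(12L) before this change
    .setName("demo")                // example name, made up
    .build();
Service.Experiment parsed = Service.Experiment.parseFrom(exp.toByteArray());
assert parsed.getExperimentId().equals("12");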
@@ -8827,9 +8314,10 @@ private Experiment(
             case 0:
               done = true;
               break;
-            case 8: {
+            case 10: {
+              com.google.protobuf.ByteString bs = input.readBytes();
               bitField0_ |= 0x00000001;
-              experimentId_ = input.readInt64();
+              experimentId_ = bs;
               break;
             }
             case 18: {
@@ -8894,13 +8382,13 @@ private Experiment(
 
     private int bitField0_;
     public static final int EXPERIMENT_ID_FIELD_NUMBER = 1;
-    private long experimentId_;
+    private volatile java.lang.Object experimentId_;
     /**
      * 
      * Unique identifier for the experiment.
      * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -8910,10 +8398,41 @@ public boolean hasExperimentId() { * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } + } + /** + *
+     * Unique identifier for the experiment.
+     * 
+ * + * optional string experiment_id = 1; + */ + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } public static final int NAME_FIELD_NUMBER = 2; @@ -9142,7 +8661,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); @@ -9169,8 +8688,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); @@ -9207,8 +8725,8 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && (hasName() == other.hasName()); if (hasName()) { @@ -9248,8 +8766,7 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; @@ -9410,7 +8927,7 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); name_ = ""; bitField0_ = (bitField0_ & ~0x00000002); @@ -9524,7 +9041,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.Experiment other) { if (other == org.mlflow.api.proto.Service.Experiment.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } if (other.hasName()) { bitField0_ |= 0x00000002; @@ -9577,13 +9096,13 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
        * Unique identifier for the experiment.
        * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -9593,20 +9112,55 @@ public boolean hasExperimentId() { * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } } /** *
        * Unique identifier for the experiment.
        * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * Unique identifier for the experiment.
+       * 
+ * + * optional string experiment_id = 1; + */ + public Builder setExperimentId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; experimentId_ = value; onChanged(); return this; @@ -9616,11 +9170,28 @@ public Builder setExperimentId(long value) { * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public Builder clearExperimentId() { bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; + experimentId_ = getDefaultInstance().getExperimentId(); + onChanged(); + return this; + } + /** + *
+       * Unique identifier for the experiment.
+       * 
+ * + * optional string experiment_id = 1; + */ + public Builder setExperimentIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } @@ -10088,7 +9659,7 @@ public interface CreateExperimentOrBuilder extends * Experiment name. *
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ boolean hasName(); /** @@ -10096,7 +9667,7 @@ public interface CreateExperimentOrBuilder extends * Experiment name. *
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ java.lang.String getName(); /** @@ -10104,7 +9675,7 @@ public interface CreateExperimentOrBuilder extends * Experiment name. *
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getNameBytes(); @@ -10232,7 +9803,7 @@ public interface ResponseOrBuilder extends * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ boolean hasExperimentId(); /** @@ -10240,9 +9811,18 @@ public interface ResponseOrBuilder extends * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; + */ + java.lang.String getExperimentId(); + /** + *
+       * Unique identifier for the experiment.
+       * 
+ * + * optional string experiment_id = 1; */ - long getExperimentId(); + com.google.protobuf.ByteString + getExperimentIdBytes(); } /** * Protobuf type {@code mlflow.CreateExperiment.Response} @@ -10257,7 +9837,7 @@ private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Response() { - experimentId_ = 0L; + experimentId_ = ""; } @java.lang.Override @@ -10284,9 +9864,10 @@ private Response( case 0: done = true; break; - case 8: { + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - experimentId_ = input.readInt64(); + experimentId_ = bs; break; } default: { @@ -10323,13 +9904,13 @@ private Response( private int bitField0_; public static final int EXPERIMENT_ID_FIELD_NUMBER = 1; - private long experimentId_; + private volatile java.lang.Object experimentId_; /** *
        * Unique identifier for the experiment.
        * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -10339,10 +9920,41 @@ public boolean hasExperimentId() { * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } + } + /** + *
+       * Unique identifier for the experiment.
+       * 
+ * + * optional string experiment_id = 1; + */ + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private byte memoizedIsInitialized = -1; @@ -10360,7 +9972,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } unknownFields.writeTo(output); } @@ -10372,8 +9984,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -10393,8 +10004,8 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -10409,8 +10020,7 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -10545,7 +10155,7 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -10629,7 +10239,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.CreateExperiment.Response other) { if (other == org.mlflow.api.proto.Service.CreateExperiment.Response.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -10661,13 +10273,13 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
          * Unique identifier for the experiment.
          * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -10677,20 +10289,55 @@ public boolean hasExperimentId() { * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; + */ + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+         * Unique identifier for the experiment.
+         * 
+ * + * optional string experiment_id = 1; */ - public long getExperimentId() { - return experimentId_; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } /** *
          * Unique identifier for the experiment.
          * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; + public Builder setExperimentId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; experimentId_ = value; onChanged(); return this; @@ -10700,11 +10347,28 @@ public Builder setExperimentId(long value) { * Unique identifier for the experiment. *
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public Builder clearExperimentId() { bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; + experimentId_ = getDefaultInstance().getExperimentId(); + onChanged(); + return this; + } + /** + *
+         * Unique identifier for the experiment.
+         * 
+ * + * optional string experiment_id = 1; + */ + public Builder setExperimentIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } @@ -10769,7 +10433,7 @@ public org.mlflow.api.proto.Service.CreateExperiment.Response getDefaultInstance * Experiment name. *
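For API consumers, the visible effect of these hunks is that CreateExperiment.Response (and, in the hunks below, GetExperiment, DeleteExperiment, RestoreExperiment, UpdateExperiment, and CreateRun) now carries experiment_id as a String rather than a long, gains the usual getXxxBytes/setXxxBytes companions, and throws NullPointerException from setExperimentId(null). A sketch of the call-site change, using the generated names shown above:

    import org.mlflow.api.proto.Service.CreateExperiment;

    class ExperimentIdMigration {
      static String roundTrip() {
        CreateExperiment.Response resp = CreateExperiment.Response.newBuilder()
            .setExperimentId("42")       // before this change: setExperimentId(42L)
            .build();
        return resp.getExperimentId();   // before: long; now: java.lang.String
      }
    }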
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -10779,7 +10443,7 @@ public boolean hasName() { * Experiment name. *
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public java.lang.String getName() { java.lang.Object ref = name_; @@ -10800,7 +10464,7 @@ public java.lang.String getName() { * Experiment name. *
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getNameBytes() { @@ -11221,7 +10885,7 @@ public Builder mergeFrom( * Experiment name. *
* - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -11231,7 +10895,7 @@ public boolean hasName() { * Experiment name. * * - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public java.lang.String getName() { java.lang.Object ref = name_; @@ -11252,7 +10916,7 @@ public java.lang.String getName() { * Experiment name. * * - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getNameBytes() { @@ -11272,7 +10936,7 @@ public java.lang.String getName() { * Experiment name. * * - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public Builder setName( java.lang.String value) { @@ -11289,7 +10953,7 @@ public Builder setName( * Experiment name. * * - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -11302,7 +10966,7 @@ public Builder clearName() { * Experiment name. * * - * optional string name = 1 [(.validate_required) = true]; + * optional string name = 1 [(.mlflow.validate_required) = true]; */ public Builder setNameBytes( com.google.protobuf.ByteString value) { @@ -11586,7 +11250,7 @@ public interface ResponseOrBuilder extends /** *
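The doc-comment churn on name records only that the custom validate_required field option is now resolved inside the mlflow proto package ((.mlflow.validate_required) instead of (.validate_required)); the field number, type, and generated Java surface are unchanged. Building the request therefore looks the same as before, sketched here:

    import org.mlflow.api.proto.Service.CreateExperiment;

    class CreateExperimentRequest {
      static CreateExperiment build() {
        return CreateExperiment.newBuilder()
            .setName("my-experiment")   // required; enforced via the validate_required option
            .build();
      }
    }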
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11595,7 +11259,7 @@ public interface ResponseOrBuilder extends getExperimentsList(); /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11603,7 +11267,7 @@ public interface ResponseOrBuilder extends org.mlflow.api.proto.Service.Experiment getExperiments(int index); /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11611,7 +11275,7 @@ public interface ResponseOrBuilder extends int getExperimentsCount(); /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11620,7 +11284,7 @@ public interface ResponseOrBuilder extends getExperimentsOrBuilderList(); /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11716,7 +11380,7 @@ private Response( private java.util.List experiments_; /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11726,7 +11390,7 @@ public java.util.List getExperimentsLis } /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11737,7 +11401,7 @@ public java.util.List getExperimentsLis } /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11747,7 +11411,7 @@ public int getExperimentsCount() { } /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -11757,7 +11421,7 @@ public org.mlflow.api.proto.Service.Experiment getExperiments(int index) { } /** *
-       * All experiments
+       * All experiments.
        * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12124,7 +11788,7 @@ private void ensureExperimentsIsMutable() { /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12138,7 +11802,7 @@ public java.util.List getExperimentsLis } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12152,7 +11816,7 @@ public int getExperimentsCount() { } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12166,7 +11830,7 @@ public org.mlflow.api.proto.Service.Experiment getExperiments(int index) { } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12187,7 +11851,7 @@ public Builder setExperiments( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12205,7 +11869,7 @@ public Builder setExperiments( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12225,7 +11889,7 @@ public Builder addExperiments(org.mlflow.api.proto.Service.Experiment value) { } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12246,7 +11910,7 @@ public Builder addExperiments( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12264,7 +11928,7 @@ public Builder addExperiments( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12282,7 +11946,7 @@ public Builder addExperiments( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12301,7 +11965,7 @@ public Builder addAllExperiments( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12318,7 +11982,7 @@ public Builder clearExperiments() { } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12335,7 +11999,7 @@ public Builder removeExperiments(int index) { } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12346,7 +12010,7 @@ public org.mlflow.api.proto.Service.Experiment.Builder getExperimentsBuilder( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12360,7 +12024,7 @@ public org.mlflow.api.proto.Service.ExperimentOrBuilder getExperimentsOrBuilder( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12375,7 +12039,7 @@ public org.mlflow.api.proto.Service.ExperimentOrBuilder getExperimentsOrBuilder( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12386,7 +12050,7 @@ public org.mlflow.api.proto.Service.Experiment.Builder addExperimentsBuilder() { } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12398,7 +12062,7 @@ public org.mlflow.api.proto.Service.Experiment.Builder addExperimentsBuilder( } /** *
-         * All experiments
+         * All experiments.
          * 
* * repeated .mlflow.Experiment experiments = 1; @@ -12934,7 +12598,7 @@ public interface GetExperimentOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ boolean hasExperimentId(); /** @@ -12942,9 +12606,18 @@ public interface GetExperimentOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - long getExperimentId(); + java.lang.String getExperimentId(); + /** + *
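Apart from the trailing period added to the doc comment, the experiments response keeps the standard repeated-field surface (getExperimentsList, getExperimentsCount, getExperiments(int), plus the add/set/remove builder methods). A sketch of consuming it, assuming the enclosing message is ListExperiments as in the MLflow service proto and that Experiment exposes getName() as elsewhere in this file:

    import org.mlflow.api.proto.Service.Experiment;
    import org.mlflow.api.proto.Service.ListExperiments;

    class ListExperimentsExample {
      static void printAll(ListExperiments.Response resp) {
        for (Experiment e : resp.getExperimentsList()) {
          System.out.println(e.getExperimentId() + "\t" + e.getName());
        }
      }
    }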
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + com.google.protobuf.ByteString + getExperimentIdBytes(); } /** * Protobuf type {@code mlflow.GetExperiment} @@ -12959,7 +12632,7 @@ private GetExperiment(com.google.protobuf.GeneratedMessageV3.Builder builder) super(builder); } private GetExperiment() { - experimentId_ = 0L; + experimentId_ = ""; } @java.lang.Override @@ -12986,9 +12659,10 @@ private GetExperiment( case 0: done = true; break; - case 8: { + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - experimentId_ = input.readInt64(); + experimentId_ = bs; break; } default: { @@ -13029,7 +12703,7 @@ public interface ResponseOrBuilder extends /** *
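The parser switching from case 8 to case 10 is the wire-format consequence of the type change: a protobuf tag byte is (field_number << 3) | wire_type, so field 1 moves from wire type 0 (varint, for int64) to wire type 2 (length-delimited, for string). A two-line check of the arithmetic:

    class WireTag {
      static int tag(int fieldNumber, int wireType) {
        return (fieldNumber << 3) | wireType;
      }

      public static void main(String[] args) {
        System.out.println(tag(1, 0)); // 8  -> old int64 experiment_id (varint)
        System.out.println(tag(1, 2)); // 10 -> new string experiment_id (length-delimited)
      }
    }

One consequence worth noting: a message serialized with the old int64 field decodes under the new schema into unknownFields rather than experiment_id, since the wire types differ and the parser's default case routes unmatched tags to parseUnknownField.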
-       * Returns experiment details.
+       * Experiment details.
        * 
* * optional .mlflow.Experiment experiment = 1; @@ -13037,7 +12711,7 @@ public interface ResponseOrBuilder extends boolean hasExperiment(); /** *
-       * Returns experiment details.
+       * Experiment details.
        * 
* * optional .mlflow.Experiment experiment = 1; @@ -13045,7 +12719,7 @@ public interface ResponseOrBuilder extends org.mlflow.api.proto.Service.Experiment getExperiment(); /** *
-       * Returns experiment details.
+       * Experiment details.
        * 
* * optional .mlflow.Experiment experiment = 1; @@ -13198,7 +12872,7 @@ private Response( private org.mlflow.api.proto.Service.Experiment experiment_; /** *
-       * Returns experiment details.
+       * Experiment details.
        * 
* * optional .mlflow.Experiment experiment = 1; @@ -13208,7 +12882,7 @@ public boolean hasExperiment() { } /** *
-       * Returns experiment details.
+       * Experiment details.
        * 
* * optional .mlflow.Experiment experiment = 1; @@ -13218,7 +12892,7 @@ public org.mlflow.api.proto.Service.Experiment getExperiment() { } /** *
-       * Returns experiment details.
+       * Experiment details.
        * 
* * optional .mlflow.Experiment experiment = 1; @@ -13666,7 +13340,7 @@ public Builder mergeFrom( org.mlflow.api.proto.Service.Experiment, org.mlflow.api.proto.Service.Experiment.Builder, org.mlflow.api.proto.Service.ExperimentOrBuilder> experimentBuilder_; /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13676,7 +13350,7 @@ public boolean hasExperiment() { } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13690,7 +13364,7 @@ public org.mlflow.api.proto.Service.Experiment getExperiment() { } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13710,7 +13384,7 @@ public Builder setExperiment(org.mlflow.api.proto.Service.Experiment value) { } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13728,7 +13402,7 @@ public Builder setExperiment( } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13752,7 +13426,7 @@ public Builder mergeExperiment(org.mlflow.api.proto.Service.Experiment value) { } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13769,7 +13443,7 @@ public Builder clearExperiment() { } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13781,7 +13455,7 @@ public org.mlflow.api.proto.Service.Experiment.Builder getExperimentBuilder() { } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -13796,7 +13470,7 @@ public org.mlflow.api.proto.Service.ExperimentOrBuilder getExperimentOrBuilder() } /** *
-         * Returns experiment details.
+         * Experiment details.
          * 
* * optional .mlflow.Experiment experiment = 1; @@ -14181,13 +13855,13 @@ public org.mlflow.api.proto.Service.GetExperiment.Response getDefaultInstanceFor private int bitField0_; public static final int EXPERIMENT_ID_FIELD_NUMBER = 1; - private long experimentId_; + private volatile java.lang.Object experimentId_; /** *
      * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -14197,10 +13871,41 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } + } + /** + *
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private byte memoizedIsInitialized = -1; @@ -14218,7 +13923,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } unknownFields.writeTo(output); } @@ -14230,8 +13935,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -14251,8 +13955,8 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -14267,8 +13971,7 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -14403,7 +14106,7 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -14487,7 +14190,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.GetExperiment other) { if (other == org.mlflow.api.proto.Service.GetExperiment.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -14519,13 +14224,13 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -14535,20 +14240,55 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } } /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; experimentId_ = value; onChanged(); return this; @@ -14558,11 +14298,28 @@ public Builder setExperimentId(long value) { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public Builder clearExperimentId() { bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; + experimentId_ = getDefaultInstance().getExperimentId(); + onChanged(); + return this; + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } @@ -14628,7 +14385,7 @@ public interface DeleteExperimentOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ boolean hasExperimentId(); /** @@ -14636,9 +14393,18 @@ public interface DeleteExperimentOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + java.lang.String getExperimentId(); + /** + *
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - long getExperimentId(); + com.google.protobuf.ByteString + getExperimentIdBytes(); } /** * Protobuf type {@code mlflow.DeleteExperiment} @@ -14653,7 +14419,7 @@ private DeleteExperiment(com.google.protobuf.GeneratedMessageV3.Builder build super(builder); } private DeleteExperiment() { - experimentId_ = 0L; + experimentId_ = ""; } @java.lang.Override @@ -14680,9 +14446,10 @@ private DeleteExperiment( case 0: done = true; break; - case 8: { + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - experimentId_ = input.readInt64(); + experimentId_ = bs; break; } default: { @@ -15131,13 +14898,13 @@ public org.mlflow.api.proto.Service.DeleteExperiment.Response getDefaultInstance private int bitField0_; public static final int EXPERIMENT_ID_FIELD_NUMBER = 1; - private long experimentId_; + private volatile java.lang.Object experimentId_; /** *
      * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -15147,10 +14914,41 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } + } + /** + *
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private byte memoizedIsInitialized = -1; @@ -15168,7 +14966,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } unknownFields.writeTo(output); } @@ -15180,8 +14978,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -15201,8 +14998,8 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -15217,8 +15014,7 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -15353,7 +15149,7 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -15437,7 +15233,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.DeleteExperiment other) { if (other == org.mlflow.api.proto.Service.DeleteExperiment.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -15469,13 +15267,13 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -15485,20 +15283,55 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } } /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; experimentId_ = value; onChanged(); return this; @@ -15508,11 +15341,28 @@ public Builder setExperimentId(long value) { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public Builder clearExperimentId() { bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; + experimentId_ = getDefaultInstance().getExperimentId(); + onChanged(); + return this; + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } @@ -15575,20 +15425,29 @@ public interface RestoreExperimentOrBuilder extends /** *
-     * Identifier to get an experiment
+     * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ boolean hasExperimentId(); /** *
-     * Identifier to get an experiment
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + java.lang.String getExperimentId(); + /** + *
+     * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - long getExperimentId(); + com.google.protobuf.ByteString + getExperimentIdBytes(); } /** * Protobuf type {@code mlflow.RestoreExperiment} @@ -15603,7 +15462,7 @@ private RestoreExperiment(com.google.protobuf.GeneratedMessageV3.Builder buil super(builder); } private RestoreExperiment() { - experimentId_ = 0L; + experimentId_ = ""; } @java.lang.Override @@ -15630,9 +15489,10 @@ private RestoreExperiment( case 0: done = true; break; - case 8: { + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - experimentId_ = input.readInt64(); + experimentId_ = bs; break; } default: { @@ -16081,26 +15941,57 @@ public org.mlflow.api.proto.Service.RestoreExperiment.Response getDefaultInstanc private int bitField0_; public static final int EXPERIMENT_ID_FIELD_NUMBER = 1; - private long experimentId_; + private volatile java.lang.Object experimentId_; /** *
-     * Identifier to get an experiment
+     * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-     * Identifier to get an experiment
+     * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } + } + /** + *
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private byte memoizedIsInitialized = -1; @@ -16118,7 +16009,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } unknownFields.writeTo(output); } @@ -16130,8 +16021,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -16151,8 +16041,8 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -16167,8 +16057,7 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -16303,7 +16192,7 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -16387,7 +16276,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.RestoreExperiment other) { if (other == org.mlflow.api.proto.Service.RestoreExperiment.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -16419,50 +16310,102 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
-       * Identifier to get an experiment
+       * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-       * Identifier to get an experiment
+       * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } } /** *
-       * Identifier to get an experiment
+       * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; experimentId_ = value; onChanged(); return this; } /** *
-       * Identifier to get an experiment
+       * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public Builder clearExperimentId() { bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; + experimentId_ = getDefaultInstance().getExperimentId(); + onChanged(); + return this; + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } @@ -16528,7 +16471,7 @@ public interface UpdateExperimentOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ boolean hasExperimentId(); /** @@ -16536,13 +16479,22 @@ public interface UpdateExperimentOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + java.lang.String getExperimentId(); + /** + *
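DeleteExperiment and RestoreExperiment are mirror images over the same required string experiment_id, so an archive-then-restore flow is just two requests built with the same ID. A sketch:

    import org.mlflow.api.proto.Service.DeleteExperiment;
    import org.mlflow.api.proto.Service.RestoreExperiment;

    class ExperimentLifecycle {
      static DeleteExperiment delete(String id) {
        return DeleteExperiment.newBuilder().setExperimentId(id).build();
      }

      static RestoreExperiment restore(String id) {
        return RestoreExperiment.newBuilder().setExperimentId(id).build();
      }
    }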
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - long getExperimentId(); + com.google.protobuf.ByteString + getExperimentIdBytes(); /** *
-     * If provided, the experiment's name will be changed to this. The new name must be unique.
+     * If provided, the experiment's name is changed to the new name. The new name must be unique.
      * 
* * optional string new_name = 2; @@ -16550,7 +16502,7 @@ public interface UpdateExperimentOrBuilder extends boolean hasNewName(); /** *
-     * If provided, the experiment's name will be changed to this. The new name must be unique.
+     * If provided, the experiment's name is changed to the new name. The new name must be unique.
      * 
* * optional string new_name = 2; @@ -16558,7 +16510,7 @@ public interface UpdateExperimentOrBuilder extends java.lang.String getNewName(); /** *
-     * If provided, the experiment's name will be changed to this. The new name must be unique.
+     * If provided, the experiment's name is changed to the new name. The new name must be unique.
      * 
* * optional string new_name = 2; @@ -16579,7 +16531,7 @@ private UpdateExperiment(com.google.protobuf.GeneratedMessageV3.Builder build super(builder); } private UpdateExperiment() { - experimentId_ = 0L; + experimentId_ = ""; newName_ = ""; } @@ -16607,9 +16559,10 @@ private UpdateExperiment( case 0: done = true; break; - case 8: { + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - experimentId_ = input.readInt64(); + experimentId_ = bs; break; } case 18: { @@ -17064,13 +17017,13 @@ public org.mlflow.api.proto.Service.UpdateExperiment.Response getDefaultInstance private int bitField0_; public static final int EXPERIMENT_ID_FIELD_NUMBER = 1; - private long experimentId_; + private volatile java.lang.Object experimentId_; /** *
      * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -17080,17 +17033,48 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } + } + /** + *
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } public static final int NEW_NAME_FIELD_NUMBER = 2; private volatile java.lang.Object newName_; /** *
-     * If provided, the experiment's name will be changed to this. The new name must be unique.
+     * If provided, the experiment's name is changed to the new name. The new name must be unique.
      * 
* * optional string new_name = 2; @@ -17100,7 +17084,7 @@ public boolean hasNewName() { } /** *
-     * If provided, the experiment's name will be changed to this. The new name must be unique.
+     * If provided, the experiment's name is changed to the new name. The new name must be unique.
      * 
* * optional string new_name = 2; @@ -17121,7 +17105,7 @@ public java.lang.String getNewName() { } /** *
-     * If provided, the experiment's name will be changed to this. The new name must be unique.
+     * If provided, the experiment's name is changed to the new name. The new name must be unique.
      * 
* * optional string new_name = 2; @@ -17155,7 +17139,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, newName_); @@ -17170,8 +17154,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, newName_); @@ -17194,8 +17177,8 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && (hasNewName() == other.hasNewName()); if (hasNewName()) { @@ -17215,8 +17198,7 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } if (hasNewName()) { hash = (37 * hash) + NEW_NAME_FIELD_NUMBER; @@ -17355,7 +17337,7 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); newName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); @@ -17445,7 +17427,9 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.UpdateExperiment other) { if (other == org.mlflow.api.proto.Service.UpdateExperiment.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } if (other.hasNewName()) { bitField0_ |= 0x00000002; @@ -17482,13 +17466,13 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -17498,20 +17482,55 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public long getExperimentId() { - return experimentId_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + experimentId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } } /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; + public com.google.protobuf.ByteString + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + experimentId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; experimentId_ = value; onChanged(); return this; @@ -17521,11 +17540,28 @@ public Builder setExperimentId(long value) { * ID of the associated experiment. * * - * optional int64 experiment_id = 1 [(.validate_required) = true]; + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; */ public Builder clearExperimentId() { bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; + experimentId_ = getDefaultInstance().getExperimentId(); + onChanged(); + return this; + } + /** + *
+       * ID of the associated experiment.
+       * 
+ * + * optional string experiment_id = 1 [(.mlflow.validate_required) = true]; + */ + public Builder setExperimentIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } @@ -17533,7 +17569,7 @@ public Builder clearExperimentId() { private java.lang.Object newName_ = ""; /** *
-       * If provided, the experiment's name will be changed to this. The new name must be unique.
+       * If provided, the experiment's name is changed to the new name. The new name must be unique.
        * 
* * optional string new_name = 2; @@ -17543,7 +17579,7 @@ public boolean hasNewName() { } /** *
-       * If provided, the experiment's name will be changed to this. The new name must be unique.
+       * If provided, the experiment's name is changed to the new name. The new name must be unique.
        * 
* * optional string new_name = 2; @@ -17564,7 +17600,7 @@ public java.lang.String getNewName() { } /** *
-       * If provided, the experiment's name will be changed to this. The new name must be unique.
+       * If provided, the experiment's name is changed to the new name. The new name must be unique.
        * 
* * optional string new_name = 2; @@ -17584,7 +17620,7 @@ public java.lang.String getNewName() { } /** *
-       * If provided, the experiment's name will be changed to this. The new name must be unique.
+       * If provided, the experiment's name is changed to the new name. The new name must be unique.
        * 
* * optional string new_name = 2; @@ -17601,7 +17637,7 @@ public Builder setNewName( } /** *
-       * If provided, the experiment's name will be changed to this. The new name must be unique.
+       * If provided, the experiment's name is changed to the new name. The new name must be unique.
        * 
* * optional string new_name = 2; @@ -17614,7 +17650,7 @@ public Builder clearNewName() { } /** *
-       * If provided, the experiment's name will be changed to this. The new name must be unique.
+       * If provided, the experiment's name is changed to the new name. The new name must be unique.
        * 
* * optional string new_name = 2; @@ -17691,7 +17727,7 @@ public interface CreateRunOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ boolean hasExperimentId(); /** @@ -17699,13 +17735,24 @@ public interface CreateRunOrBuilder extends * ID of the associated experiment. * * - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; + */ + java.lang.String getExperimentId(); + /** + *
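UpdateExperiment pairs the now string-typed required experiment_id with an optional new_name; per the doc comment above, the name only changes when new_name is set, and the new name must be unique. A sketch:

    import org.mlflow.api.proto.Service.UpdateExperiment;

    class RenameExperiment {
      static UpdateExperiment build(String id, String newName) {
        return UpdateExperiment.newBuilder()
            .setExperimentId(id)   // required
            .setNewName(newName)   // optional; must be unique when provided
            .build();
      }
    }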
+     * ID of the associated experiment.
+     * 
+ * + * optional string experiment_id = 1; */ - long getExperimentId(); + com.google.protobuf.ByteString + getExperimentIdBytes(); /** *
      * ID of the user executing the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use the 'mlflow.user' tag instead.
      * 
* * optional string user_id = 2; @@ -17714,6 +17761,8 @@ public interface CreateRunOrBuilder extends /** *
      * ID of the user executing the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use the 'mlflow.user' tag instead.
      * 
* * optional string user_id = 2; @@ -17722,6 +17771,8 @@ public interface CreateRunOrBuilder extends /** *
      * ID of the user executing the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use the 'mlflow.user' tag instead.
      * 
* * optional string user_id = 2; @@ -17731,105 +17782,7 @@ public interface CreateRunOrBuilder extends /** *
-     * Human readable name for the run.
-     * 
- * - * optional string run_name = 3; - */ - boolean hasRunName(); - /** - *
-     * Human readable name for the run.
-     * 
- * - * optional string run_name = 3; - */ - java.lang.String getRunName(); - /** - *
-     * Human readable name for the run.
-     * 
- * - * optional string run_name = 3; - */ - com.google.protobuf.ByteString - getRunNameBytes(); - - /** - *
-     * Originating source for the run.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - boolean hasSourceType(); - /** - *
-     * Originating source for the run.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - org.mlflow.api.proto.Service.SourceType getSourceType(); - - /** - *
-     * String descriptor for the run's source. For example, name or description of a notebook, or the
-     * URL or path to a project.
-     * 
- * - * optional string source_name = 5; - */ - boolean hasSourceName(); - /** - *
-     * String descriptor for the run's source. For example, name or description of a notebook, or the
-     * URL or path to a project.
-     * 
- * - * optional string source_name = 5; - */ - java.lang.String getSourceName(); - /** - *
-     * String descriptor for the run's source. For example, name or description of a notebook, or the
-     * URL or path to a project.
-     * 
- * - * optional string source_name = 5; - */ - com.google.protobuf.ByteString - getSourceNameBytes(); - - /** - *
-     * Name of the project entry point associated with the current run, if any.
-     * 
- * - * optional string entry_point_name = 6; - */ - boolean hasEntryPointName(); - /** - *
-     * Name of the project entry point associated with the current run, if any.
-     * 
- * - * optional string entry_point_name = 6; - */ - java.lang.String getEntryPointName(); - /** - *
-     * Name of the project entry point associated with the current run, if any.
-     * 
- * - * optional string entry_point_name = 6; - */ - com.google.protobuf.ByteString - getEntryPointNameBytes(); - - /** - *
-     * Unix timestamp of when the run started in milliseconds.
+     * Unix timestamp in milliseconds of when the run started.
      * 
* * optional int64 start_time = 7; @@ -17837,39 +17790,13 @@ public interface CreateRunOrBuilder extends boolean hasStartTime(); /** *
-     * Unix timestamp of when the run started in milliseconds.
+     * Unix timestamp in milliseconds of when the run started.
      * 
* * optional int64 start_time = 7; */ long getStartTime(); - /** - *
-     * Git commit hash of the source code used to create run.
-     * 
- * - * optional string source_version = 8; - */ - boolean hasSourceVersion(); - /** - *
-     * Git commit hash of the source code used to create run.
-     * 
- * - * optional string source_version = 8; - */ - java.lang.String getSourceVersion(); - /** - *
-     * Git commit hash of the source code used to create run.
-     * 
- * - * optional string source_version = 8; - */ - com.google.protobuf.ByteString - getSourceVersionBytes(); - /** *
      * Additional metadata for run.
@@ -17913,32 +17840,6 @@ public interface CreateRunOrBuilder extends
      */
     org.mlflow.api.proto.Service.RunTagOrBuilder getTagsOrBuilder(
         int index);
-
-    /**
-     * 
-     * ID of the parent run which started this run.
-     * 
- * - * optional string parent_run_id = 10; - */ - boolean hasParentRunId(); - /** - *
-     * ID of the parent run which started this run.
-     * 
- * - * optional string parent_run_id = 10; - */ - java.lang.String getParentRunId(); - /** - *
-     * ID of the parent run which started this run.
-     * 
- * - * optional string parent_run_id = 10; - */ - com.google.protobuf.ByteString - getParentRunIdBytes(); } /** * Protobuf type {@code mlflow.CreateRun} @@ -17953,16 +17854,10 @@ private CreateRun(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private CreateRun() { - experimentId_ = 0L; + experimentId_ = ""; userId_ = ""; - runName_ = ""; - sourceType_ = 1; - sourceName_ = ""; - entryPointName_ = ""; startTime_ = 0L; - sourceVersion_ = ""; tags_ = java.util.Collections.emptyList(); - parentRunId_ = ""; } @java.lang.Override @@ -17989,9 +17884,10 @@ private CreateRun( case 0: done = true; break; - case 8: { + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - experimentId_ = input.readInt64(); + experimentId_ = bs; break; } case 18: { @@ -18000,62 +17896,20 @@ private CreateRun( userId_ = bs; break; } - case 26: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000004; - runName_ = bs; - break; - } - case 32: { - int rawValue = input.readEnum(); - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.SourceType value = org.mlflow.api.proto.Service.SourceType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - sourceType_ = rawValue; - } - break; - } - case 42: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000010; - sourceName_ = bs; - break; - } - case 50: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000020; - entryPointName_ = bs; - break; - } case 56: { - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000004; startTime_ = input.readInt64(); break; } - case 66: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000080; - sourceVersion_ = bs; - break; - } case 74: { - if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { tags_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000100; + mutable_bitField0_ |= 0x00000008; } tags_.add( input.readMessage(org.mlflow.api.proto.Service.RunTag.PARSER, extensionRegistry)); break; } - case 82: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000100; - parentRunId_ = bs; - break; - } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -18071,7 +17925,7 @@ private CreateRun( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { tags_ = java.util.Collections.unmodifiableList(tags_); } this.unknownFields = unknownFields.build(); @@ -18770,13 +18624,13 @@ public org.mlflow.api.proto.Service.CreateRun.Response getDefaultInstanceForType private int bitField0_; public static final int EXPERIMENT_ID_FIELD_NUMBER = 1; - private long experimentId_; + private volatile java.lang.Object experimentId_; /** *
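Taken together, the CreateRun hunks delete run_name, source_type, source_name, entry_point_name, source_version, and parent_run_id from the request, deprecate user_id in favor of the 'mlflow.user' tag, and renumber the tag-list bit masks accordingly (0x00000100 becomes 0x00000008), leaving experiment_id (now a string), the deprecated user_id, start_time, and tags. A sketch of the new request shape; RunTag is assumed to expose setKey/setValue as a simple key-value message:

    import org.mlflow.api.proto.Service.CreateRun;
    import org.mlflow.api.proto.Service.RunTag;

    class CreateRunRequest {
      static CreateRun build(String experimentId, String user) {
        return CreateRun.newBuilder()
            .setExperimentId(experimentId)            // string as of this change
            .setStartTime(System.currentTimeMillis()) // Unix timestamp in milliseconds
            .addTags(RunTag.newBuilder()
                .setKey("mlflow.user")                // replaces the deprecated user_id
                .setValue(user))
            .build();
      }
    }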
      * ID of the associated experiment.
      * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -18786,168 +18640,10 @@ public boolean hasExperimentId() { * ID of the associated experiment. *
* - * optional int64 experiment_id = 1; - */ - public long getExperimentId() { - return experimentId_; - } - - public static final int USER_ID_FIELD_NUMBER = 2; - private volatile java.lang.Object userId_; - /** - *
-     * ID of the user executing the run.
-     * 
- * - * optional string user_id = 2; - */ - public boolean hasUserId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-     * ID of the user executing the run.
-     * 
- * - * optional string user_id = 2; - */ - public java.lang.String getUserId() { - java.lang.Object ref = userId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - userId_ = s; - } - return s; - } - } - /** - *
-     * ID of the user executing the run.
-     * 
- * - * optional string user_id = 2; - */ - public com.google.protobuf.ByteString - getUserIdBytes() { - java.lang.Object ref = userId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int RUN_NAME_FIELD_NUMBER = 3; - private volatile java.lang.Object runName_; - /** - *
-     * Human readable name for the run.
-     * 
- * - * optional string run_name = 3; - */ - public boolean hasRunName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - *
-     * Human readable name for the run.
-     * 
- * - * optional string run_name = 3; - */ - public java.lang.String getRunName() { - java.lang.Object ref = runName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runName_ = s; - } - return s; - } - } - /** - *
-     * Human readable name for the run.
-     * 
- * - * optional string run_name = 3; - */ - public com.google.protobuf.ByteString - getRunNameBytes() { - java.lang.Object ref = runName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int SOURCE_TYPE_FIELD_NUMBER = 4; - private int sourceType_; - /** - *
-     * Originating source for the run.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public boolean hasSourceType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - *
-     * Originating source for the run.
-     * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public org.mlflow.api.proto.Service.SourceType getSourceType() { - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.SourceType result = org.mlflow.api.proto.Service.SourceType.valueOf(sourceType_); - return result == null ? org.mlflow.api.proto.Service.SourceType.NOTEBOOK : result; - } - - public static final int SOURCE_NAME_FIELD_NUMBER = 5; - private volatile java.lang.Object sourceName_; - /** - *
-     * String descriptor for the run's source. For example, name or description of a notebook, or the
-     * URL or path to a project.
-     * 
- * - * optional string source_name = 5; - */ - public boolean hasSourceName() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - *
-     * String descriptor for the run's source. For example, name or description of a notebook, or the
-     * URL or path to a project.
-     * 
- * - * optional string source_name = 5; + * optional string experiment_id = 1; */ - public java.lang.String getSourceName() { - java.lang.Object ref = sourceName_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -18955,54 +18651,57 @@ public java.lang.String getSourceName() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - sourceName_ = s; + experimentId_ = s; } return s; } } /** *
-     * String descriptor for the run's source. For example, name or description of a notebook, or the
-     * URL or path to a project.
+     * ID of the associated experiment.
      * 
* - * optional string source_name = 5; + * optional string experiment_id = 1; */ public com.google.protobuf.ByteString - getSourceNameBytes() { - java.lang.Object ref = sourceName_; + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - sourceName_ = b; + experimentId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int ENTRY_POINT_NAME_FIELD_NUMBER = 6; - private volatile java.lang.Object entryPointName_; + public static final int USER_ID_FIELD_NUMBER = 2; + private volatile java.lang.Object userId_; /** *
-     * Name of the project entry point associated with the current run, if any.
+     * ID of the user executing the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public boolean hasEntryPointName() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public boolean hasUserId() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * Name of the project entry point associated with the current run, if any.
+     * ID of the user executing the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public java.lang.String getEntryPointName() { - java.lang.Object ref = entryPointName_; + public java.lang.String getUserId() { + java.lang.Object ref = userId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -19010,26 +18709,28 @@ public java.lang.String getEntryPointName() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - entryPointName_ = s; + userId_ = s; } return s; } } /** *
-     * Name of the project entry point associated with the current run, if any.
+     * ID of the user executing the run.
+     * This field is deprecated as of MLflow 1.0, and will be removed in a future
+     * MLflow release. Use 'mlflow.user' tag instead.
      * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ public com.google.protobuf.ByteString - getEntryPointNameBytes() { - java.lang.Object ref = entryPointName_; + getUserIdBytes() { + java.lang.Object ref = userId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - entryPointName_ = b; + userId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -19040,17 +18741,17 @@ public java.lang.String getEntryPointName() { private long startTime_; /** *
-     * Unix timestamp of when the run started in milliseconds.
+     * Unix timestamp in milliseconds of when the run started.
      * 
* * optional int64 start_time = 7; */ public boolean hasStartTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
-     * Unix timestamp of when the run started in milliseconds.
+     * Unix timestamp in milliseconds of when the run started.
      * 
* * optional int64 start_time = 7; @@ -19059,60 +18760,6 @@ public long getStartTime() { return startTime_; } - public static final int SOURCE_VERSION_FIELD_NUMBER = 8; - private volatile java.lang.Object sourceVersion_; - /** - *
-     * Git commit hash of the source code used to create run.
-     * 
- * - * optional string source_version = 8; - */ - public boolean hasSourceVersion() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - *
-     * Git commit hash of the source code used to create run.
-     * 
- * - * optional string source_version = 8; - */ - public java.lang.String getSourceVersion() { - java.lang.Object ref = sourceVersion_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - sourceVersion_ = s; - } - return s; - } - } - /** - *
-     * Git commit hash of the source code used to create run.
-     * 
- * - * optional string source_version = 8; - */ - public com.google.protobuf.ByteString - getSourceVersionBytes() { - java.lang.Object ref = sourceVersion_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - sourceVersion_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - public static final int TAGS_FIELD_NUMBER = 9; private java.util.List tags_; /** @@ -19168,60 +18815,6 @@ public org.mlflow.api.proto.Service.RunTagOrBuilder getTagsOrBuilder( return tags_.get(index); } - public static final int PARENT_RUN_ID_FIELD_NUMBER = 10; - private volatile java.lang.Object parentRunId_; - /** - *
-     * ID of the parent run which started this run.
-     * 
- * - * optional string parent_run_id = 10; - */ - public boolean hasParentRunId() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - *
-     * ID of the parent run which started this run.
-     * 
- * - * optional string parent_run_id = 10; - */ - public java.lang.String getParentRunId() { - java.lang.Object ref = parentRunId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - parentRunId_ = s; - } - return s; - } - } - /** - *
-     * ID of the parent run which started this run.
-     * 
- * - * optional string parent_run_id = 10; - */ - public com.google.protobuf.ByteString - getParentRunIdBytes() { - java.lang.Object ref = parentRunId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - parentRunId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -19237,35 +18830,17 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, experimentId_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, userId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 3, runName_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, sourceType_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 5, sourceName_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 6, entryPointName_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeInt64(7, startTime_); } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 8, sourceVersion_); - } for (int i = 0; i < tags_.size(); i++) { output.writeMessage(9, tags_.get(i)); } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 10, parentRunId_); - } unknownFields.writeTo(output); } @@ -19276,39 +18851,19 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, experimentId_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, experimentId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, userId_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, runName_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, sourceType_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, sourceName_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, entryPointName_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(7, startTime_); } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, sourceVersion_); - } for (int i = 0; i < tags_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(9, tags_.get(i)); } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, parentRunId_); - } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -19327,50 +18882,21 @@ public 
boolean equals(final java.lang.Object obj) { boolean result = true; result = result && (hasExperimentId() == other.hasExperimentId()); if (hasExperimentId()) { - result = result && (getExperimentId() - == other.getExperimentId()); + result = result && getExperimentId() + .equals(other.getExperimentId()); } result = result && (hasUserId() == other.hasUserId()); if (hasUserId()) { result = result && getUserId() .equals(other.getUserId()); } - result = result && (hasRunName() == other.hasRunName()); - if (hasRunName()) { - result = result && getRunName() - .equals(other.getRunName()); - } - result = result && (hasSourceType() == other.hasSourceType()); - if (hasSourceType()) { - result = result && sourceType_ == other.sourceType_; - } - result = result && (hasSourceName() == other.hasSourceName()); - if (hasSourceName()) { - result = result && getSourceName() - .equals(other.getSourceName()); - } - result = result && (hasEntryPointName() == other.hasEntryPointName()); - if (hasEntryPointName()) { - result = result && getEntryPointName() - .equals(other.getEntryPointName()); - } result = result && (hasStartTime() == other.hasStartTime()); if (hasStartTime()) { result = result && (getStartTime() == other.getStartTime()); } - result = result && (hasSourceVersion() == other.hasSourceVersion()); - if (hasSourceVersion()) { - result = result && getSourceVersion() - .equals(other.getSourceVersion()); - } result = result && getTagsList() .equals(other.getTagsList()); - result = result && (hasParentRunId() == other.hasParentRunId()); - if (hasParentRunId()) { - result = result && getParentRunId() - .equals(other.getParentRunId()); - } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -19384,46 +18910,21 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); if (hasExperimentId()) { hash = (37 * hash) + EXPERIMENT_ID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getExperimentId()); + hash = (53 * hash) + getExperimentId().hashCode(); } if (hasUserId()) { hash = (37 * hash) + USER_ID_FIELD_NUMBER; hash = (53 * hash) + getUserId().hashCode(); } - if (hasRunName()) { - hash = (37 * hash) + RUN_NAME_FIELD_NUMBER; - hash = (53 * hash) + getRunName().hashCode(); - } - if (hasSourceType()) { - hash = (37 * hash) + SOURCE_TYPE_FIELD_NUMBER; - hash = (53 * hash) + sourceType_; - } - if (hasSourceName()) { - hash = (37 * hash) + SOURCE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getSourceName().hashCode(); - } - if (hasEntryPointName()) { - hash = (37 * hash) + ENTRY_POINT_NAME_FIELD_NUMBER; - hash = (53 * hash) + getEntryPointName().hashCode(); - } if (hasStartTime()) { hash = (37 * hash) + START_TIME_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getStartTime()); } - if (hasSourceVersion()) { - hash = (37 * hash) + SOURCE_VERSION_FIELD_NUMBER; - hash = (53 * hash) + getSourceVersion().hashCode(); - } if (getTagsCount() > 0) { hash = (37 * hash) + TAGS_FIELD_NUMBER; hash = (53 * hash) + getTagsList().hashCode(); } - if (hasParentRunId()) { - hash = (37 * hash) + PARENT_RUN_ID_FIELD_NUMBER; - hash = (53 * hash) + getParentRunId().hashCode(); - } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -19558,30 +19059,18 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - experimentId_ = 0L; + experimentId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); userId_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - 
runName_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - sourceType_ = 1; - bitField0_ = (bitField0_ & ~0x00000008); - sourceName_ = ""; - bitField0_ = (bitField0_ & ~0x00000010); - entryPointName_ = ""; - bitField0_ = (bitField0_ & ~0x00000020); startTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - sourceVersion_ = ""; - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000004); if (tagsBuilder_ == null) { tags_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000008); } else { tagsBuilder_.clear(); } - parentRunId_ = ""; - bitField0_ = (bitField0_ & ~0x00000200); return this; } @@ -19621,40 +19110,16 @@ public org.mlflow.api.proto.Service.CreateRun buildPartial() { if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.runName_ = runName_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.sourceType_ = sourceType_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.sourceName_ = sourceName_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.entryPointName_ = entryPointName_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } result.startTime_ = startTime_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - result.sourceVersion_ = sourceVersion_; if (tagsBuilder_ == null) { - if (((bitField0_ & 0x00000100) == 0x00000100)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { tags_ = java.util.Collections.unmodifiableList(tags_); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000008); } result.tags_ = tags_; } else { result.tags_ = tagsBuilder_.build(); } - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000100; - } - result.parentRunId_ = parentRunId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -19705,44 +19170,23 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.CreateRun other) { if (other == org.mlflow.api.proto.Service.CreateRun.getDefaultInstance()) return this; if (other.hasExperimentId()) { - setExperimentId(other.getExperimentId()); + bitField0_ |= 0x00000001; + experimentId_ = other.experimentId_; + onChanged(); } if (other.hasUserId()) { bitField0_ |= 0x00000002; userId_ = other.userId_; onChanged(); } - if (other.hasRunName()) { - bitField0_ |= 0x00000004; - runName_ = other.runName_; - onChanged(); - } - if (other.hasSourceType()) { - setSourceType(other.getSourceType()); - } - if (other.hasSourceName()) { - bitField0_ |= 0x00000010; - sourceName_ = other.sourceName_; - onChanged(); - } - if (other.hasEntryPointName()) { - bitField0_ |= 0x00000020; - entryPointName_ = other.entryPointName_; - onChanged(); - } if (other.hasStartTime()) { setStartTime(other.getStartTime()); } - if (other.hasSourceVersion()) { - bitField0_ |= 0x00000080; - sourceVersion_ = other.sourceVersion_; - onChanged(); - } if (tagsBuilder_ == null) { if (!other.tags_.isEmpty()) { if (tags_.isEmpty()) { tags_ = other.tags_; - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000008); } else { ensureTagsIsMutable(); tags_.addAll(other.tags_); @@ -19755,7 +19199,7 @@ public Builder mergeFrom(org.mlflow.api.proto.Service.CreateRun other) { tagsBuilder_.dispose(); tagsBuilder_ 
= null; tags_ = other.tags_; - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000008); tagsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getTagsFieldBuilder() : null; @@ -19764,11 +19208,6 @@ public Builder mergeFrom(org.mlflow.api.proto.Service.CreateRun other) { } } } - if (other.hasParentRunId()) { - bitField0_ |= 0x00000200; - parentRunId_ = other.parentRunId_; - onChanged(); - } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -19799,13 +19238,13 @@ public Builder mergeFrom( } private int bitField0_; - private long experimentId_ ; + private java.lang.Object experimentId_ = ""; /** *
        * ID of the associated experiment.
        * 
* - * optional int64 experiment_id = 1; + * optional string experiment_id = 1; */ public boolean hasExperimentId() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -19815,164 +19254,16 @@ public boolean hasExperimentId() { * ID of the associated experiment. * * - * optional int64 experiment_id = 1; - */ - public long getExperimentId() { - return experimentId_; - } - /** - *
-       * ID of the associated experiment.
-       * 
- * - * optional int64 experiment_id = 1; - */ - public Builder setExperimentId(long value) { - bitField0_ |= 0x00000001; - experimentId_ = value; - onChanged(); - return this; - } - /** - *
-       * ID of the associated experiment.
-       * 
- * - * optional int64 experiment_id = 1; - */ - public Builder clearExperimentId() { - bitField0_ = (bitField0_ & ~0x00000001); - experimentId_ = 0L; - onChanged(); - return this; - } - - private java.lang.Object userId_ = ""; - /** - *
-       * ID of the user executing the run.
-       * 
- * - * optional string user_id = 2; - */ - public boolean hasUserId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-       * ID of the user executing the run.
-       * 
- * - * optional string user_id = 2; - */ - public java.lang.String getUserId() { - java.lang.Object ref = userId_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - userId_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * ID of the user executing the run.
-       * 
- * - * optional string user_id = 2; - */ - public com.google.protobuf.ByteString - getUserIdBytes() { - java.lang.Object ref = userId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - userId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * ID of the user executing the run.
-       * 
- * - * optional string user_id = 2; - */ - public Builder setUserId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - userId_ = value; - onChanged(); - return this; - } - /** - *
-       * ID of the user executing the run.
-       * 
- * - * optional string user_id = 2; - */ - public Builder clearUserId() { - bitField0_ = (bitField0_ & ~0x00000002); - userId_ = getDefaultInstance().getUserId(); - onChanged(); - return this; - } - /** - *
-       * ID of the user executing the run.
-       * 
- * - * optional string user_id = 2; - */ - public Builder setUserIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - userId_ = value; - onChanged(); - return this; - } - - private java.lang.Object runName_ = ""; - /** - *
-       * Human readable name for the run.
-       * 
- * - * optional string run_name = 3; - */ - public boolean hasRunName() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - *
-       * Human readable name for the run.
-       * 
- * - * optional string run_name = 3; + * optional string experiment_id = 1; */ - public java.lang.String getRunName() { - java.lang.Object ref = runName_; + public java.lang.String getExperimentId() { + java.lang.Object ref = experimentId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - runName_ = s; + experimentId_ = s; } return s; } else { @@ -19981,175 +19272,19 @@ public java.lang.String getRunName() { } /** *
-       * Human readable name for the run.
-       * 
- * - * optional string run_name = 3; - */ - public com.google.protobuf.ByteString - getRunNameBytes() { - java.lang.Object ref = runName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * Human readable name for the run.
-       * 
- * - * optional string run_name = 3; - */ - public Builder setRunName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - runName_ = value; - onChanged(); - return this; - } - /** - *
-       * Human readable name for the run.
-       * 
- * - * optional string run_name = 3; - */ - public Builder clearRunName() { - bitField0_ = (bitField0_ & ~0x00000004); - runName_ = getDefaultInstance().getRunName(); - onChanged(); - return this; - } - /** - *
-       * Human readable name for the run.
-       * 
- * - * optional string run_name = 3; - */ - public Builder setRunNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - runName_ = value; - onChanged(); - return this; - } - - private int sourceType_ = 1; - /** - *
-       * Originating source for the run.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public boolean hasSourceType() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - *
-       * Originating source for the run.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public org.mlflow.api.proto.Service.SourceType getSourceType() { - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.SourceType result = org.mlflow.api.proto.Service.SourceType.valueOf(sourceType_); - return result == null ? org.mlflow.api.proto.Service.SourceType.NOTEBOOK : result; - } - /** - *
-       * Originating source for the run.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public Builder setSourceType(org.mlflow.api.proto.Service.SourceType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - sourceType_ = value.getNumber(); - onChanged(); - return this; - } - /** - *
-       * Originating source for the run.
-       * 
- * - * optional .mlflow.SourceType source_type = 4; - */ - public Builder clearSourceType() { - bitField0_ = (bitField0_ & ~0x00000008); - sourceType_ = 1; - onChanged(); - return this; - } - - private java.lang.Object sourceName_ = ""; - /** - *
-       * String descriptor for the run's source. For example, name or description of a notebook, or the
-       * URL or path to a project.
-       * 
- * - * optional string source_name = 5; - */ - public boolean hasSourceName() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - *
-       * String descriptor for the run's source. For example, name or description of a notebook, or the
-       * URL or path to a project.
-       * 
- * - * optional string source_name = 5; - */ - public java.lang.String getSourceName() { - java.lang.Object ref = sourceName_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - sourceName_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * String descriptor for the run's source. For example, name or description of a notebook, or the
-       * URL or path to a project.
+       * ID of the associated experiment.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 1; */ public com.google.protobuf.ByteString - getSourceNameBytes() { - java.lang.Object ref = sourceName_; + getExperimentIdBytes() { + java.lang.Object ref = experimentId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - sourceName_ = b; + experimentId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -20157,81 +19292,82 @@ public java.lang.String getSourceName() { } /** *
-       * String descriptor for the run's source. For example, name or description of a notebook, or the
-       * URL or path to a project.
+       * ID of the associated experiment.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 1; */ - public Builder setSourceName( + public Builder setExperimentId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000010; - sourceName_ = value; + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } /** *
-       * String descriptor for the run's source. For example, name or description of a notebook, or the
-       * URL or path to a project.
+       * ID of the associated experiment.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 1; */ - public Builder clearSourceName() { - bitField0_ = (bitField0_ & ~0x00000010); - sourceName_ = getDefaultInstance().getSourceName(); + public Builder clearExperimentId() { + bitField0_ = (bitField0_ & ~0x00000001); + experimentId_ = getDefaultInstance().getExperimentId(); onChanged(); return this; } /** *
-       * String descriptor for the run's source. For example, name or description of a notebook, or the
-       * URL or path to a project.
+       * ID of the associated experiment.
        * 
* - * optional string source_name = 5; + * optional string experiment_id = 1; */ - public Builder setSourceNameBytes( + public Builder setExperimentIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000010; - sourceName_ = value; + bitField0_ |= 0x00000001; + experimentId_ = value; onChanged(); return this; } - private java.lang.Object entryPointName_ = ""; + private java.lang.Object userId_ = ""; /** *
-       * Name of the project entry point associated with the current run, if any.
+       * ID of the user executing the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public boolean hasEntryPointName() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public boolean hasUserId() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * Name of the project entry point associated with the current run, if any.
+       * ID of the user executing the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public java.lang.String getEntryPointName() { - java.lang.Object ref = entryPointName_; + public java.lang.String getUserId() { + java.lang.Object ref = userId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - entryPointName_ = s; + userId_ = s; } return s; } else { @@ -20240,19 +19376,21 @@ public java.lang.String getEntryPointName() { } /** *
-       * Name of the project entry point associated with the current run, if any.
+       * ID of the user executing the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ public com.google.protobuf.ByteString - getEntryPointNameBytes() { - java.lang.Object ref = entryPointName_; + getUserIdBytes() { + java.lang.Object ref = userId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - entryPointName_ = b; + userId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -20260,48 +19398,54 @@ public java.lang.String getEntryPointName() { } /** *
-       * Name of the project entry point associated with the current run, if any.
+       * ID of the user executing the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public Builder setEntryPointName( + public Builder setUserId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000020; - entryPointName_ = value; + bitField0_ |= 0x00000002; + userId_ = value; onChanged(); return this; } /** *
-       * Name of the project entry point associated with the current run, if any.
+       * ID of the user executing the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public Builder clearEntryPointName() { - bitField0_ = (bitField0_ & ~0x00000020); - entryPointName_ = getDefaultInstance().getEntryPointName(); + public Builder clearUserId() { + bitField0_ = (bitField0_ & ~0x00000002); + userId_ = getDefaultInstance().getUserId(); onChanged(); return this; } /** *
-       * Name of the project entry point associated with the current run, if any.
+       * ID of the user executing the run.
+       * This field is deprecated as of MLflow 1.0, and will be removed in a future
+       * MLflow release. Use 'mlflow.user' tag instead.
        * 
* - * optional string entry_point_name = 6; + * optional string user_id = 2; */ - public Builder setEntryPointNameBytes( + public Builder setUserIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000020; - entryPointName_ = value; + bitField0_ |= 0x00000002; + userId_ = value; onChanged(); return this; } @@ -20309,17 +19453,17 @@ public Builder setEntryPointNameBytes( private long startTime_ ; /** *
-       * Unix timestamp of when the run started in milliseconds.
+       * Unix timestamp in milliseconds of when the run started.
        * 
* * optional int64 start_time = 7; */ public boolean hasStartTime() { - return ((bitField0_ & 0x00000040) == 0x00000040); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
-       * Unix timestamp of when the run started in milliseconds.
+       * Unix timestamp in milliseconds of when the run started.
        * 
* * optional int64 start_time = 7; @@ -20329,137 +19473,37 @@ public long getStartTime() { } /** *
-       * Unix timestamp of when the run started in milliseconds.
+       * Unix timestamp in milliseconds of when the run started.
        * 
* * optional int64 start_time = 7; */ public Builder setStartTime(long value) { - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000004; startTime_ = value; onChanged(); return this; } /** *
-       * Unix timestamp of when the run started in milliseconds.
+       * Unix timestamp in milliseconds of when the run started.
        * 
* * optional int64 start_time = 7; */ public Builder clearStartTime() { - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000004); startTime_ = 0L; onChanged(); return this; } - private java.lang.Object sourceVersion_ = ""; - /** - *
-       * Git commit hash of the source code used to create run.
-       * 
- * - * optional string source_version = 8; - */ - public boolean hasSourceVersion() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - *
-       * Git commit hash of the source code used to create run.
-       * 
- * - * optional string source_version = 8; - */ - public java.lang.String getSourceVersion() { - java.lang.Object ref = sourceVersion_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - sourceVersion_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * Git commit hash of the source code used to create run.
-       * 
- * - * optional string source_version = 8; - */ - public com.google.protobuf.ByteString - getSourceVersionBytes() { - java.lang.Object ref = sourceVersion_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - sourceVersion_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * Git commit hash of the source code used to create run.
-       * 
- * - * optional string source_version = 8; - */ - public Builder setSourceVersion( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000080; - sourceVersion_ = value; - onChanged(); - return this; - } - /** - *
-       * Git commit hash of the source code used to create run.
-       * 
- * - * optional string source_version = 8; - */ - public Builder clearSourceVersion() { - bitField0_ = (bitField0_ & ~0x00000080); - sourceVersion_ = getDefaultInstance().getSourceVersion(); - onChanged(); - return this; - } - /** - *
-       * Git commit hash of the source code used to create run.
-       * 
- * - * optional string source_version = 8; - */ - public Builder setSourceVersionBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000080; - sourceVersion_ = value; - onChanged(); - return this; - } - private java.util.List tags_ = java.util.Collections.emptyList(); private void ensureTagsIsMutable() { - if (!((bitField0_ & 0x00000100) == 0x00000100)) { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { tags_ = new java.util.ArrayList(tags_); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000008; } } @@ -20653,7 +19697,7 @@ public Builder addAllTags( public Builder clearTags() { if (tagsBuilder_ == null) { tags_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000008); onChanged(); } else { tagsBuilder_.clear(); @@ -20758,113 +19802,13 @@ public org.mlflow.api.proto.Service.RunTag.Builder addTagsBuilder( tagsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< org.mlflow.api.proto.Service.RunTag, org.mlflow.api.proto.Service.RunTag.Builder, org.mlflow.api.proto.Service.RunTagOrBuilder>( tags_, - ((bitField0_ & 0x00000100) == 0x00000100), + ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); tags_ = null; } return tagsBuilder_; } - - private java.lang.Object parentRunId_ = ""; - /** - *
-       * ID of the parent run which started this run.
-       * 
- * - * optional string parent_run_id = 10; - */ - public boolean hasParentRunId() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - *
-       * ID of the parent run which started this run.
-       * 
- * - * optional string parent_run_id = 10; - */ - public java.lang.String getParentRunId() { - java.lang.Object ref = parentRunId_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - parentRunId_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * ID of the parent run which started this run.
-       * 
- * - * optional string parent_run_id = 10; - */ - public com.google.protobuf.ByteString - getParentRunIdBytes() { - java.lang.Object ref = parentRunId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - parentRunId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * ID of the parent run which started this run.
-       * 
- * - * optional string parent_run_id = 10; - */ - public Builder setParentRunId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000200; - parentRunId_ = value; - onChanged(); - return this; - } - /** - *
-       * ID of the parent run which started this run.
-       * 
- * - * optional string parent_run_id = 10; - */ - public Builder clearParentRunId() { - bitField0_ = (bitField0_ & ~0x00000200); - parentRunId_ = getDefaultInstance().getParentRunId(); - onChanged(); - return this; - } - /** - *
-       * ID of the parent run which started this run.
-       * 
- * - * optional string parent_run_id = 10; - */ - public Builder setParentRunIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000200; - parentRunId_ = value; - onChanged(); - return this; - } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -20924,26 +19868,55 @@ public interface UpdateRunOrBuilder extends /** *
-     * ID of the run to update.
+     * ID of the run to update. Must be provided.
      * 
      *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_id = 4;
+     */
+    boolean hasRunId();
+    /**
+     *
+     * ID of the run to update. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    java.lang.String getRunId();
+    /**
+     *
+     * ID of the run to update. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    com.google.protobuf.ByteString
+        getRunIdBytes();
+
+    /**
+     *
+     * [Deprecated, use run_id instead] ID of the run to update. This field will
+     * be removed in a future MLflow version.
+     * 
+     *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_uuid = 1;
      */
     boolean hasRunUuid();
     /**
      *
-     * ID of the run to update.
+     * [Deprecated, use run_id instead] ID of the run to update. This field will
+     * be removed in a future MLflow version.
      * 
      *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_uuid = 1;
      */
     java.lang.String getRunUuid();
     /**
      *
-     * ID of the run to update.
+     * [Deprecated, use run_id instead] ID of the run to update. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ com.google.protobuf.ByteString getRunUuidBytes(); @@ -20967,7 +19940,7 @@ public interface UpdateRunOrBuilder extends /** *
-     *Unix timestamp of when the run ended in milliseconds.
+     * Unix timestamp in milliseconds of when the run ended.
      * 
* * optional int64 end_time = 3; @@ -20975,7 +19948,7 @@ public interface UpdateRunOrBuilder extends boolean hasEndTime(); /** *
-     *Unix timestamp of when the run ended in milliseconds.
+     * Unix timestamp in milliseconds of when the run ended.
      * 
* * optional int64 end_time = 3; @@ -20995,6 +19968,7 @@ private UpdateRun(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private UpdateRun() { + runId_ = ""; runUuid_ = ""; status_ = 1; endTime_ = 0L; @@ -21026,7 +20000,7 @@ private UpdateRun( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = bs; break; } @@ -21037,16 +20011,22 @@ private UpdateRun( if (value == null) { unknownFields.mergeVarintField(2, rawValue); } else { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; status_ = rawValue; } break; } case 24: { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; endTime_ = input.readInt64(); break; } + case 34: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + runId_ = bs; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -21757,24 +20737,80 @@ public org.mlflow.api.proto.Service.UpdateRun.Response getDefaultInstanceForType } private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 4; + private volatile java.lang.Object runId_; + /** + *
+     * ID of the run to update. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    public boolean hasRunId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     *
+     * ID of the run to update. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } + } + /** + *
+     * ID of the run to update. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + public static final int RUN_UUID_FIELD_NUMBER = 1; private volatile java.lang.Object runUuid_; /** *
-     * ID of the run to update.
+     * [Deprecated, use run_id instead] ID of the run to update. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * ID of the run to update.
+     * [Deprecated, use run_id instead] ID of the run to update. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -21792,10 +20828,11 @@ public java.lang.String getRunUuid() { } /** *
-     * ID of the run to update.
+     * [Deprecated, use run_id instead] ID of the run to update. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -21821,7 +20858,7 @@ public java.lang.String getRunUuid() { * optional .mlflow.RunStatus status = 2; */ public boolean hasStatus() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
@@ -21840,17 +20877,17 @@ public org.mlflow.api.proto.Service.RunStatus getStatus() {
     private long endTime_;
     /**
      * 
-     *Unix timestamp of when the run ended in milliseconds.
+     * Unix timestamp in milliseconds of when the run ended.
      * 
* * optional int64 end_time = 3; */ public boolean hasEndTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
-     *Unix timestamp of when the run ended in milliseconds.
+     * Unix timestamp in milliseconds of when the run ended.
      * 
* * optional int64 end_time = 3; @@ -21873,15 +20910,18 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeEnum(2, status_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt64(3, endTime_); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, runId_); + } unknownFields.writeTo(output); } @@ -21891,17 +20931,20 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(2, status_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(3, endTime_); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, runId_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -21918,6 +20961,11 @@ public boolean equals(final java.lang.Object obj) { org.mlflow.api.proto.Service.UpdateRun other = (org.mlflow.api.proto.Service.UpdateRun) obj; boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } result = result && (hasRunUuid() == other.hasRunUuid()); if (hasRunUuid()) { result = result && getRunUuid() @@ -21943,6 +20991,10 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } if (hasRunUuid()) { hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; hash = (53 * hash) + getRunUuid().hashCode(); @@ -22089,12 +21141,14 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - status_ = 1; + runUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - endTime_ = 0L; + status_ = 1; bitField0_ = (bitField0_ & ~0x00000004); + endTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -22126,14 +21180,18 @@ public org.mlflow.api.proto.Service.UpdateRun buildPartial() { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.status_ = status_; + result.runUuid_ = runUuid_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + result.status_ = status_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } result.endTime_ = endTime_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -22184,8 
+21242,13 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.UpdateRun other) { if (other == org.mlflow.api.proto.Service.UpdateRun.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + if (other.hasRunId()) { bitField0_ |= 0x00000001; + runId_ = other.runId_; + onChanged(); + } + if (other.hasRunUuid()) { + bitField0_ |= 0x00000002; runUuid_ = other.runUuid_; onChanged(); } @@ -22225,23 +21288,125 @@ public Builder mergeFrom( } private int bitField0_; + private java.lang.Object runId_ = ""; + /** + *
+       * ID of the run to update. Must be provided.
+       * 
+       *
+       * optional string run_id = 4;
+       */
+      public boolean hasRunId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       *
+       * ID of the run to update. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+       * ID of the run to update. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the run to update. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder setRunId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + /** + *
+       * ID of the run to update. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder clearRunId() { + bitField0_ = (bitField0_ & ~0x00000001); + runId_ = getDefaultInstance().getRunId(); + onChanged(); + return this; + } + /** + *
+       * ID of the run to update. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder setRunIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + private java.lang.Object runUuid_ = ""; /** *
-       * ID of the run to update.
+       * [Deprecated, use run_id instead] ID of the run to update. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * ID of the run to update.
+       * [Deprecated, use run_id instead] ID of the run to update. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -22259,10 +21424,11 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run to update.
+       * [Deprecated, use run_id instead] ID of the run to update. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -22279,47 +21445,50 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run to update.
+       * [Deprecated, use run_id instead] ID of the run to update. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; } /** *
-       * ID of the run to update.
+       * [Deprecated, use run_id instead] ID of the run to update. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder clearRunUuid() { - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * ID of the run to update.
+       * [Deprecated, use run_id instead] ID of the run to update. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; @@ -22334,7 +21503,7 @@ public Builder setRunUuidBytes( * optional .mlflow.RunStatus status = 2; */ public boolean hasStatus() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
@@ -22359,7 +21528,7 @@ public Builder setStatus(org.mlflow.api.proto.Service.RunStatus value) {
         if (value == null) {
           throw new NullPointerException();
         }
-        bitField0_ |= 0x00000002;
+        bitField0_ |= 0x00000004;
         status_ = value.getNumber();
         onChanged();
         return this;
@@ -22372,7 +21541,7 @@ public Builder setStatus(org.mlflow.api.proto.Service.RunStatus value) {
        * optional .mlflow.RunStatus status = 2;
        */
       public Builder clearStatus() {
-        bitField0_ = (bitField0_ & ~0x00000002);
+        bitField0_ = (bitField0_ & ~0x00000004);
         status_ = 1;
         onChanged();
         return this;
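
A usage sketch, not part of the generated diff: assuming the regenerated org.mlflow.api.proto.Service classes above are on the classpath, the renumbered UpdateRun builder can be driven as below. The run ID literal is hypothetical, and FINISHED is taken from the mlflow.RunStatus enum that the status field references.

    import org.mlflow.api.proto.Service;

    String runId = "0f1e2d3c4b5a69788796a5b4c3d2e1f0";   // hypothetical run ID
    Service.UpdateRun update = Service.UpdateRun.newBuilder()
        .setRunId(runId)                          // new run_id (field 4), preferred
        .setStatus(Service.RunStatus.FINISHED)    // optional .mlflow.RunStatus status = 2
        .setEndTime(System.currentTimeMillis())   // Unix timestamp in milliseconds (field 3)
        .build();

    // After the bitField0_ renumbering, run_id (0x1) and the deprecated
    // run_uuid (0x2) own separate presence bits, so setting only run_id
    // leaves hasRunUuid() false.
    assert update.hasRunId() && !update.hasRunUuid();
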
@@ -22381,17 +21550,17 @@ public Builder clearStatus() {
       private long endTime_ ;
       /**
        * 
-       *Unix timestamp of when the run ended in milliseconds.
+       *Unix timestamp in milliseconds of when the run ended.
        * 
* * optional int64 end_time = 3; */ public boolean hasEndTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
-       *Unix timestamp of when the run ended in milliseconds.
+       *Unix timestamp in milliseconds of when the run ended.
        * 
* * optional int64 end_time = 3; @@ -22401,26 +21570,26 @@ public long getEndTime() { } /** *
-       *Unix timestamp of when the run ended in milliseconds.
+       *Unix timestamp in milliseconds of when the run ended.
        * 
* * optional int64 end_time = 3; */ public Builder setEndTime(long value) { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; endTime_ = value; onChanged(); return this; } /** *
-       *Unix timestamp of when the run ended in milliseconds.
+       *Unix timestamp in milliseconds of when the run ended.
        * 
* * optional int64 end_time = 3; */ public Builder clearEndTime() { - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000008); endTime_ = 0L; onChanged(); return this; @@ -22483,15 +21652,27 @@ public interface DeleteRunOrBuilder extends com.google.protobuf.MessageOrBuilder { /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to delete.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ boolean hasRunId(); /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to delete.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ java.lang.String getRunId(); /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to delete.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getRunIdBytes(); @@ -22990,13 +22171,21 @@ public org.mlflow.api.proto.Service.DeleteRun.Response getDefaultInstanceForType public static final int RUN_ID_FIELD_NUMBER = 1; private volatile java.lang.Object runId_; /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to delete.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to delete.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public java.lang.String getRunId() { java.lang.Object ref = runId_; @@ -23013,7 +22202,11 @@ public java.lang.String getRunId() { } } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to delete.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getRunIdBytes() { @@ -23347,13 +22540,21 @@ public Builder mergeFrom( private java.lang.Object runId_ = ""; /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to delete.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to delete.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public java.lang.String getRunId() { java.lang.Object ref = runId_; @@ -23370,7 +22571,11 @@ public java.lang.String getRunId() { } } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to delete.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getRunIdBytes() { @@ -23386,7 +22591,11 @@ public java.lang.String getRunId() { } } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to delete.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public Builder setRunId( java.lang.String value) { @@ -23399,7 +22608,11 @@ public Builder setRunId( return this; } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to delete.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public Builder clearRunId() { bitField0_ = (bitField0_ & ~0x00000001); @@ -23408,7 +22621,11 @@ public Builder clearRunId() { return this; } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to delete.
+       * 
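
One point worth calling out while reading DeleteRun (an aside, under the same assumptions as the UpdateRun sketch above): (mlflow.validate_required) is MLflow's own field option, not proto2 `required`. The field stays optional at the protobuf level, so the generated isInitialized() does not enforce it; a missing run_id is caught only where the request is validated outside the generated code shown here, e.g. by the tracking server.

    Service.DeleteRun delete = Service.DeleteRun.newBuilder()
        .setRunId(runId)   // required by (mlflow.validate_required), not by proto2 'required'
        .build();
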
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public Builder setRunIdBytes( com.google.protobuf.ByteString value) { @@ -23478,15 +22695,27 @@ public interface RestoreRunOrBuilder extends com.google.protobuf.MessageOrBuilder { /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to restore.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ boolean hasRunId(); /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to restore.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ java.lang.String getRunId(); /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to restore.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getRunIdBytes(); @@ -23985,13 +23214,21 @@ public org.mlflow.api.proto.Service.RestoreRun.Response getDefaultInstanceForTyp public static final int RUN_ID_FIELD_NUMBER = 1; private volatile java.lang.Object runId_; /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to restore.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to restore.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public java.lang.String getRunId() { java.lang.Object ref = runId_; @@ -24008,7 +23245,11 @@ public java.lang.String getRunId() { } } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+     * ID of the run to restore.
+     * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getRunIdBytes() { @@ -24342,13 +23583,21 @@ public Builder mergeFrom( private java.lang.Object runId_ = ""; /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to restore.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to restore.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public java.lang.String getRunId() { java.lang.Object ref = runId_; @@ -24365,7 +23614,11 @@ public java.lang.String getRunId() { } } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to restore.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getRunIdBytes() { @@ -24381,7 +23634,11 @@ public java.lang.String getRunId() { } } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to restore.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public Builder setRunId( java.lang.String value) { @@ -24394,7 +23651,11 @@ public Builder setRunId( return this; } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to restore.
+       * 
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public Builder clearRunId() { bitField0_ = (bitField0_ & ~0x00000001); @@ -24403,7 +23664,11 @@ public Builder clearRunId() { return this; } /** - * optional string run_id = 1 [(.validate_required) = true]; + *
+       * ID of the run to restore.
+       * 
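
RestoreRun mirrors DeleteRun field for field, so a soft-delete round trip reuses the same ID (sketch, same assumptions as above):

    Service.DeleteRun del  = Service.DeleteRun.newBuilder().setRunId(runId).build();
    Service.RestoreRun res = Service.RestoreRun.newBuilder().setRunId(runId).build();
    // Both messages carry only run_id (field 1); RestoreRun undoes the soft delete.
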
+ * + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public Builder setRunIdBytes( com.google.protobuf.ByteString value) { @@ -24474,26 +23739,55 @@ public interface LogMetricOrBuilder extends /** *
-     * ID of the run under which to log the metric.
+     * ID of the run under which to log the metric. Must be provided.
+     * 
+ * + * optional string run_id = 6; + */ + boolean hasRunId(); + /** + *
+     * ID of the run under which to log the metric. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 6; + */ + java.lang.String getRunId(); + /** + *
+     * ID of the run under which to log the metric. Must be provided.
+     * 
+ * + * optional string run_id = 6; + */ + com.google.protobuf.ByteString + getRunIdBytes(); + + /** + *
+     * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+     * be removed in a future MLflow version.
+     * 
+ * + * optional string run_uuid = 1; */ boolean hasRunUuid(); /** *
-     * ID of the run under which to log the metric.
+     * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ java.lang.String getRunUuid(); /** *
-     * ID of the run under which to log the metric.
+     * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ com.google.protobuf.ByteString getRunUuidBytes(); @@ -24503,7 +23797,7 @@ public interface LogMetricOrBuilder extends * Name of the metric. *
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ boolean hasKey(); /** @@ -24511,7 +23805,7 @@ public interface LogMetricOrBuilder extends * Name of the metric. *
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ java.lang.String getKey(); /** @@ -24519,7 +23813,7 @@ public interface LogMetricOrBuilder extends * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getKeyBytes(); @@ -24529,7 +23823,7 @@ public interface LogMetricOrBuilder extends * Double value of the metric being logged. * * - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ boolean hasValue(); /** @@ -24537,7 +23831,7 @@ public interface LogMetricOrBuilder extends * Double value of the metric being logged. * * - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ double getValue(); @@ -24546,7 +23840,7 @@ public interface LogMetricOrBuilder extends * Unix timestamp in milliseconds at the time metric was logged. * * - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ boolean hasTimestamp(); /** @@ -24554,9 +23848,26 @@ public interface LogMetricOrBuilder extends * Unix timestamp in milliseconds at the time metric was logged. * * - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ long getTimestamp(); + + /** + *
+     * Step at which to log the metric.
+     * 
+ * + * optional int64 step = 5 [default = 0]; + */ + boolean hasStep(); + /** + *
+     * Step at which to log the metric.
+     * 
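
An aside on the wire tags the LogMetric parser below switches on: protobuf computes tag = (field_number << 3) | wire_type, which is where the new case labels come from (sketch):

    int stepTag  = (5 << 3) | 0;  // 40: step is field 5, varint wire type
    int runIdTag = (6 << 3) | 2;  // 50: run_id is field 6, length-delimited
    // These match the new "case 40" and "case 50" branches in the parser;
    // LogParam's run_id at field 4 likewise yields (4 << 3) | 2 = 34.
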
+ * + * optional int64 step = 5 [default = 0]; + */ + long getStep(); } /** * Protobuf type {@code mlflow.LogMetric} @@ -24571,10 +23882,12 @@ private LogMetric(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private LogMetric() { + runId_ = ""; runUuid_ = ""; key_ = ""; value_ = 0D; timestamp_ = 0L; + step_ = 0L; } @java.lang.Override @@ -24603,26 +23916,37 @@ private LogMetric( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = bs; break; } case 18: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = bs; break; } case 25: { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = input.readDouble(); break; } case 32: { - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000010; timestamp_ = input.readInt64(); break; } + case 40: { + bitField0_ |= 0x00000020; + step_ = input.readInt64(); + break; + } + case 50: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + runId_ = bs; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -25068,24 +24392,80 @@ public org.mlflow.api.proto.Service.LogMetric.Response getDefaultInstanceForType } private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 6; + private volatile java.lang.Object runId_; + /** + *
+     * ID of the run under which to log the metric. Must be provided.
+     * 
+ * + * optional string run_id = 6; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+     * ID of the run under which to log the metric. Must be provided.
+     * 
+ * + * optional string run_id = 6; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } + } + /** + *
+     * ID of the run under which to log the metric. Must be provided.
+     * 
+ * + * optional string run_id = 6; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + public static final int RUN_UUID_FIELD_NUMBER = 1; private volatile java.lang.Object runUuid_; /** *
-     * ID of the run under which to log the metric.
+     * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * ID of the run under which to log the metric.
+     * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -25103,10 +24483,11 @@ public java.lang.String getRunUuid() { } /** *
-     * ID of the run under which to log the metric.
+     * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -25129,17 +24510,17 @@ public java.lang.String getRunUuid() { * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
      * Name of the metric.
      * 
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public java.lang.String getKey() { java.lang.Object ref = key_; @@ -25160,7 +24541,7 @@ public java.lang.String getKey() { * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getKeyBytes() { @@ -25183,17 +24564,17 @@ public java.lang.String getKey() { * Double value of the metric being logged. * * - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ public boolean hasValue() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
      * Double value of the metric being logged.
      * 
* - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ public double getValue() { return value_; @@ -25206,22 +24587,45 @@ public double getValue() { * Unix timestamp in milliseconds at the time metric was logged. * * - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ public boolean hasTimestamp() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** *
      * Unix timestamp in milliseconds at the time metric was logged.
      * 
* - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ public long getTimestamp() { return timestamp_; } + public static final int STEP_FIELD_NUMBER = 5; + private long step_; + /** + *
+     * Step at which to log the metric.
+     * 
+ * + * optional int64 step = 5 [default = 0]; + */ + public boolean hasStep() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + *
+     * Step at which to log the metric.
+     * 
+ * + * optional int64 step = 5 [default = 0]; + */ + public long getStep() { + return step_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -25236,18 +24640,24 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, key_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeDouble(3, value_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeInt64(4, timestamp_); } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeInt64(5, step_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, runId_); + } unknownFields.writeTo(output); } @@ -25257,20 +24667,27 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, key_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeDoubleSize(3, value_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(4, timestamp_); } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, step_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, runId_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -25287,6 +24704,11 @@ public boolean equals(final java.lang.Object obj) { org.mlflow.api.proto.Service.LogMetric other = (org.mlflow.api.proto.Service.LogMetric) obj; boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } result = result && (hasRunUuid() == other.hasRunUuid()); if (hasRunUuid()) { result = result && getRunUuid() @@ -25309,6 +24731,11 @@ public boolean equals(final java.lang.Object obj) { result = result && (getTimestamp() == other.getTimestamp()); } + result = result && (hasStep() == other.hasStep()); + if (hasStep()) { + result = result && (getStep() + == other.getStep()); + } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -25320,6 +24747,10 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } if (hasRunUuid()) { hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; hash = (53 * hash) + getRunUuid().hashCode(); @@ 
-25338,6 +24769,11 @@ public int hashCode() { hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getTimestamp()); } + if (hasStep()) { + hash = (37 * hash) + STEP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getStep()); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -25471,14 +24907,18 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - key_ = ""; + runUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - value_ = 0D; + key_ = ""; bitField0_ = (bitField0_ & ~0x00000004); - timestamp_ = 0L; + value_ = 0D; bitField0_ = (bitField0_ & ~0x00000008); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + step_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); return this; } @@ -25510,19 +24950,27 @@ public org.mlflow.api.proto.Service.LogMetric buildPartial() { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.key_ = key_; + result.runUuid_ = runUuid_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.value_ = value_; + result.key_ = key_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } + result.value_ = value_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } result.timestamp_ = timestamp_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.step_ = step_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -25572,13 +25020,18 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.LogMetric other) { if (other == org.mlflow.api.proto.Service.LogMetric.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + if (other.hasRunId()) { bitField0_ |= 0x00000001; + runId_ = other.runId_; + onChanged(); + } + if (other.hasRunUuid()) { + bitField0_ |= 0x00000002; runUuid_ = other.runUuid_; onChanged(); } if (other.hasKey()) { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = other.key_; onChanged(); } @@ -25588,6 +25041,9 @@ public Builder mergeFrom(org.mlflow.api.proto.Service.LogMetric other) { if (other.hasTimestamp()) { setTimestamp(other.getTimestamp()); } + if (other.hasStep()) { + setStep(other.getStep()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -25618,23 +25074,125 @@ public Builder mergeFrom( } private int bitField0_; + private java.lang.Object runId_ = ""; + /** + *
+       * ID of the run under which to log the metric. Must be provided.
+       * 
+ * + * optional string run_id = 6; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+       * ID of the run under which to log the metric. Must be provided.
+       * 
+ * + * optional string run_id = 6; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+       * ID of the run under which to log the metric. Must be provided.
+       * 
+ * + * optional string run_id = 6; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the run under which to log the metric. Must be provided.
+       * 
+ * + * optional string run_id = 6; + */ + public Builder setRunId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + /** + *
+       * ID of the run under which to log the metric. Must be provided.
+       * 
+ * + * optional string run_id = 6; + */ + public Builder clearRunId() { + bitField0_ = (bitField0_ & ~0x00000001); + runId_ = getDefaultInstance().getRunId(); + onChanged(); + return this; + } + /** + *
+       * ID of the run under which to log the metric. Must be provided.
+       * 
+ * + * optional string run_id = 6; + */ + public Builder setRunIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + private java.lang.Object runUuid_ = ""; /** *
-       * ID of the run under which to log the metric.
+       * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * ID of the run under which to log the metric.
+       * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -25652,10 +25210,11 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run under which to log the metric.
+       * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -25672,47 +25231,50 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run under which to log the metric.
+       * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; } /** *
-       * ID of the run under which to log the metric.
+       * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder clearRunUuid() { - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * ID of the run under which to log the metric.
+       * [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; @@ -25724,17 +25286,17 @@ public Builder setRunUuidBytes( * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
        * Name of the metric.
        * 
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public java.lang.String getKey() { java.lang.Object ref = key_; @@ -25755,7 +25317,7 @@ public java.lang.String getKey() { * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getKeyBytes() { @@ -25775,14 +25337,14 @@ public java.lang.String getKey() { * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder setKey( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; @@ -25792,10 +25354,10 @@ public Builder setKey( * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; @@ -25805,14 +25367,14 @@ public Builder clearKey() { * Name of the metric. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder setKeyBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; @@ -25824,17 +25386,17 @@ public Builder setKeyBytes( * Double value of the metric being logged. * * - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ public boolean hasValue() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
        * Double value of the metric being logged.
        * 
* - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ public double getValue() { return value_; @@ -25844,10 +25406,10 @@ public double getValue() { * Double value of the metric being logged. * * - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ public Builder setValue(double value) { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = value; onChanged(); return this; @@ -25857,10 +25419,10 @@ public Builder setValue(double value) { * Double value of the metric being logged. * * - * optional double value = 3 [(.validate_required) = true]; + * optional double value = 3 [(.mlflow.validate_required) = true]; */ public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000008); value_ = 0D; onChanged(); return this; @@ -25872,17 +25434,17 @@ public Builder clearValue() { * Unix timestamp in milliseconds at the time metric was logged. * * - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ public boolean hasTimestamp() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** *
        * Unix timestamp in milliseconds at the time metric was logged.
        * 
* - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ public long getTimestamp() { return timestamp_; @@ -25892,10 +25454,10 @@ public long getTimestamp() { * Unix timestamp in milliseconds at the time metric was logged. * * - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ public Builder setTimestamp(long value) { - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000010; timestamp_ = value; onChanged(); return this; @@ -25905,14 +25467,62 @@ public Builder setTimestamp(long value) { * Unix timestamp in milliseconds at the time metric was logged. * * - * optional int64 timestamp = 4 [(.validate_required) = true]; + * optional int64 timestamp = 4 [(.mlflow.validate_required) = true]; */ public Builder clearTimestamp() { - bitField0_ = (bitField0_ & ~0x00000008); + bitField0_ = (bitField0_ & ~0x00000010); timestamp_ = 0L; onChanged(); return this; } + + private long step_ ; + /** + *
+       * Step at which to log the metric.
+       * 
+ * + * optional int64 step = 5 [default = 0]; + */ + public boolean hasStep() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + *
+       * Step at which to log the metric.
+       * 
+ * + * optional int64 step = 5 [default = 0]; + */ + public long getStep() { + return step_; + } + /** + *
+       * Step at which to log the metric.
+       * 
+ * + * optional int64 step = 5 [default = 0]; + */ + public Builder setStep(long value) { + bitField0_ |= 0x00000020; + step_ = value; + onChanged(); + return this; + } + /** + *
+       * Step at which to log the metric.
+       * 
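
Putting the LogMetric changes together (sketch, same assumptions; the metric name and values are illustrative):

    Service.LogMetric metric = Service.LogMetric.newBuilder()
        .setRunId(runId)                            // field 6; run_uuid (field 1) is deprecated
        .setKey("val_loss")                         // metric name, still validate_required
        .setValue(0.42D)                            // double value
        .setTimestamp(System.currentTimeMillis())   // Unix timestamp in milliseconds
        .setStep(3L)                                // new field 5, [default = 0]
        .build();

    // Because step is optional with a default of 0, hasStep() is what
    // separates "explicitly logged at step 0" from "step never set";
    // getStep() returns 0 in both cases.
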
+ * + * optional int64 step = 5 [default = 0]; + */ + public Builder clearStep() { + bitField0_ = (bitField0_ & ~0x00000020); + step_ = 0L; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -25972,26 +25582,55 @@ public interface LogParamOrBuilder extends /** *
-     * ID of the run under which to log the param.
+     * ID of the run under which to log the param. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 4; + */ + boolean hasRunId(); + /** + *
+     * ID of the run under which to log the param. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + java.lang.String getRunId(); + /** + *
+     * ID of the run under which to log the param. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + com.google.protobuf.ByteString + getRunIdBytes(); + + /** + *
+     * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+     * be removed in a future MLflow version.
+     * 
+ * + * optional string run_uuid = 1; */ boolean hasRunUuid(); /** *
-     * ID of the run under which to log the param.
+     * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ java.lang.String getRunUuid(); /** *
-     * ID of the run under which to log the param.
+     * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ com.google.protobuf.ByteString getRunUuidBytes(); @@ -26001,7 +25640,7 @@ public interface LogParamOrBuilder extends * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ boolean hasKey(); /** @@ -26009,7 +25648,7 @@ public interface LogParamOrBuilder extends * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ java.lang.String getKey(); /** @@ -26017,7 +25656,7 @@ public interface LogParamOrBuilder extends * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getKeyBytes(); @@ -26027,7 +25666,7 @@ public interface LogParamOrBuilder extends * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ boolean hasValue(); /** @@ -26035,7 +25674,7 @@ public interface LogParamOrBuilder extends * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ java.lang.String getValue(); /** @@ -26043,7 +25682,7 @@ public interface LogParamOrBuilder extends * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getValueBytes(); @@ -26061,6 +25700,7 @@ private LogParam(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private LogParam() { + runId_ = ""; runUuid_ = ""; key_ = ""; value_ = ""; @@ -26092,22 +25732,28 @@ private LogParam( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = bs; break; } case 18: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = bs; break; } case 26: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = bs; break; } + case 34: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + runId_ = bs; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -26553,24 +26199,80 @@ public org.mlflow.api.proto.Service.LogParam.Response getDefaultInstanceForType( } private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 4; + private volatile java.lang.Object runId_; + /** + *
+     * ID of the run under which to log the param. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+     * ID of the run under which to log the param. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } + } + /** + *
+     * ID of the run under which to log the param. Must be provided.
+     * 
+ * + * optional string run_id = 4; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + public static final int RUN_UUID_FIELD_NUMBER = 1; private volatile java.lang.Object runUuid_; /** *
-     * ID of the run under which to log the param.
+     * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * ID of the run under which to log the param.
+     * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -26588,10 +26290,11 @@ public java.lang.String getRunUuid() { } /** *
-     * ID of the run under which to log the param.
+     * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -26614,17 +26317,17 @@ public java.lang.String getRunUuid() { * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
      * Name of the param. Maximum size is 255 bytes.
      * 
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public java.lang.String getKey() { java.lang.Object ref = key_; @@ -26645,7 +26348,7 @@ public java.lang.String getKey() { * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getKeyBytes() { @@ -26668,17 +26371,17 @@ public java.lang.String getKey() { * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public boolean hasValue() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
      * String value of the param being logged. Maximum size is 500 bytes.
      * 
* - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public java.lang.String getValue() { java.lang.Object ref = value_; @@ -26699,7 +26402,7 @@ public java.lang.String getValue() { * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getValueBytes() { @@ -26729,15 +26432,18 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, key_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, value_); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, runId_); + } unknownFields.writeTo(output); } @@ -26747,15 +26453,18 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, key_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, value_); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, runId_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -26772,6 +26481,11 @@ public boolean equals(final java.lang.Object obj) { org.mlflow.api.proto.Service.LogParam other = (org.mlflow.api.proto.Service.LogParam) obj; boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } result = result && (hasRunUuid() == other.hasRunUuid()); if (hasRunUuid()) { result = result && getRunUuid() @@ -26798,6 +26512,10 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } if (hasRunUuid()) { hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; hash = (53 * hash) + getRunUuid().hashCode(); @@ -26943,12 +26661,14 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - key_ = ""; + runUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - value_ = ""; + key_ = ""; bitField0_ = (bitField0_ & ~0x00000004); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -26980,14 +26700,18 @@ public org.mlflow.api.proto.Service.LogParam buildPartial() { if (((from_bitField0_ & 
0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.key_ = key_; + result.runUuid_ = runUuid_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + result.key_ = key_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -27038,18 +26762,23 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.LogParam other) { if (other == org.mlflow.api.proto.Service.LogParam.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + if (other.hasRunId()) { bitField0_ |= 0x00000001; + runId_ = other.runId_; + onChanged(); + } + if (other.hasRunUuid()) { + bitField0_ |= 0x00000002; runUuid_ = other.runUuid_; onChanged(); } if (other.hasKey()) { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = other.key_; onChanged(); } if (other.hasValue()) { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = other.value_; onChanged(); } @@ -27083,23 +26812,125 @@ public Builder mergeFrom( } private int bitField0_; + private java.lang.Object runId_ = ""; + /** + *
+       * ID of the run under which to log the param. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+       * ID of the run under which to log the param. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+       * ID of the run under which to log the param. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the run under which to log the param. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder setRunId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + /** + *
+       * ID of the run under which to log the param. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder clearRunId() { + bitField0_ = (bitField0_ & ~0x00000001); + runId_ = getDefaultInstance().getRunId(); + onChanged(); + return this; + } + /** + *
+       * ID of the run under which to log the param. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder setRunIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + private java.lang.Object runUuid_ = ""; /** *
-       * ID of the run under which to log the param.
+       * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * ID of the run under which to log the param.
+       * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -27117,10 +26948,11 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run under which to log the param.
+       * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -27137,47 +26969,50 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run under which to log the param.
+       * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; } /** *
-       * ID of the run under which to log the param.
+       * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder clearRunUuid() { - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * ID of the run under which to log the param.
+       * [Deprecated, use run_id instead] ID of the run under which to log the param. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; @@ -27189,17 +27024,17 @@ public Builder setRunUuidBytes( * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
        * Name of the param. Maximum size is 255 bytes.
        * 
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public java.lang.String getKey() { java.lang.Object ref = key_; @@ -27220,7 +27055,7 @@ public java.lang.String getKey() { * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getKeyBytes() { @@ -27240,14 +27075,14 @@ public java.lang.String getKey() { * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder setKey( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; @@ -27257,10 +27092,10 @@ public Builder setKey( * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; @@ -27270,14 +27105,14 @@ public Builder clearKey() { * Name of the param. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder setKeyBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; @@ -27289,17 +27124,17 @@ public Builder setKeyBytes( * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public boolean hasValue() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
        * String value of the param being logged. Maximum size is 500 bytes.
        * 
* - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public java.lang.String getValue() { java.lang.Object ref = value_; @@ -27320,7 +27155,7 @@ public java.lang.String getValue() { * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getValueBytes() { @@ -27340,14 +27175,14 @@ public java.lang.String getValue() { * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public Builder setValue( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = value; onChanged(); return this; @@ -27357,10 +27192,10 @@ public Builder setValue( * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000008); value_ = getDefaultInstance().getValue(); onChanged(); return this; @@ -27370,14 +27205,14 @@ public Builder clearValue() { * String value of the param being logged. Maximum size is 500 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public Builder setValueBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = value; onChanged(); return this; @@ -27441,26 +27276,55 @@ public interface SetTagOrBuilder extends /** *
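
With that, the LogParam message is fully migrated: run_id is the field new clients should set, run_uuid survives only for wire compatibility, and key/value keep their 255- and 500-byte limits. A minimal sketch of building the request through the generated builder (assuming the enclosing message is Service.LogParam, as the surrounding accessors indicate; the literals are hypothetical):

    import org.mlflow.api.proto.Service.LogParam;

    class LogParamExample {
      static LogParam build() {
        return LogParam.newBuilder()
            .setRunId("0a1b2c3d4e5f")   // hypothetical run ID; preferred over setRunUuid
            .setKey("learning_rate")    // key: at most 255 bytes per the comment above
            .setValue("0.01")           // value: at most 500 bytes per the comment above
            .build();
      }
    }
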
-     * ID of the run under which to set the tag.
+     * ID of the run under which to log the tag. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    boolean hasRunId();
+    /**
+     *
+     * ID of the run under which to log the tag. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    java.lang.String getRunId();
+    /**
+     *
+     * ID of the run under which to log the tag. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    com.google.protobuf.ByteString
+        getRunIdBytes();
+
+    /**
+     *
+     * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+     * be removed in a future MLflow version.
      * 
      *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_uuid = 1;
      */
     boolean hasRunUuid();
     /**
      *
-     * ID of the run under which to set the tag.
+     * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ java.lang.String getRunUuid(); /** *
-     * ID of the run under which to set the tag.
+     * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ com.google.protobuf.ByteString getRunUuidBytes(); @@ -27470,7 +27334,7 @@ public interface SetTagOrBuilder extends * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ boolean hasKey(); /** @@ -27478,7 +27342,7 @@ public interface SetTagOrBuilder extends * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ java.lang.String getKey(); /** @@ -27486,7 +27350,7 @@ public interface SetTagOrBuilder extends * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getKeyBytes(); @@ -27496,7 +27360,7 @@ public interface SetTagOrBuilder extends * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ boolean hasValue(); /** @@ -27504,7 +27368,7 @@ public interface SetTagOrBuilder extends * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ java.lang.String getValue(); /** @@ -27512,7 +27376,7 @@ public interface SetTagOrBuilder extends * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ com.google.protobuf.ByteString getValueBytes(); @@ -27530,6 +27394,7 @@ private SetTag(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SetTag() { + runId_ = ""; runUuid_ = ""; key_ = ""; value_ = ""; @@ -27561,22 +27426,28 @@ private SetTag( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = bs; break; } case 18: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = bs; break; } case 26: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = bs; break; } + case 34: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + runId_ = bs; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -28022,24 +27893,80 @@ public org.mlflow.api.proto.Service.SetTag.Response getDefaultInstanceForType() } private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 4; + private volatile java.lang.Object runId_; + /** + *
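
The new case 34 in the parse loop above is the wire tag for the added field: protobuf encodes each key as (field_number << 3) | wire_type, and run_id is field 4 with wire type 2 (length-delimited), so (4 << 3) | 2 = 34. The pre-existing cases 10, 18, and 26 fall out of the same formula for fields 1, 2, and 3.
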
+     * ID of the run under which to log the tag. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    public boolean hasRunId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     *
+     * ID of the run under which to log the tag. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    public java.lang.String getRunId() {
+      java.lang.Object ref = runId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          runId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     *
+     * ID of the run under which to log the tag. Must be provided.
+     * 
+     *
+     * optional string run_id = 4;
+     */
+    public com.google.protobuf.ByteString
+        getRunIdBytes() {
+      java.lang.Object ref = runId_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        runId_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     public static final int RUN_UUID_FIELD_NUMBER = 1;
     private volatile java.lang.Object runUuid_;
     /**
      *
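
The getRunId accessor above follows protoc's usual lazy-decode pattern: the field is stored as either a String or a ByteString, and the first String read decodes the UTF-8 bytes and caches the result when they are valid. A sketch of the observable behavior (names are illustrative):

    import org.mlflow.api.proto.Service.SetTag;

    class LazyDecode {
      static void decodeTwice(byte[] wireBytes)
          throws com.google.protobuf.InvalidProtocolBufferException {
        SetTag msg = SetTag.parseFrom(wireBytes); // field arrives as a ByteString
        String first = msg.getRunId();            // decodes UTF-8, caches the String form
        String second = msg.getRunId();           // served from the cache, no re-decode
      }
    }
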
-     * ID of the run under which to set the tag.
+     * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * ID of the run under which to set the tag.
+     * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -28057,10 +27984,11 @@ public java.lang.String getRunUuid() { } /** *
-     * ID of the run under which to set the tag.
+     * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -28083,17 +28011,17 @@ public java.lang.String getRunUuid() { * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
      * Name of the tag. Maximum size is 255 bytes.
      * 
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public java.lang.String getKey() { java.lang.Object ref = key_; @@ -28114,7 +28042,7 @@ public java.lang.String getKey() { * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getKeyBytes() { @@ -28137,17 +28065,17 @@ public java.lang.String getKey() { * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public boolean hasValue() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
      * String value of the tag being logged. Maximum size is 5000 bytes.
      * 
* - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public java.lang.String getValue() { java.lang.Object ref = value_; @@ -28168,7 +28096,7 @@ public java.lang.String getValue() { * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getValueBytes() { @@ -28198,15 +28126,18 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, key_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, value_); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, runId_); + } unknownFields.writeTo(output); } @@ -28216,15 +28147,18 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, key_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, value_); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, runId_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -28241,6 +28175,11 @@ public boolean equals(final java.lang.Object obj) { org.mlflow.api.proto.Service.SetTag other = (org.mlflow.api.proto.Service.SetTag) obj; boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } result = result && (hasRunUuid() == other.hasRunUuid()); if (hasRunUuid()) { result = result && getRunUuid() @@ -28267,6 +28206,10 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } if (hasRunUuid()) { hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; hash = (53 * hash) + getRunUuid().hashCode(); @@ -28412,12 +28355,14 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - key_ = ""; + runUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - value_ = ""; + key_ = ""; bitField0_ = (bitField0_ & ~0x00000004); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -28449,14 +28394,18 @@ public org.mlflow.api.proto.Service.SetTag buildPartial() { if (((from_bitField0_ & 
0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.key_ = key_; + result.runUuid_ = runUuid_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + result.key_ = key_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -28507,18 +28456,23 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.mlflow.api.proto.Service.SetTag other) { if (other == org.mlflow.api.proto.Service.SetTag.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + if (other.hasRunId()) { bitField0_ |= 0x00000001; + runId_ = other.runId_; + onChanged(); + } + if (other.hasRunUuid()) { + bitField0_ |= 0x00000002; runUuid_ = other.runUuid_; onChanged(); } if (other.hasKey()) { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = other.key_; onChanged(); } if (other.hasValue()) { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = other.value_; onChanged(); } @@ -28552,23 +28506,125 @@ public Builder mergeFrom( } private int bitField0_; + private java.lang.Object runId_ = ""; + /** + *
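
Because mergeFrom and buildPartial above treat run_id and run_uuid as independent fields, a client that must work against both old and new tracking servers can simply populate both; servers that predate run_id keep reading field 1 and ignore the unknown field 4. A hedged sketch (the duplicated value is deliberate):

    import org.mlflow.api.proto.Service.SetTag;

    class CompatSetTag {
      static SetTag build(String runId) {
        return SetTag.newBuilder()
            .setRunId(runId)    // field 4: what current servers read
            .setRunUuid(runId)  // field 1: kept so pre-run_id servers still resolve the run
            .setKey("stage")
            .setValue("training")
            .build();
      }
    }
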
+       * ID of the run under which to log the tag. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+       * ID of the run under which to log the tag. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+       * ID of the run under which to log the tag. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the run under which to log the tag. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder setRunId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + /** + *
+       * ID of the run under which to log the tag. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder clearRunId() { + bitField0_ = (bitField0_ & ~0x00000001); + runId_ = getDefaultInstance().getRunId(); + onChanged(); + return this; + } + /** + *
+       * ID of the run under which to log the tag. Must be provided.
+       * 
+ * + * optional string run_id = 4; + */ + public Builder setRunIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + private java.lang.Object runUuid_ = ""; /** *
-       * ID of the run under which to set the tag.
+       * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * ID of the run under which to set the tag.
+       * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public java.lang.String getRunUuid() { java.lang.Object ref = runUuid_; @@ -28586,10 +28642,11 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run under which to set the tag.
+       * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString getRunUuidBytes() { @@ -28606,47 +28663,50 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run under which to set the tag.
+       * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; } /** *
-       * ID of the run under which to set the tag.
+       * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder clearRunUuid() { - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * ID of the run under which to set the tag.
+       * [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public Builder setRunUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; @@ -28658,17 +28718,17 @@ public Builder setRunUuidBytes( * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public boolean hasKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
        * Name of the tag. Maximum size is 255 bytes.
        * 
* - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public java.lang.String getKey() { java.lang.Object ref = key_; @@ -28689,7 +28749,7 @@ public java.lang.String getKey() { * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getKeyBytes() { @@ -28709,14 +28769,14 @@ public java.lang.String getKey() { * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder setKey( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; @@ -28726,10 +28786,10 @@ public Builder setKey( * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); key_ = getDefaultInstance().getKey(); onChanged(); return this; @@ -28739,14 +28799,14 @@ public Builder clearKey() { * Name of the tag. Maximum size is 255 bytes. * * - * optional string key = 2 [(.validate_required) = true]; + * optional string key = 2 [(.mlflow.validate_required) = true]; */ public Builder setKeyBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; key_ = value; onChanged(); return this; @@ -28758,17 +28818,17 @@ public Builder setKeyBytes( * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public boolean hasValue() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
        * String value of the tag being logged. Maximum size is 5000 bytes.
        * 
* - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public java.lang.String getValue() { java.lang.Object ref = value_; @@ -28789,7 +28849,7 @@ public java.lang.String getValue() { * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString getValueBytes() { @@ -28809,14 +28869,14 @@ public java.lang.String getValue() { * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public Builder setValue( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = value; onChanged(); return this; @@ -28826,10 +28886,10 @@ public Builder setValue( * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000008); value_ = getDefaultInstance().getValue(); onChanged(); return this; @@ -28839,14 +28899,14 @@ public Builder clearValue() { * String value of the tag being logged. Maximum size is 5000 bytes. * * - * optional string value = 3 [(.validate_required) = true]; + * optional string value = 3 [(.mlflow.validate_required) = true]; */ public Builder setValueBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000008; value_ = value; onChanged(); return this; @@ -28904,50 +28964,77 @@ public org.mlflow.api.proto.Service.SetTag getDefaultInstanceForType() { } - public interface GetRunOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetRun) + public interface DeleteTagOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.DeleteTag) com.google.protobuf.MessageOrBuilder { /** *
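
With the SetTag message complete, a round trip through the generated serializer is a quick way to confirm that the change is self-consistent: writeTo emits field 4 when the presence bit is set, and the bit survives re-parsing. A small sketch using the standard generated surface (the run ID literal is hypothetical):

    import org.mlflow.api.proto.Service.SetTag;

    class SetTagRoundTrip {
      static void check() throws com.google.protobuf.InvalidProtocolBufferException {
        byte[] wire = SetTag.newBuilder()
            .setRunId("abc123")   // hypothetical run ID
            .setKey("stage")
            .setValue("training")
            .build()
            .toByteArray();

        SetTag parsed = SetTag.parseFrom(wire);
        assert parsed.hasRunId() && "abc123".equals(parsed.getRunId());
      }
    }
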
-     * ID of the run to fetch.
+     * ID of the run that the tag was logged under. Must be provided.
      * 
      *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_id = 1 [(.mlflow.validate_required) = true];
      */
-    boolean hasRunUuid();
+    boolean hasRunId();
     /**
      *
-     * ID of the run to fetch.
+     * ID of the run that the tag was logged under. Must be provided.
      * 
      *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_id = 1 [(.mlflow.validate_required) = true];
      */
-    java.lang.String getRunUuid();
+    java.lang.String getRunId();
     /**
      *
-     * ID of the run to fetch.
+     * ID of the run that the tag was logged under. Must be provided.
      * 
      *
-     * optional string run_uuid = 1 [(.validate_required) = true];
+     * optional string run_id = 1 [(.mlflow.validate_required) = true];
      */
     com.google.protobuf.ByteString
-        getRunUuidBytes();
+        getRunIdBytes();
+
+    /**
+     *
+     * Name of the tag. Maximum size is 255 bytes. Must be provided.
+     * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + boolean hasKey(); + /** + *
+     * Name of the tag. Maximum size is 255 bytes. Must be provided.
+     * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + java.lang.String getKey(); + /** + *
+     * Name of the tag. Maximum size is 255 bytes. Must be provided.
+     * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + com.google.protobuf.ByteString + getKeyBytes(); } /** - * Protobuf type {@code mlflow.GetRun} + * Protobuf type {@code mlflow.DeleteTag} */ - public static final class GetRun extends + public static final class DeleteTag extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetRun) - GetRunOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.DeleteTag) + DeleteTagOrBuilder { private static final long serialVersionUID = 0L; - // Use GetRun.newBuilder() to construct. - private GetRun(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use DeleteTag.newBuilder() to construct. + private DeleteTag(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private GetRun() { - runUuid_ = ""; + private DeleteTag() { + runId_ = ""; + key_ = ""; } @java.lang.Override @@ -28955,7 +29042,7 @@ private GetRun() { getUnknownFields() { return this.unknownFields; } - private GetRun( + private DeleteTag( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -28977,7 +29064,13 @@ private GetRun( case 10: { com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - runUuid_ = bs; + runId_ = bs; + break; + } + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000002; + key_ = bs; break; } default: { @@ -29001,52 +29094,27 @@ private GetRun( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetRun.class, org.mlflow.api.proto.Service.GetRun.Builder.class); + org.mlflow.api.proto.Service.DeleteTag.class, org.mlflow.api.proto.Service.DeleteTag.Builder.class); } public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetRun.Response) + // @@protoc_insertion_point(interface_extends:mlflow.DeleteTag.Response) com.google.protobuf.MessageOrBuilder { - - /** - *
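
Unlike SetTag, the new DeleteTag message carries no deprecated alias: run_id is field 1 outright, and both fields are marked (mlflow.validate_required) = true. A minimal request sketch (the literals are illustrative):

    import org.mlflow.api.proto.Service.DeleteTag;

    class DeleteTagExample {
      static DeleteTag build() {
        return DeleteTag.newBuilder()
            .setRunId("abc123")  // required: run the tag was logged under
            .setKey("stage")     // required: tag name, at most 255 bytes
            .build();
      }
    }
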
-       * Run metadata (name, start time, etc) and data (metrics, params, etc).
-       * 
- * - * optional .mlflow.Run run = 1; - */ - boolean hasRun(); - /** - *
-       * Run metadata (name, start time, etc) and data (metrics, params, etc).
-       * 
- * - * optional .mlflow.Run run = 1; - */ - org.mlflow.api.proto.Service.Run getRun(); - /** - *
-       * Run metadata (name, start time, etc) and data (metrics, params, etc).
-       * 
- * - * optional .mlflow.Run run = 1; - */ - org.mlflow.api.proto.Service.RunOrBuilder getRunOrBuilder(); } /** - * Protobuf type {@code mlflow.GetRun.Response} + * Protobuf type {@code mlflow.DeleteTag.Response} */ public static final class Response extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetRun.Response) + // @@protoc_insertion_point(message_implements:mlflow.DeleteTag.Response) ResponseOrBuilder { private static final long serialVersionUID = 0L; // Use Response.newBuilder() to construct. @@ -29069,7 +29137,6 @@ private Response( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -29080,19 +29147,6 @@ private Response( case 0: done = true; break; - case 10: { - org.mlflow.api.proto.Service.Run.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = run_.toBuilder(); - } - run_ = input.readMessage(org.mlflow.api.proto.Service.Run.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(run_); - run_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -29114,49 +29168,15 @@ private Response( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetRun.Response.class, org.mlflow.api.proto.Service.GetRun.Response.Builder.class); - } - - private int bitField0_; - public static final int RUN_FIELD_NUMBER = 1; - private org.mlflow.api.proto.Service.Run run_; - /** - *
-       * Run metadata (name, start time, etc) and data (metrics, params, etc).
-       * 
- * - * optional .mlflow.Run run = 1; - */ - public boolean hasRun() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * Run metadata (name, start time, etc) and data (metrics, params, etc).
-       * 
- * - * optional .mlflow.Run run = 1; - */ - public org.mlflow.api.proto.Service.Run getRun() { - return run_ == null ? org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; - } - /** - *
-       * Run metadata (name, start time, etc) and data (metrics, params, etc).
-       * 
- * - * optional .mlflow.Run run = 1; - */ - public org.mlflow.api.proto.Service.RunOrBuilder getRunOrBuilder() { - return run_ == null ? org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; + org.mlflow.api.proto.Service.DeleteTag.Response.class, org.mlflow.api.proto.Service.DeleteTag.Response.Builder.class); } private byte memoizedIsInitialized = -1; @@ -29173,9 +29193,6 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, getRun()); - } unknownFields.writeTo(output); } @@ -29185,10 +29202,6 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getRun()); - } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -29199,17 +29212,12 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetRun.Response)) { + if (!(obj instanceof org.mlflow.api.proto.Service.DeleteTag.Response)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetRun.Response other = (org.mlflow.api.proto.Service.GetRun.Response) obj; + org.mlflow.api.proto.Service.DeleteTag.Response other = (org.mlflow.api.proto.Service.DeleteTag.Response) obj; boolean result = true; - result = result && (hasRun() == other.hasRun()); - if (hasRun()) { - result = result && getRun() - .equals(other.getRun()); - } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -29221,78 +29229,74 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRun()) { - hash = (37 * hash) + RUN_FIELD_NUMBER; - hash = (53 * hash) + getRun().hashCode(); - } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static 
org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun.Response parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.DeleteTag.Response parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetRun.Response parseDelimitedFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag.Response parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -29305,7 +29309,7 @@ public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetRun.Response prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.DeleteTag.Response prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -29321,26 +29325,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetRun.Response} + * Protobuf type {@code mlflow.DeleteTag.Response} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetRun.Response) - org.mlflow.api.proto.Service.GetRun.ResponseOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.DeleteTag.Response) + org.mlflow.api.proto.Service.DeleteTag.ResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_descriptor; + return 
org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetRun.Response.class, org.mlflow.api.proto.Service.GetRun.Response.Builder.class); + org.mlflow.api.proto.Service.DeleteTag.Response.class, org.mlflow.api.proto.Service.DeleteTag.Response.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetRun.Response.newBuilder() + // Construct using org.mlflow.api.proto.Service.DeleteTag.Response.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -29353,35 +29357,28 @@ private Builder( private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getRunFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); - if (runBuilder_ == null) { - run_ = null; - } else { - runBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_Response_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetRun.Response.getDefaultInstance(); + public org.mlflow.api.proto.Service.DeleteTag.Response getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.DeleteTag.Response.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun.Response build() { - org.mlflow.api.proto.Service.GetRun.Response result = buildPartial(); + public org.mlflow.api.proto.Service.DeleteTag.Response build() { + org.mlflow.api.proto.Service.DeleteTag.Response result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -29389,19 +29386,8 @@ public org.mlflow.api.proto.Service.GetRun.Response build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun.Response buildPartial() { - org.mlflow.api.proto.Service.GetRun.Response result = new org.mlflow.api.proto.Service.GetRun.Response(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (runBuilder_ == null) { - result.run_ = run_; - } else { - result.run_ = runBuilder_.build(); - } - result.bitField0_ = to_bitField0_; + public org.mlflow.api.proto.Service.DeleteTag.Response buildPartial() { + org.mlflow.api.proto.Service.DeleteTag.Response result = new org.mlflow.api.proto.Service.DeleteTag.Response(this); onBuilt(); return result; } @@ -29440,19 +29426,16 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetRun.Response) { - return mergeFrom((org.mlflow.api.proto.Service.GetRun.Response)other); + if (other instanceof org.mlflow.api.proto.Service.DeleteTag.Response) { + return mergeFrom((org.mlflow.api.proto.Service.DeleteTag.Response)other); } else { 
super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetRun.Response other) { - if (other == org.mlflow.api.proto.Service.GetRun.Response.getDefaultInstance()) return this; - if (other.hasRun()) { - mergeRun(other.getRun()); - } + public Builder mergeFrom(org.mlflow.api.proto.Service.DeleteTag.Response other) { + if (other == org.mlflow.api.proto.Service.DeleteTag.Response.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -29468,11 +29451,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetRun.Response parsedMessage = null; + org.mlflow.api.proto.Service.DeleteTag.Response parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetRun.Response) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.DeleteTag.Response) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -29481,161 +29464,6 @@ public Builder mergeFrom( } return this; } - private int bitField0_; - - private org.mlflow.api.proto.Service.Run run_ = null; - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> runBuilder_; - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public boolean hasRun() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public org.mlflow.api.proto.Service.Run getRun() { - if (runBuilder_ == null) { - return run_ == null ? org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; - } else { - return runBuilder_.getMessage(); - } - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public Builder setRun(org.mlflow.api.proto.Service.Run value) { - if (runBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - run_ = value; - onChanged(); - } else { - runBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public Builder setRun( - org.mlflow.api.proto.Service.Run.Builder builderForValue) { - if (runBuilder_ == null) { - run_ = builderForValue.build(); - onChanged(); - } else { - runBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public Builder mergeRun(org.mlflow.api.proto.Service.Run value) { - if (runBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - run_ != null && - run_ != org.mlflow.api.proto.Service.Run.getDefaultInstance()) { - run_ = - org.mlflow.api.proto.Service.Run.newBuilder(run_).mergeFrom(value).buildPartial(); - } else { - run_ = value; - } - onChanged(); - } else { - runBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public Builder clearRun() { - if (runBuilder_ == null) { - run_ = null; - onChanged(); - } else { - runBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public org.mlflow.api.proto.Service.Run.Builder getRunBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getRunFieldBuilder().getBuilder(); - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - public org.mlflow.api.proto.Service.RunOrBuilder getRunOrBuilder() { - if (runBuilder_ != null) { - return runBuilder_.getMessageOrBuilder(); - } else { - return run_ == null ? - org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; - } - } - /** - *
-         * Run metadata (name, start time, etc) and data (metrics, params, etc).
-         * 
- * - * optional .mlflow.Run run = 1; - */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> - getRunFieldBuilder() { - if (runBuilder_ == null) { - runBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder>( - getRun(), - getParentForChildren(), - isClean()); - run_ = null; - } - return runBuilder_; - } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -29649,16 +29477,16 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetRun.Response) + // @@protoc_insertion_point(builder_scope:mlflow.DeleteTag.Response) } - // @@protoc_insertion_point(class_scope:mlflow.GetRun.Response) - private static final org.mlflow.api.proto.Service.GetRun.Response DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.DeleteTag.Response) + private static final org.mlflow.api.proto.Service.DeleteTag.Response DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetRun.Response(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.DeleteTag.Response(); } - public static org.mlflow.api.proto.Service.GetRun.Response getDefaultInstance() { + public static org.mlflow.api.proto.Service.DeleteTag.Response getDefaultInstance() { return DEFAULT_INSTANCE; } @@ -29683,34 +29511,34 @@ public com.google.protobuf.Parser getParserForType() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun.Response getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.DeleteTag.Response getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; - public static final int RUN_UUID_FIELD_NUMBER = 1; - private volatile java.lang.Object runUuid_; + public static final int RUN_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object runId_; /** *
-     * ID of the run to fetch.
+     * ID of the run that the tag was logged under. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public boolean hasRunUuid() { + public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-     * ID of the run to fetch.
+     * ID of the run that the tag was logged under. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; + public java.lang.String getRunId() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -29718,26 +29546,80 @@ public java.lang.String getRunUuid() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - runUuid_ = s; + runId_ = s; } return s; } } /** *
-     * ID of the run to fetch.
+     * ID of the run that the tag was logged under. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - runUuid_ = b; + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KEY_FIELD_NUMBER = 2; + private volatile java.lang.Object key_; + /** + *
+     * Name of the tag. Maximum size is 255 bytes. Must be provided.
+     * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + *
+     * Name of the tag. Maximum size is 255 bytes. Must be provided.
+     * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + *
+     * Name of the tag. Maximum size is 255 bytes. Must be provided.
+     * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -29759,7 +29641,10 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, key_); } unknownFields.writeTo(output); } @@ -29771,7 +29656,10 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, key_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -29783,16 +29671,21 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetRun)) { + if (!(obj instanceof org.mlflow.api.proto.Service.DeleteTag)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetRun other = (org.mlflow.api.proto.Service.GetRun) obj; + org.mlflow.api.proto.Service.DeleteTag other = (org.mlflow.api.proto.Service.DeleteTag) obj; boolean result = true; - result = result && (hasRunUuid() == other.hasRunUuid()); - if (hasRunUuid()) { - result = result && getRunUuid() - .equals(other.getRunUuid()); + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } + result = result && (hasKey() == other.hasKey()); + if (hasKey()) { + result = result && getKey() + .equals(other.getKey()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -29805,78 +29698,82 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRunUuid()) { - hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; - hash = (53 * hash) + getRunUuid().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } + if (hasKey()) { + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static 
org.mlflow.api.proto.Service.DeleteTag parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.DeleteTag parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.DeleteTag parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.DeleteTag parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetRun parseDelimitedFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetRun parseFrom( + public static org.mlflow.api.proto.Service.DeleteTag parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -29889,7 +29786,7 @@ public static org.mlflow.api.proto.Service.GetRun parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetRun prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.DeleteTag prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -29905,26 +29802,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetRun} + * Protobuf type {@code 
mlflow.DeleteTag} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetRun) - org.mlflow.api.proto.Service.GetRunOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.DeleteTag) + org.mlflow.api.proto.Service.DeleteTagOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetRun.class, org.mlflow.api.proto.Service.GetRun.Builder.class); + org.mlflow.api.proto.Service.DeleteTag.class, org.mlflow.api.proto.Service.DeleteTag.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetRun.newBuilder() + // Construct using org.mlflow.api.proto.Service.DeleteTag.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -29942,25 +29839,27 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_DeleteTag_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetRun.getDefaultInstance(); + public org.mlflow.api.proto.Service.DeleteTag getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.DeleteTag.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun build() { - org.mlflow.api.proto.Service.GetRun result = buildPartial(); + public org.mlflow.api.proto.Service.DeleteTag build() { + org.mlflow.api.proto.Service.DeleteTag result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -29968,14 +29867,18 @@ public org.mlflow.api.proto.Service.GetRun build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun buildPartial() { - org.mlflow.api.proto.Service.GetRun result = new org.mlflow.api.proto.Service.GetRun(this); + public org.mlflow.api.proto.Service.DeleteTag buildPartial() { + org.mlflow.api.proto.Service.DeleteTag result = new org.mlflow.api.proto.Service.DeleteTag(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.key_ = key_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -30015,19 +29918,24 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetRun) { - return 
mergeFrom((org.mlflow.api.proto.Service.GetRun)other); + if (other instanceof org.mlflow.api.proto.Service.DeleteTag) { + return mergeFrom((org.mlflow.api.proto.Service.DeleteTag)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetRun other) { - if (other == org.mlflow.api.proto.Service.GetRun.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + public Builder mergeFrom(org.mlflow.api.proto.Service.DeleteTag other) { + if (other == org.mlflow.api.proto.Service.DeleteTag.getDefaultInstance()) return this; + if (other.hasRunId()) { bitField0_ |= 0x00000001; - runUuid_ = other.runUuid_; + runId_ = other.runId_; + onChanged(); + } + if (other.hasKey()) { + bitField0_ |= 0x00000002; + key_ = other.key_; onChanged(); } this.mergeUnknownFields(other.unknownFields); @@ -30045,11 +29953,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetRun parsedMessage = null; + org.mlflow.api.proto.Service.DeleteTag parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetRun) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.DeleteTag) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -30060,32 +29968,32 @@ public Builder mergeFrom( } private int bitField0_; - private java.lang.Object runUuid_ = ""; + private java.lang.Object runId_ = ""; /** *
-       * ID of the run to fetch.
+       * ID of the run that the tag was logged under. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public boolean hasRunUuid() { + public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-       * ID of the run to fetch.
+       * ID of the run that the tag was logged under. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; + public java.lang.String getRunId() { + java.lang.Object ref = runId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - runUuid_ = s; + runId_ = s; } return s; } else { @@ -30094,19 +30002,19 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run to fetch.
+       * ID of the run that the tag was logged under. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - runUuid_ = b; + runId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -30114,48 +30022,148 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run to fetch.
+       * ID of the run that the tag was logged under. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder setRunUuid( + public Builder setRunId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - runUuid_ = value; + runId_ = value; onChanged(); return this; } /** *
-       * ID of the run to fetch.
+       * ID of the run that the tag was logged under. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder clearRunUuid() { + public Builder clearRunId() { bitField0_ = (bitField0_ & ~0x00000001); - runUuid_ = getDefaultInstance().getRunUuid(); + runId_ = getDefaultInstance().getRunId(); onChanged(); return this; } /** *
-       * ID of the run to fetch.
+       * ID of the run that the tag was logged under. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1 [(.mlflow.validate_required) = true]; */ - public Builder setRunUuidBytes( + public Builder setRunIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - runUuid_ = value; + runId_ = value; + onChanged(); + return this; + } + + private java.lang.Object key_ = ""; + /** + *
+       * Name of the tag. Maximum size is 255 bytes. Must be provided.
+       * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + *
+       * Name of the tag. Maximum size is 255 bytes. Must be provided.
+       * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+       * Name of the tag. Maximum size is 255 bytes. Must be provided.
+       * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * Name of the tag. Maximum size is 255 bytes. Must be provided.
+       * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + key_ = value; + onChanged(); + return this; + } + /** + *
+       * Name of the tag. Maximum size is 255 bytes. Must be provided.
+       * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000002); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + *
+       * Name of the tag. Maximum size is 255 bytes. Must be provided.
+       * 
+ * + * optional string key = 2 [(.mlflow.validate_required) = true]; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + key_ = value; onChanged(); return this; } @@ -30172,117 +30180,120 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetRun) + // @@protoc_insertion_point(builder_scope:mlflow.DeleteTag) } - // @@protoc_insertion_point(class_scope:mlflow.GetRun) - private static final org.mlflow.api.proto.Service.GetRun DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.DeleteTag) + private static final org.mlflow.api.proto.Service.DeleteTag DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetRun(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.DeleteTag(); } - public static org.mlflow.api.proto.Service.GetRun getDefaultInstance() { + public static org.mlflow.api.proto.Service.DeleteTag getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public GetRun parsePartialFrom( + public DeleteTag parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetRun(input, extensionRegistry); + return new DeleteTag(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.GetRun getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.DeleteTag getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface GetMetricOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetMetric) + public interface GetRunOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.GetRun) com.google.protobuf.MessageOrBuilder { /** *
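// A minimal sketch of how client code might exercise the DeleteTag message generated above.
// The run ID and tag key values are hypothetical; every method used (newBuilder, setRunId,
// setKey, build, toByteArray, parseFrom) appears in the generated API shown in this file.
import com.google.protobuf.InvalidProtocolBufferException;
import org.mlflow.api.proto.Service.DeleteTag;

class DeleteTagSketch {
  static DeleteTag roundTrip() throws InvalidProtocolBufferException {
    DeleteTag request = DeleteTag.newBuilder()
        .setRunId("0f1e2d3c4b5a")   // run_id supersedes the deprecated run_uuid naming
        .setKey("model.stage")      // tag key; the field comment caps it at 255 bytes
        .build();
    byte[] payload = request.toByteArray();  // serialize, e.g. for a REST transport
    return DeleteTag.parseFrom(payload);     // any of the parseFrom overloads above works
  }
}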
-     * ID of the run from which to retrieve the metric value.
+     * ID of the run to fetch. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - boolean hasRunUuid(); + boolean hasRunId(); /** *
-     * ID of the run from which to retrieve the metric value.
+     * ID of the run to fetch. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - java.lang.String getRunUuid(); + java.lang.String getRunId(); /** *
-     * ID of the run from which to retrieve the metric value.
+     * ID of the run to fetch. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ com.google.protobuf.ByteString - getRunUuidBytes(); + getRunIdBytes(); /** *
-     * Name of the metric.
+     * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - boolean hasMetricKey(); + boolean hasRunUuid(); /** *
-     * Name of the metric.
+     * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - java.lang.String getMetricKey(); + java.lang.String getRunUuid(); /** *
-     * Name of the metric.
+     * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ com.google.protobuf.ByteString - getMetricKeyBytes(); + getRunUuidBytes(); } /** - * Protobuf type {@code mlflow.GetMetric} + * Protobuf type {@code mlflow.GetRun} */ - public static final class GetMetric extends + public static final class GetRun extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetMetric) - GetMetricOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.GetRun) + GetRunOrBuilder { private static final long serialVersionUID = 0L; - // Use GetMetric.newBuilder() to construct. - private GetMetric(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use GetRun.newBuilder() to construct. + private GetRun(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private GetMetric() { + private GetRun() { + runId_ = ""; runUuid_ = ""; - metricKey_ = ""; } @java.lang.Override @@ -30290,7 +30301,7 @@ private GetMetric() { getUnknownFields() { return this.unknownFields; } - private GetMetric( + private GetRun( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -30311,14 +30322,14 @@ private GetMetric( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = bs; break; } case 18: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - metricKey_ = bs; + bitField0_ |= 0x00000001; + runId_ = bs; break; } default: { @@ -30342,52 +30353,52 @@ private GetMetric( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetric.class, org.mlflow.api.proto.Service.GetMetric.Builder.class); + org.mlflow.api.proto.Service.GetRun.class, org.mlflow.api.proto.Service.GetRun.Builder.class); } public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetMetric.Response) + // @@protoc_insertion_point(interface_extends:mlflow.GetRun.Response) com.google.protobuf.MessageOrBuilder { /** *
-       * Latest reported value of the specified metric.
+       * Run metadata (name, start time, etc) and data (metrics, params, and tags).
        * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - boolean hasMetric(); + boolean hasRun(); /** *
-       * Latest reported value of the specified metric.
+       * Run metadata (name, start time, etc) and data (metrics, params, and tags).
        * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - org.mlflow.api.proto.Service.Metric getMetric(); + org.mlflow.api.proto.Service.Run getRun(); /** *
-       * Latest reported value of the specified metric.
+       * Run metadata (name, start time, etc) and data (metrics, params, and tags).
        * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - org.mlflow.api.proto.Service.MetricOrBuilder getMetricOrBuilder(); + org.mlflow.api.proto.Service.RunOrBuilder getRunOrBuilder(); } /** - * Protobuf type {@code mlflow.GetMetric.Response} + * Protobuf type {@code mlflow.GetRun.Response} */ public static final class Response extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetMetric.Response) + // @@protoc_insertion_point(message_implements:mlflow.GetRun.Response) ResponseOrBuilder { private static final long serialVersionUID = 0L; // Use Response.newBuilder() to construct. @@ -30422,14 +30433,14 @@ private Response( done = true; break; case 10: { - org.mlflow.api.proto.Service.Metric.Builder subBuilder = null; + org.mlflow.api.proto.Service.Run.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = metric_.toBuilder(); + subBuilder = run_.toBuilder(); } - metric_ = input.readMessage(org.mlflow.api.proto.Service.Metric.PARSER, extensionRegistry); + run_ = input.readMessage(org.mlflow.api.proto.Service.Run.PARSER, extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(metric_); - metric_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(run_); + run_ = subBuilder.buildPartial(); } bitField0_ |= 0x00000001; break; @@ -30455,49 +30466,49 @@ private Response( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetric.Response.class, org.mlflow.api.proto.Service.GetMetric.Response.Builder.class); + org.mlflow.api.proto.Service.GetRun.Response.class, org.mlflow.api.proto.Service.GetRun.Response.Builder.class); } private int bitField0_; - public static final int METRIC_FIELD_NUMBER = 1; - private org.mlflow.api.proto.Service.Metric metric_; + public static final int RUN_FIELD_NUMBER = 1; + private org.mlflow.api.proto.Service.Run run_; /** *
-       * Latest reported value of the specified metric.
+       * Run metadata (name, start time, etc) and data (metrics, params, and tags).
        * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public boolean hasMetric() { + public boolean hasRun() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-       * Latest reported value of the specified metric.
+       * Run metadata (name, start time, etc) and data (metrics, params, and tags).
        * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public org.mlflow.api.proto.Service.Metric getMetric() { - return metric_ == null ? org.mlflow.api.proto.Service.Metric.getDefaultInstance() : metric_; + public org.mlflow.api.proto.Service.Run getRun() { + return run_ == null ? org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; } /** *
-       * Latest reported value of the specified metric.
+       * Run metadata (name, start time, etc) and data (metrics, params, and tags).
        * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public org.mlflow.api.proto.Service.MetricOrBuilder getMetricOrBuilder() { - return metric_ == null ? org.mlflow.api.proto.Service.Metric.getDefaultInstance() : metric_; + public org.mlflow.api.proto.Service.RunOrBuilder getRunOrBuilder() { + return run_ == null ? org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; } private byte memoizedIsInitialized = -1; @@ -30515,7 +30526,7 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, getMetric()); + output.writeMessage(1, getRun()); } unknownFields.writeTo(output); } @@ -30528,7 +30539,7 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getMetric()); + .computeMessageSize(1, getRun()); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -30540,16 +30551,16 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetMetric.Response)) { + if (!(obj instanceof org.mlflow.api.proto.Service.GetRun.Response)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetMetric.Response other = (org.mlflow.api.proto.Service.GetMetric.Response) obj; + org.mlflow.api.proto.Service.GetRun.Response other = (org.mlflow.api.proto.Service.GetRun.Response) obj; boolean result = true; - result = result && (hasMetric() == other.hasMetric()); - if (hasMetric()) { - result = result && getMetric() - .equals(other.getMetric()); + result = result && (hasRun() == other.hasRun()); + if (hasRun()) { + result = result && getRun() + .equals(other.getRun()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -30562,78 +30573,78 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasMetric()) { - hash = (37 * hash) + METRIC_FIELD_NUMBER; - hash = (53 * hash) + getMetric().hashCode(); + if (hasRun()) { + hash = (37 * hash) + RUN_FIELD_NUMBER; + hash = (53 * hash) + getRun().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.GetRun.Response parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseDelimitedFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( + public static org.mlflow.api.proto.Service.GetRun.Response parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -30646,7 +30657,7 @@ public static org.mlflow.api.proto.Service.GetMetric.Response parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetMetric.Response prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.GetRun.Response prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -30662,26 +30673,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetMetric.Response} + * Protobuf type {@code mlflow.GetRun.Response} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetMetric.Response) - 
org.mlflow.api.proto.Service.GetMetric.ResponseOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.GetRun.Response) + org.mlflow.api.proto.Service.GetRun.ResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetric.Response.class, org.mlflow.api.proto.Service.GetMetric.Response.Builder.class); + org.mlflow.api.proto.Service.GetRun.Response.class, org.mlflow.api.proto.Service.GetRun.Response.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetMetric.Response.newBuilder() + // Construct using org.mlflow.api.proto.Service.GetRun.Response.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -30694,16 +30705,16 @@ private Builder( private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getMetricFieldBuilder(); + getRunFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); - if (metricBuilder_ == null) { - metric_ = null; + if (runBuilder_ == null) { + run_ = null; } else { - metricBuilder_.clear(); + runBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; @@ -30712,17 +30723,17 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_Response_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetMetric.Response.getDefaultInstance(); + public org.mlflow.api.proto.Service.GetRun.Response getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.GetRun.Response.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric.Response build() { - org.mlflow.api.proto.Service.GetMetric.Response result = buildPartial(); + public org.mlflow.api.proto.Service.GetRun.Response build() { + org.mlflow.api.proto.Service.GetRun.Response result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -30730,17 +30741,17 @@ public org.mlflow.api.proto.Service.GetMetric.Response build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric.Response buildPartial() { - org.mlflow.api.proto.Service.GetMetric.Response result = new org.mlflow.api.proto.Service.GetMetric.Response(this); + public org.mlflow.api.proto.Service.GetRun.Response buildPartial() { + org.mlflow.api.proto.Service.GetRun.Response result = new org.mlflow.api.proto.Service.GetRun.Response(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (metricBuilder_ == null) { - result.metric_ = metric_; + if (runBuilder_ == null) { + result.run_ = run_; } else { - 
result.metric_ = metricBuilder_.build(); + result.run_ = runBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); @@ -30781,18 +30792,18 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetMetric.Response) { - return mergeFrom((org.mlflow.api.proto.Service.GetMetric.Response)other); + if (other instanceof org.mlflow.api.proto.Service.GetRun.Response) { + return mergeFrom((org.mlflow.api.proto.Service.GetRun.Response)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetMetric.Response other) { - if (other == org.mlflow.api.proto.Service.GetMetric.Response.getDefaultInstance()) return this; - if (other.hasMetric()) { - mergeMetric(other.getMetric()); + public Builder mergeFrom(org.mlflow.api.proto.Service.GetRun.Response other) { + if (other == org.mlflow.api.proto.Service.GetRun.Response.getDefaultInstance()) return this; + if (other.hasRun()) { + mergeRun(other.getRun()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -30809,11 +30820,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetMetric.Response parsedMessage = null; + org.mlflow.api.proto.Service.GetRun.Response parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetMetric.Response) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.GetRun.Response) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -30824,158 +30835,158 @@ public Builder mergeFrom( } private int bitField0_; - private org.mlflow.api.proto.Service.Metric metric_ = null; + private org.mlflow.api.proto.Service.Run run_ = null; private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> metricBuilder_; + org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> runBuilder_; /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public boolean hasMetric() { + public boolean hasRun() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public org.mlflow.api.proto.Service.Metric getMetric() { - if (metricBuilder_ == null) { - return metric_ == null ? org.mlflow.api.proto.Service.Metric.getDefaultInstance() : metric_; + public org.mlflow.api.proto.Service.Run getRun() { + if (runBuilder_ == null) { + return run_ == null ? org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; } else { - return metricBuilder_.getMessage(); + return runBuilder_.getMessage(); } } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public Builder setMetric(org.mlflow.api.proto.Service.Metric value) { - if (metricBuilder_ == null) { + public Builder setRun(org.mlflow.api.proto.Service.Run value) { + if (runBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - metric_ = value; + run_ = value; onChanged(); } else { - metricBuilder_.setMessage(value); + runBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public Builder setMetric( - org.mlflow.api.proto.Service.Metric.Builder builderForValue) { - if (metricBuilder_ == null) { - metric_ = builderForValue.build(); + public Builder setRun( + org.mlflow.api.proto.Service.Run.Builder builderForValue) { + if (runBuilder_ == null) { + run_ = builderForValue.build(); onChanged(); } else { - metricBuilder_.setMessage(builderForValue.build()); + runBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public Builder mergeMetric(org.mlflow.api.proto.Service.Metric value) { - if (metricBuilder_ == null) { + public Builder mergeRun(org.mlflow.api.proto.Service.Run value) { + if (runBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && - metric_ != null && - metric_ != org.mlflow.api.proto.Service.Metric.getDefaultInstance()) { - metric_ = - org.mlflow.api.proto.Service.Metric.newBuilder(metric_).mergeFrom(value).buildPartial(); + run_ != null && + run_ != org.mlflow.api.proto.Service.Run.getDefaultInstance()) { + run_ = + org.mlflow.api.proto.Service.Run.newBuilder(run_).mergeFrom(value).buildPartial(); } else { - metric_ = value; + run_ = value; } onChanged(); } else { - metricBuilder_.mergeFrom(value); + runBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public Builder clearMetric() { - if (metricBuilder_ == null) { - metric_ = null; + public Builder clearRun() { + if (runBuilder_ == null) { + run_ = null; onChanged(); } else { - metricBuilder_.clear(); + runBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public org.mlflow.api.proto.Service.Metric.Builder getMetricBuilder() { + public org.mlflow.api.proto.Service.Run.Builder getRunBuilder() { bitField0_ |= 0x00000001; onChanged(); - return getMetricFieldBuilder().getBuilder(); + return getRunFieldBuilder().getBuilder(); } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ - public org.mlflow.api.proto.Service.MetricOrBuilder getMetricOrBuilder() { - if (metricBuilder_ != null) { - return metricBuilder_.getMessageOrBuilder(); + public org.mlflow.api.proto.Service.RunOrBuilder getRunOrBuilder() { + if (runBuilder_ != null) { + return runBuilder_.getMessageOrBuilder(); } else { - return metric_ == null ? - org.mlflow.api.proto.Service.Metric.getDefaultInstance() : metric_; + return run_ == null ? + org.mlflow.api.proto.Service.Run.getDefaultInstance() : run_; } } /** *
-         * Latest reported value of the specified metric.
+         * Run metadata (name, start time, etc) and data (metrics, params, and tags).
          * 
* - * optional .mlflow.Metric metric = 1; + * optional .mlflow.Run run = 1; */ private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> - getMetricFieldBuilder() { - if (metricBuilder_ == null) { - metricBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder>( - getMetric(), + org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> + getRunFieldBuilder() { + if (runBuilder_ == null) { + runBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder>( + getRun(), getParentForChildren(), isClean()); - metric_ = null; + run_ = null; } - return metricBuilder_; + return runBuilder_; } @java.lang.Override public final Builder setUnknownFields( @@ -30990,16 +31001,16 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetMetric.Response) + // @@protoc_insertion_point(builder_scope:mlflow.GetRun.Response) } - // @@protoc_insertion_point(class_scope:mlflow.GetMetric.Response) - private static final org.mlflow.api.proto.Service.GetMetric.Response DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.GetRun.Response) + private static final org.mlflow.api.proto.Service.GetRun.Response DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetMetric.Response(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetRun.Response(); } - public static org.mlflow.api.proto.Service.GetMetric.Response getDefaultInstance() { + public static org.mlflow.api.proto.Service.GetRun.Response getDefaultInstance() { return DEFAULT_INSTANCE; } @@ -31024,34 +31035,34 @@ public com.google.protobuf.Parser getParserForType() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric.Response getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.GetRun.Response getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; - public static final int RUN_UUID_FIELD_NUMBER = 1; - private volatile java.lang.Object runUuid_; + public static final int RUN_ID_FIELD_NUMBER = 2; + private volatile java.lang.Object runId_; /** *
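// A minimal sketch of the GetRun request/response pair generated above. `responseBytes` stands
// in for whatever the HTTP layer returned; GetRun.Builder#setRunId is assumed by symmetry with
// DeleteTag.Builder#setRunId shown earlier. Per the field comments in this diff, clients should
// populate run_id (field 2) and leave the deprecated run_uuid (field 1) unset.
import com.google.protobuf.InvalidProtocolBufferException;
import org.mlflow.api.proto.Service;
import org.mlflow.api.proto.Service.GetRun;

class GetRunSketch {
  static Service.Run fetchRun(byte[] responseBytes) throws InvalidProtocolBufferException {
    GetRun request = GetRun.newBuilder().setRunId("0f1e2d3c4b5a").build(); // hypothetical run ID
    // ... transport sends `request` and yields `responseBytes` (out of scope for this sketch) ...
    GetRun.Response response = GetRun.Response.parseFrom(responseBytes);
    // hasRun()/getRun() are the accessors generated for the `run` field above.
    return response.hasRun() ? response.getRun() : Service.Run.getDefaultInstance();
  }
}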
-     * ID of the run from which to retrieve the metric value.
+     * ID of the run to fetch. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public boolean hasRunUuid() { + public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-     * ID of the run from which to retrieve the metric value.
+     * ID of the run to fetch. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; + public java.lang.String getRunId() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -31059,53 +31070,55 @@ public java.lang.String getRunUuid() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - runUuid_ = s; + runId_ = s; } return s; } } /** *
-     * ID of the run from which to retrieve the metric value.
+     * ID of the run to fetch. Must be provided.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - runUuid_ = b; + runId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int METRIC_KEY_FIELD_NUMBER = 2; - private volatile java.lang.Object metricKey_; + public static final int RUN_UUID_FIELD_NUMBER = 1; + private volatile java.lang.Object runUuid_; /** *
-     * Name of the metric.
+     * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public boolean hasMetricKey() { + public boolean hasRunUuid() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * Name of the metric.
+     * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public java.lang.String getMetricKey() { - java.lang.Object ref = metricKey_; + public java.lang.String getRunUuid() { + java.lang.Object ref = runUuid_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -31113,26 +31126,27 @@ public java.lang.String getMetricKey() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - metricKey_ = s; + runUuid_ = s; } return s; } } /** *
-     * Name of the metric.
+     * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString - getMetricKeyBytes() { - java.lang.Object ref = metricKey_; + getRunUuidBytes() { + java.lang.Object ref = runUuid_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - metricKey_ = b; + runUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -31153,11 +31167,11 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, metricKey_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, runId_); } unknownFields.writeTo(output); } @@ -31168,11 +31182,11 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, metricKey_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, runId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -31184,22 +31198,22 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetMetric)) { + if (!(obj instanceof org.mlflow.api.proto.Service.GetRun)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetMetric other = (org.mlflow.api.proto.Service.GetMetric) obj; + org.mlflow.api.proto.Service.GetRun other = (org.mlflow.api.proto.Service.GetRun) obj; boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } result = result && (hasRunUuid() == other.hasRunUuid()); if (hasRunUuid()) { result = result && getRunUuid() .equals(other.getRunUuid()); } - result = result && (hasMetricKey() == other.hasMetricKey()); - if (hasMetricKey()) { - result = result && getMetricKey() - .equals(other.getMetricKey()); - } result = result && unknownFields.equals(other.unknownFields); return result; } @@ -31211,82 +31225,82 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } if (hasRunUuid()) { hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; hash = (53 * hash) + getRunUuid().hashCode(); } - if (hasMetricKey()) { - hash = (37 * hash) + METRIC_KEY_FIELD_NUMBER; - hash = (53 * hash) + getMetricKey().hashCode(); - } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); 
} - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.GetRun parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.GetRun parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.GetRun parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetMetric parseDelimitedFrom( + public static org.mlflow.api.proto.Service.GetRun parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetMetric parseFrom( + public static org.mlflow.api.proto.Service.GetRun parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -31299,7 +31313,7 @@ public static org.mlflow.api.proto.Service.GetMetric parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - 
public static Builder newBuilder(org.mlflow.api.proto.Service.GetMetric prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.GetRun prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -31315,26 +31329,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetMetric} + * Protobuf type {@code mlflow.GetRun} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetMetric) - org.mlflow.api.proto.Service.GetMetricOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.GetRun) + org.mlflow.api.proto.Service.GetRunOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetric.class, org.mlflow.api.proto.Service.GetMetric.Builder.class); + org.mlflow.api.proto.Service.GetRun.class, org.mlflow.api.proto.Service.GetRun.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetMetric.newBuilder() + // Construct using org.mlflow.api.proto.Service.GetRun.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -31352,9 +31366,9 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - metricKey_ = ""; + runUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -31362,17 +31376,17 @@ public Builder clear() { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetric_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetRun_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetMetric.getDefaultInstance(); + public org.mlflow.api.proto.Service.GetRun getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.GetRun.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric build() { - org.mlflow.api.proto.Service.GetMetric result = buildPartial(); + public org.mlflow.api.proto.Service.GetRun build() { + org.mlflow.api.proto.Service.GetRun result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -31380,18 +31394,18 @@ public org.mlflow.api.proto.Service.GetMetric build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric buildPartial() { - org.mlflow.api.proto.Service.GetMetric result = new org.mlflow.api.proto.Service.GetMetric(this); + public org.mlflow.api.proto.Service.GetRun buildPartial() { + org.mlflow.api.proto.Service.GetRun result = new org.mlflow.api.proto.Service.GetRun(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 
0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.metricKey_ = metricKey_; + result.runUuid_ = runUuid_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -31431,24 +31445,24 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetMetric) { - return mergeFrom((org.mlflow.api.proto.Service.GetMetric)other); + if (other instanceof org.mlflow.api.proto.Service.GetRun) { + return mergeFrom((org.mlflow.api.proto.Service.GetRun)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetMetric other) { - if (other == org.mlflow.api.proto.Service.GetMetric.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + public Builder mergeFrom(org.mlflow.api.proto.Service.GetRun other) { + if (other == org.mlflow.api.proto.Service.GetRun.getDefaultInstance()) return this; + if (other.hasRunId()) { bitField0_ |= 0x00000001; - runUuid_ = other.runUuid_; + runId_ = other.runId_; onChanged(); } - if (other.hasMetricKey()) { + if (other.hasRunUuid()) { bitField0_ |= 0x00000002; - metricKey_ = other.metricKey_; + runUuid_ = other.runUuid_; onChanged(); } this.mergeUnknownFields(other.unknownFields); @@ -31466,11 +31480,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetMetric parsedMessage = null; + org.mlflow.api.proto.Service.GetRun parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetMetric) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.GetRun) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -31481,32 +31495,32 @@ public Builder mergeFrom( } private int bitField0_; - private java.lang.Object runUuid_ = ""; + private java.lang.Object runId_ = ""; /** *
-       * ID of the run from which to retrieve the metric value.
+       * ID of the run to fetch. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public boolean hasRunUuid() { + public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-       * ID of the run from which to retrieve the metric value.
+       * ID of the run to fetch. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; + public java.lang.String getRunId() { + java.lang.Object ref = runId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - runUuid_ = s; + runId_ = s; } return s; } else { @@ -31515,19 +31529,19 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run from which to retrieve the metric value.
+       * ID of the run to fetch. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - runUuid_ = b; + runId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -31535,78 +31549,80 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run from which to retrieve the metric value.
+       * ID of the run to fetch. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public Builder setRunUuid( + public Builder setRunId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - runUuid_ = value; + runId_ = value; onChanged(); return this; } /** *
-       * ID of the run from which to retrieve the metric value.
+       * ID of the run to fetch. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public Builder clearRunUuid() { + public Builder clearRunId() { bitField0_ = (bitField0_ & ~0x00000001); - runUuid_ = getDefaultInstance().getRunUuid(); + runId_ = getDefaultInstance().getRunId(); onChanged(); return this; } /** *
-       * ID of the run from which to retrieve the metric value.
+       * ID of the run to fetch. Must be provided.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 2; */ - public Builder setRunUuidBytes( + public Builder setRunIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - runUuid_ = value; + runId_ = value; onChanged(); return this; } - private java.lang.Object metricKey_ = ""; + private java.lang.Object runUuid_ = ""; /** *
-       * Name of the metric.
+       * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public boolean hasMetricKey() { + public boolean hasRunUuid() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * Name of the metric.
+       * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public java.lang.String getMetricKey() { - java.lang.Object ref = metricKey_; + public java.lang.String getRunUuid() { + java.lang.Object ref = runUuid_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - metricKey_ = s; + runUuid_ = s; } return s; } else { @@ -31615,19 +31631,20 @@ public java.lang.String getMetricKey() { } /** *
-       * Name of the metric.
+       * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ public com.google.protobuf.ByteString - getMetricKeyBytes() { - java.lang.Object ref = metricKey_; + getRunUuidBytes() { + java.lang.Object ref = runUuid_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - metricKey_ = b; + runUuid_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -31635,48 +31652,51 @@ public java.lang.String getMetricKey() { } /** *
-       * Name of the metric.
+       * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public Builder setMetricKey( + public Builder setRunUuid( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - metricKey_ = value; + runUuid_ = value; onChanged(); return this; } /** *
-       * Name of the metric.
+       * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public Builder clearMetricKey() { + public Builder clearRunUuid() { bitField0_ = (bitField0_ & ~0x00000002); - metricKey_ = getDefaultInstance().getMetricKey(); + runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * Name of the metric.
+       * [Deprecated, use run_id instead] ID of the run to fetch. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * optional string run_uuid = 1; */ - public Builder setMetricKeyBytes( + public Builder setRunUuidBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - metricKey_ = value; + runUuid_ = value; onChanged(); return this; } @@ -31693,117 +31713,249 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetMetric) + // @@protoc_insertion_point(builder_scope:mlflow.GetRun) } - // @@protoc_insertion_point(class_scope:mlflow.GetMetric) - private static final org.mlflow.api.proto.Service.GetMetric DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.GetRun) + private static final org.mlflow.api.proto.Service.GetRun DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetMetric(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetRun(); } - public static org.mlflow.api.proto.Service.GetMetric getDefaultInstance() { + public static org.mlflow.api.proto.Service.GetRun getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public GetMetric parsePartialFrom( + public GetRun parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMetric(input, extensionRegistry); + return new GetRun(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetric getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.GetRun getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface GetParamOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetParam) + public interface SearchRunsOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.SearchRuns) com.google.protobuf.MessageOrBuilder { /** *
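
For readers of this generated API, a minimal sketch of issuing the renamed request: `GetRun` now carries the run identifier in `run_id` (field 2), with `run_uuid` (field 1) kept only for backward compatibility. The builder methods shown here appear in the diff above; the run ID value itself is hypothetical.

```java
import org.mlflow.api.proto.Service.GetRun;

public class GetRunExample {
  public static void main(String[] args) {
    // Prefer run_id (field 2); run_uuid (field 1) is deprecated and will be
    // removed in a future MLflow version. The ID here is a hypothetical value.
    GetRun request = GetRun.newBuilder()
        .setRunId("0a1b2c3d4e5f60718293a4b5c6d7e8f9")
        .build();
    System.out.println(request.hasRunId());  // true
    System.out.println(request.getRunId());
  }
}
```
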
-     * ID of the run from which to retrieve the param value.
+     * List of experiment IDs to search over.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - boolean hasRunUuid(); + java.util.List + getExperimentIdsList(); /** *
-     * ID of the run from which to retrieve the param value.
+     * List of experiment IDs to search over.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - java.lang.String getRunUuid(); + int getExperimentIdsCount(); + /** + *
+     * List of experiment IDs to search over.
+     * 
+ * + * repeated string experiment_ids = 1; + */ + java.lang.String getExperimentIds(int index); /** *
-     * ID of the run from which to retrieve the param value.
+     * List of experiment IDs to search over.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ com.google.protobuf.ByteString - getRunUuidBytes(); + getExperimentIdsBytes(int index); + + /** + *
+     * A filter expression over params, metrics, and tags that allows returning a subset of
+     * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+     * between a param, metric, or tag and a constant.
+     * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+     * You can select columns with special characters (hyphen, space, period, etc.) by using double quotes:
+     * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+     * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+     * 
+ * + * optional string filter = 4; + */ + boolean hasFilter(); + /** + *
+     * A filter expression over params, metrics, and tags that allows returning a subset of
+     * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+     * between a param, metric, or tag and a constant.
+     * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+     * You can select columns with special characters (hyphen, space, period, etc.) by using double quotes:
+     * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+     * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+     * 
+ * + * optional string filter = 4; + */ + java.lang.String getFilter(); + /** + *
+     * A filter expression over params, metrics, and tags that allows returning a subset of
+     * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+     * between a param, metric, or tag and a constant.
+     * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+     * You can select columns with special characters (hyphen, space, period, etc.) by using double quotes:
+     * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+     * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+     * 
+ * + * optional string filter = 4; + */ + com.google.protobuf.ByteString + getFilterBytes(); + + /** + *
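
A minimal sketch of the filter DSL documented above, assuming the standard protobuf-generated builder setters for these fields (`addExperimentIds`, `setFilter`); the experiment ID and filter values are illustrative only.

```java
import org.mlflow.api.proto.Service.SearchRuns;

public class FilterExample {
  public static void main(String[] args) {
    // AND-joined binary comparisons over params, metrics, and tags; identifiers
    // containing special characters would be wrapped in double quotes.
    SearchRuns request = SearchRuns.newBuilder()
        .addExperimentIds("0")  // hypothetical experiment ID
        .setFilter("metrics.rmse < 1 and params.model_class = 'LogisticRegression'")
        .build();
    System.out.println(request.getFilter());
  }
}
```
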
+     * Whether to display only active, only deleted, or all runs.
+     * Defaults to only active runs.
+     * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + */ + boolean hasRunViewType(); + /** + *
+     * Whether to display only active, only deleted, or all runs.
+     * Defaults to only active runs.
+     * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + */ + org.mlflow.api.proto.Service.ViewType getRunViewType(); + + /** + *
+     * Maximum number of runs desired. The maximum threshold is 50000.
+     * 
+ * + * optional int32 max_results = 5 [default = 1000]; + */ + boolean hasMaxResults(); + /** + *
+     * Maximum number of runs desired. The maximum threshold is 50000.
+     * 
+ * + * optional int32 max_results = 5 [default = 1000]; + */ + int getMaxResults(); /** *
-     * Name of the param.
+     * List of columns to order by, drawn from attributes, params, metrics, and tags, each with an
+     * optional "DESC" or "ASC" annotation ("ASC" is the default).
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, then by run_id, for runs with the same start time
+     * (this is also the default ordering when order_by is not provided).
+     * 
+ * + * repeated string order_by = 6; + */ + java.util.List + getOrderByList(); + /** + *
+     * List of columns to order by, drawn from attributes, params, metrics, and tags, each with an
+     * optional "DESC" or "ASC" annotation ("ASC" is the default).
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, then by run_id, for runs with the same start time
+     * (this is also the default ordering when order_by is not provided).
      * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * repeated string order_by = 6; */ - boolean hasParamName(); + int getOrderByCount(); /** *
-     * Name of the param.
+     * List of columns to order by, drawn from attributes, params, metrics, and tags, each with an
+     * optional "DESC" or "ASC" annotation ("ASC" is the default).
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, then by run_id, for runs with the same start time
+     * (this is also the default ordering when order_by is not provided).
      * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * repeated string order_by = 6; */ - java.lang.String getParamName(); + java.lang.String getOrderBy(int index); /** *
-     * Name of the param.
+     * List of columns to order by, drawn from attributes, params, metrics, and tags, each with an
+     * optional "DESC" or "ASC" annotation ("ASC" is the default).
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, then by run_id, for runs with the same start time
+     * (this is also the default ordering when order_by is not provided).
      * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * repeated string order_by = 6; */ com.google.protobuf.ByteString - getParamNameBytes(); + getOrderByBytes(int index); + + /** + * optional string page_token = 7; + */ + boolean hasPageToken(); + /** + * optional string page_token = 7; + */ + java.lang.String getPageToken(); + /** + * optional string page_token = 7; + */ + com.google.protobuf.ByteString + getPageTokenBytes(); } /** - * Protobuf type {@code mlflow.GetParam} + * Protobuf type {@code mlflow.SearchRuns} */ - public static final class GetParam extends + public static final class SearchRuns extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetParam) - GetParamOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.SearchRuns) + SearchRunsOrBuilder { private static final long serialVersionUID = 0L; - // Use GetParam.newBuilder() to construct. - private GetParam(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use SearchRuns.newBuilder() to construct. + private SearchRuns(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private GetParam() { - runUuid_ = ""; - paramName_ = ""; + private SearchRuns() { + experimentIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + filter_ = ""; + runViewType_ = 1; + maxResults_ = 1000; + orderBy_ = com.google.protobuf.LazyStringArrayList.EMPTY; + pageToken_ = ""; } @java.lang.Override @@ -31811,7 +31963,7 @@ private GetParam() { getUnknownFields() { return this.unknownFields; } - private GetParam( + private SearchRuns( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -31831,15 +31983,50 @@ private GetParam( done = true; break; case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + experimentIds_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + experimentIds_.add(bs); + break; + } + case 24: { + int rawValue = input.readEnum(); + @SuppressWarnings("deprecation") + org.mlflow.api.proto.Service.ViewType value = org.mlflow.api.proto.Service.ViewType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000002; + runViewType_ = rawValue; + } + break; + } + case 34: { com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - runUuid_ = bs; + filter_ = bs; break; } - case 18: { + case 40: { + bitField0_ |= 0x00000004; + maxResults_ = input.readInt32(); + break; + } + case 50: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - paramName_ = bs; + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + orderBy_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000010; + } + orderBy_.add(bs); + break; + } + case 58: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000008; + pageToken_ = bs; break; } default: { @@ -31857,58 +32044,97 @@ private GetParam( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + experimentIds_ = experimentIds_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + orderBy_ = orderBy_.getUnmodifiableView(); + } this.unknownFields = 
unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetParam.class, org.mlflow.api.proto.Service.GetParam.Builder.class); + org.mlflow.api.proto.Service.SearchRuns.class, org.mlflow.api.proto.Service.SearchRuns.Builder.class); } public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetParam.Response) + // @@protoc_insertion_point(interface_extends:mlflow.SearchRuns.Response) com.google.protobuf.MessageOrBuilder { /** *
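
Putting the request fields together, a sketch of a complete `SearchRuns` message, again assuming the standard generated setters (`setRunViewType`, `setMaxResults`, `addOrderBy`); all values are illustrative.

```java
import org.mlflow.api.proto.Service.SearchRuns;
import org.mlflow.api.proto.Service.ViewType;

public class SearchRunsRequestExample {
  public static void main(String[] args) {
    SearchRuns request = SearchRuns.newBuilder()
        .addExperimentIds("0")                 // hypothetical experiment ID
        .setRunViewType(ViewType.ACTIVE_ONLY)  // the documented default
        .setMaxResults(100)                    // stay well under the 50000 threshold
        .addOrderBy("metrics.rmse")            // "ASC" is implied
        .addOrderBy("params.input DESC")
        .build();
    System.out.println(request.getOrderByCount());  // 2
  }
}
```
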
-       * Param key-value pair.
+       * Runs that match the search criteria.
+       * 
+ * + * repeated .mlflow.Run runs = 1; + */ + java.util.List + getRunsList(); + /** + *
+       * Runs that match the search criteria.
+       * 
+ * + * repeated .mlflow.Run runs = 1; + */ + org.mlflow.api.proto.Service.Run getRuns(int index); + /** + *
+       * Runs that match the search criteria.
        * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - boolean hasParameter(); + int getRunsCount(); /** *
-       * Param key-value pair.
+       * Runs that match the search criteria.
        * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - org.mlflow.api.proto.Service.Param getParameter(); + java.util.List + getRunsOrBuilderList(); /** *
-       * Param key-value pair.
+       * Runs that match the search criteria.
        * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; + */ + org.mlflow.api.proto.Service.RunOrBuilder getRunsOrBuilder( + int index); + + /** + * optional string next_page_token = 2; + */ + boolean hasNextPageToken(); + /** + * optional string next_page_token = 2; + */ + java.lang.String getNextPageToken(); + /** + * optional string next_page_token = 2; */ - org.mlflow.api.proto.Service.ParamOrBuilder getParameterOrBuilder(); + com.google.protobuf.ByteString + getNextPageTokenBytes(); } /** - * Protobuf type {@code mlflow.GetParam.Response} + * Protobuf type {@code mlflow.SearchRuns.Response} */ public static final class Response extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetParam.Response) + // @@protoc_insertion_point(message_implements:mlflow.SearchRuns.Response) ResponseOrBuilder { private static final long serialVersionUID = 0L; // Use Response.newBuilder() to construct. @@ -31916,6 +32142,8 @@ private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Response() { + runs_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; } @java.lang.Override @@ -31943,16 +32171,18 @@ private Response( done = true; break; case 10: { - org.mlflow.api.proto.Service.Param.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = parameter_.toBuilder(); - } - parameter_ = input.readMessage(org.mlflow.api.proto.Service.Param.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(parameter_); - parameter_ = subBuilder.buildPartial(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + runs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } + runs_.add( + input.readMessage(org.mlflow.api.proto.Service.Run.PARSER, extensionRegistry)); + break; + } + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; + nextPageToken_ = bs; break; } default: { @@ -31970,55 +32200,122 @@ private Response( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + runs_ = java.util.Collections.unmodifiableList(runs_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetParam.Response.class, org.mlflow.api.proto.Service.GetParam.Response.Builder.class); + org.mlflow.api.proto.Service.SearchRuns.Response.class, org.mlflow.api.proto.Service.SearchRuns.Response.Builder.class); } private int bitField0_; - public static final int PARAMETER_FIELD_NUMBER = 1; - private org.mlflow.api.proto.Service.Param parameter_; + public static final int RUNS_FIELD_NUMBER = 1; + private java.util.List runs_; /** *
-       * Param key-value pair.
+       * Runs that match the search criteria.
        * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public boolean hasParameter() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getRunsList() { + return runs_; + } + /** + *
+       * Runs that match the search criteria.
+       * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public java.util.List + getRunsOrBuilderList() { + return runs_; + } + /** + *
+       * Runs that match the search criteria.
+       * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public int getRunsCount() { + return runs_.size(); } /** *
-       * Param key-value pair.
+       * Runs that match the search criteria.
        * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public org.mlflow.api.proto.Service.Param getParameter() { - return parameter_ == null ? org.mlflow.api.proto.Service.Param.getDefaultInstance() : parameter_; + public org.mlflow.api.proto.Service.Run getRuns(int index) { + return runs_.get(index); } /** *
-       * Param key-value pair.
+       * Runs that match the search criteria.
        * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; + */ + public org.mlflow.api.proto.Service.RunOrBuilder getRunsOrBuilder( + int index) { + return runs_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + private volatile java.lang.Object nextPageToken_; + /** + * optional string next_page_token = 2; + */ + public boolean hasNextPageToken() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + nextPageToken_ = s; + } + return s; + } + } + /** + * optional string next_page_token = 2; */ - public org.mlflow.api.proto.Service.ParamOrBuilder getParameterOrBuilder() { - return parameter_ == null ? org.mlflow.api.proto.Service.Param.getDefaultInstance() : parameter_; + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } private byte memoizedIsInitialized = -1; @@ -32035,8 +32332,11 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < runs_.size(); i++) { + output.writeMessage(1, runs_.get(i)); + } if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, getParameter()); + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); } unknownFields.writeTo(output); } @@ -32047,9 +32347,12 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + for (int i = 0; i < runs_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getParameter()); + .computeMessageSize(1, runs_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -32061,16 +32364,18 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetParam.Response)) { + if (!(obj instanceof org.mlflow.api.proto.Service.SearchRuns.Response)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetParam.Response other = (org.mlflow.api.proto.Service.GetParam.Response) obj; + org.mlflow.api.proto.Service.SearchRuns.Response other = (org.mlflow.api.proto.Service.SearchRuns.Response) obj; boolean result = true; - result = result && (hasParameter() == other.hasParameter()); - if (hasParameter()) { - result = result && getParameter() - .equals(other.getParameter()); + result = result && getRunsList() + .equals(other.getRunsList()); + result = result && (hasNextPageToken() == other.hasNextPageToken()); + if (hasNextPageToken()) { + result = result && getNextPageToken() + .equals(other.getNextPageToken()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -32083,78 +32388,82 @@ public 
int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasParameter()) { - hash = (37 * hash) + PARAMETER_FIELD_NUMBER; - hash = (53 * hash) + getParameter().hashCode(); + if (getRunsCount() > 0) { + hash = (37 * hash) + RUNS_FIELD_NUMBER; + hash = (53 * hash) + getRunsList().hashCode(); + } + if (hasNextPageToken()) { + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam.Response parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.SearchRuns.Response parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetParam.Response parseDelimitedFrom( + public static 
org.mlflow.api.proto.Service.SearchRuns.Response parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -32167,7 +32476,7 @@ public static org.mlflow.api.proto.Service.GetParam.Response parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetParam.Response prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.SearchRuns.Response prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -32183,26 +32492,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetParam.Response} + * Protobuf type {@code mlflow.SearchRuns.Response} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetParam.Response) - org.mlflow.api.proto.Service.GetParam.ResponseOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.SearchRuns.Response) + org.mlflow.api.proto.Service.SearchRuns.ResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetParam.Response.class, org.mlflow.api.proto.Service.GetParam.Response.Builder.class); + org.mlflow.api.proto.Service.SearchRuns.Response.class, org.mlflow.api.proto.Service.SearchRuns.Response.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetParam.Response.newBuilder() + // Construct using org.mlflow.api.proto.Service.SearchRuns.Response.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -32215,35 +32524,37 @@ private Builder( private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getParameterFieldBuilder(); + getRunsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); - if (parameterBuilder_ == null) { - parameter_ = null; + if (runsBuilder_ == null) { + runs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); } else { - parameterBuilder_.clear(); + runsBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); + 
nextPageToken_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetParam.Response.getDefaultInstance(); + public org.mlflow.api.proto.Service.SearchRuns.Response getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.SearchRuns.Response.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam.Response build() { - org.mlflow.api.proto.Service.GetParam.Response result = buildPartial(); + public org.mlflow.api.proto.Service.SearchRuns.Response build() { + org.mlflow.api.proto.Service.SearchRuns.Response result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -32251,18 +32562,23 @@ public org.mlflow.api.proto.Service.GetParam.Response build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam.Response buildPartial() { - org.mlflow.api.proto.Service.GetParam.Response result = new org.mlflow.api.proto.Service.GetParam.Response(this); + public org.mlflow.api.proto.Service.SearchRuns.Response buildPartial() { + org.mlflow.api.proto.Service.SearchRuns.Response result = new org.mlflow.api.proto.Service.SearchRuns.Response(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (parameterBuilder_ == null) { - result.parameter_ = parameter_; + if (runsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + runs_ = java.util.Collections.unmodifiableList(runs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.runs_ = runs_; } else { - result.parameter_ = parameterBuilder_.build(); + result.runs_ = runsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; } + result.nextPageToken_ = nextPageToken_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -32302,18 +32618,46 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetParam.Response) { - return mergeFrom((org.mlflow.api.proto.Service.GetParam.Response)other); + if (other instanceof org.mlflow.api.proto.Service.SearchRuns.Response) { + return mergeFrom((org.mlflow.api.proto.Service.SearchRuns.Response)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetParam.Response other) { - if (other == org.mlflow.api.proto.Service.GetParam.Response.getDefaultInstance()) return this; - if (other.hasParameter()) { - mergeParameter(other.getParameter()); + public Builder mergeFrom(org.mlflow.api.proto.Service.SearchRuns.Response other) { + if (other == org.mlflow.api.proto.Service.SearchRuns.Response.getDefaultInstance()) return this; + if (runsBuilder_ == null) { + if (!other.runs_.isEmpty()) { + if (runs_.isEmpty()) { + runs_ = other.runs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRunsIsMutable(); + runs_.addAll(other.runs_); + } + onChanged(); + } + } else { + if (!other.runs_.isEmpty()) { + if (runsBuilder_.isEmpty()) 
{ + runsBuilder_.dispose(); + runsBuilder_ = null; + runs_ = other.runs_; + bitField0_ = (bitField0_ & ~0x00000001); + runsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getRunsFieldBuilder() : null; + } else { + runsBuilder_.addAllMessages(other.runs_); + } + } + } + if (other.hasNextPageToken()) { + bitField0_ |= 0x00000002; + nextPageToken_ = other.nextPageToken_; + onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -32330,11 +32674,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetParam.Response parsedMessage = null; + org.mlflow.api.proto.Service.SearchRuns.Response parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetParam.Response) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.SearchRuns.Response) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -32345,158 +32689,392 @@ public Builder mergeFrom( } private int bitField0_; - private org.mlflow.api.proto.Service.Param parameter_ = null; - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Param, org.mlflow.api.proto.Service.Param.Builder, org.mlflow.api.proto.Service.ParamOrBuilder> parameterBuilder_; + private java.util.List runs_ = + java.util.Collections.emptyList(); + private void ensureRunsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + runs_ = new java.util.ArrayList(runs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> runsBuilder_; + /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public boolean hasParameter() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getRunsList() { + if (runsBuilder_ == null) { + return java.util.Collections.unmodifiableList(runs_); + } else { + return runsBuilder_.getMessageList(); + } + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public int getRunsCount() { + if (runsBuilder_ == null) { + return runs_.size(); + } else { + return runsBuilder_.getCount(); + } } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public org.mlflow.api.proto.Service.Param getParameter() { - if (parameterBuilder_ == null) { - return parameter_ == null ? org.mlflow.api.proto.Service.Param.getDefaultInstance() : parameter_; + public org.mlflow.api.proto.Service.Run getRuns(int index) { + if (runsBuilder_ == null) { + return runs_.get(index); } else { - return parameterBuilder_.getMessage(); + return runsBuilder_.getMessage(index); } } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public Builder setParameter(org.mlflow.api.proto.Service.Param value) { - if (parameterBuilder_ == null) { + public Builder setRuns( + int index, org.mlflow.api.proto.Service.Run value) { + if (runsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - parameter_ = value; + ensureRunsIsMutable(); + runs_.set(index, value); onChanged(); } else { - parameterBuilder_.setMessage(value); + runsBuilder_.setMessage(index, value); } - bitField0_ |= 0x00000001; return this; } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public Builder setParameter( - org.mlflow.api.proto.Service.Param.Builder builderForValue) { - if (parameterBuilder_ == null) { - parameter_ = builderForValue.build(); + public Builder setRuns( + int index, org.mlflow.api.proto.Service.Run.Builder builderForValue) { + if (runsBuilder_ == null) { + ensureRunsIsMutable(); + runs_.set(index, builderForValue.build()); onChanged(); } else { - parameterBuilder_.setMessage(builderForValue.build()); + runsBuilder_.setMessage(index, builderForValue.build()); } - bitField0_ |= 0x00000001; return this; } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public Builder mergeParameter(org.mlflow.api.proto.Service.Param value) { - if (parameterBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - parameter_ != null && - parameter_ != org.mlflow.api.proto.Service.Param.getDefaultInstance()) { - parameter_ = - org.mlflow.api.proto.Service.Param.newBuilder(parameter_).mergeFrom(value).buildPartial(); - } else { - parameter_ = value; + public Builder addRuns(org.mlflow.api.proto.Service.Run value) { + if (runsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureRunsIsMutable(); + runs_.add(value); onChanged(); } else { - parameterBuilder_.mergeFrom(value); + runsBuilder_.addMessage(value); + } + return this; + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public Builder addRuns( + int index, org.mlflow.api.proto.Service.Run value) { + if (runsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRunsIsMutable(); + runs_.add(index, value); + onChanged(); + } else { + runsBuilder_.addMessage(index, value); } - bitField0_ |= 0x00000001; return this; } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public Builder clearParameter() { - if (parameterBuilder_ == null) { - parameter_ = null; + public Builder addRuns( + org.mlflow.api.proto.Service.Run.Builder builderForValue) { + if (runsBuilder_ == null) { + ensureRunsIsMutable(); + runs_.add(builderForValue.build()); onChanged(); } else { - parameterBuilder_.clear(); + runsBuilder_.addMessage(builderForValue.build()); } - bitField0_ = (bitField0_ & ~0x00000001); return this; } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public org.mlflow.api.proto.Service.Param.Builder getParameterBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getParameterFieldBuilder().getBuilder(); + public Builder addRuns( + int index, org.mlflow.api.proto.Service.Run.Builder builderForValue) { + if (runsBuilder_ == null) { + ensureRunsIsMutable(); + runs_.add(index, builderForValue.build()); + onChanged(); + } else { + runsBuilder_.addMessage(index, builderForValue.build()); + } + return this; } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - public org.mlflow.api.proto.Service.ParamOrBuilder getParameterOrBuilder() { - if (parameterBuilder_ != null) { - return parameterBuilder_.getMessageOrBuilder(); + public Builder addAllRuns( + java.lang.Iterable values) { + if (runsBuilder_ == null) { + ensureRunsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, runs_); + onChanged(); } else { - return parameter_ == null ? - org.mlflow.api.proto.Service.Param.getDefaultInstance() : parameter_; + runsBuilder_.addAllMessages(values); } + return this; } /** *
-         * Param key-value pair.
+         * Runs that match the search criteria.
          * 
* - * optional .mlflow.Param parameter = 1; + * repeated .mlflow.Run runs = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Param, org.mlflow.api.proto.Service.Param.Builder, org.mlflow.api.proto.Service.ParamOrBuilder> - getParameterFieldBuilder() { - if (parameterBuilder_ == null) { - parameterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.Param, org.mlflow.api.proto.Service.Param.Builder, org.mlflow.api.proto.Service.ParamOrBuilder>( - getParameter(), + public Builder clearRuns() { + if (runsBuilder_ == null) { + runs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + runsBuilder_.clear(); + } + return this; + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public Builder removeRuns(int index) { + if (runsBuilder_ == null) { + ensureRunsIsMutable(); + runs_.remove(index); + onChanged(); + } else { + runsBuilder_.remove(index); + } + return this; + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public org.mlflow.api.proto.Service.Run.Builder getRunsBuilder( + int index) { + return getRunsFieldBuilder().getBuilder(index); + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public org.mlflow.api.proto.Service.RunOrBuilder getRunsOrBuilder( + int index) { + if (runsBuilder_ == null) { + return runs_.get(index); } else { + return runsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public java.util.List + getRunsOrBuilderList() { + if (runsBuilder_ != null) { + return runsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(runs_); + } + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public org.mlflow.api.proto.Service.Run.Builder addRunsBuilder() { + return getRunsFieldBuilder().addBuilder( + org.mlflow.api.proto.Service.Run.getDefaultInstance()); + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public org.mlflow.api.proto.Service.Run.Builder addRunsBuilder( + int index) { + return getRunsFieldBuilder().addBuilder( + index, org.mlflow.api.proto.Service.Run.getDefaultInstance()); + } + /** + *
+         * Runs that match the search criteria.
+         * 
+ * + * repeated .mlflow.Run runs = 1; + */ + public java.util.List + getRunsBuilderList() { + return getRunsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> + getRunsFieldBuilder() { + if (runsBuilder_ == null) { + runsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder>( + runs_, + ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - parameter_ = null; + runs_ = null; + } + return runsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + * optional string next_page_token = 2; + */ + public boolean hasNextPageToken() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string next_page_token = 2; + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + nextPageToken_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string next_page_token = 2; + */ + public com.google.protobuf.ByteString + getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - return parameterBuilder_; + } + /** + * optional string next_page_token = 2; + */ + public Builder setNextPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + nextPageToken_ = value; + onChanged(); + return this; + } + /** + * optional string next_page_token = 2; + */ + public Builder clearNextPageToken() { + bitField0_ = (bitField0_ & ~0x00000002); + nextPageToken_ = getDefaultInstance().getNextPageToken(); + onChanged(); + return this; + } + /** + * optional string next_page_token = 2; + */ + public Builder setNextPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + nextPageToken_ = value; + onChanged(); + return this; } @java.lang.Override public final Builder setUnknownFields( @@ -32511,16 +33089,16 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetParam.Response) + // @@protoc_insertion_point(builder_scope:mlflow.SearchRuns.Response) } - // @@protoc_insertion_point(class_scope:mlflow.GetParam.Response) - private static final org.mlflow.api.proto.Service.GetParam.Response DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.SearchRuns.Response) + private static final org.mlflow.api.proto.Service.SearchRuns.Response DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetParam.Response(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.SearchRuns.Response(); } - public static org.mlflow.api.proto.Service.GetParam.Response getDefaultInstance() { + public static org.mlflow.api.proto.Service.SearchRuns.Response getDefaultInstance() { return DEFAULT_INSTANCE; } @@ -32545,88 +33123,254 @@ public 
com.google.protobuf.Parser getParserForType() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam.Response getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.SearchRuns.Response getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; - public static final int RUN_UUID_FIELD_NUMBER = 1; - private volatile java.lang.Object runUuid_; + public static final int EXPERIMENT_IDS_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList experimentIds_; /** *
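
The `page_token`/`next_page_token` pair supports the usual cursor-style pagination loop: echo the response's `next_page_token` back as the next request's `page_token` until the server omits it. A sketch under the assumption of a hypothetical `search` helper that performs the actual RPC; only the token plumbing is shown.

```java
import org.mlflow.api.proto.Service.SearchRuns;

public class PaginationSketch {
  // Hypothetical transport: sends the request to an MLflow tracking server
  // and returns the parsed response.
  static SearchRuns.Response search(SearchRuns request) {
    throw new UnsupportedOperationException("wire up to a tracking server");
  }

  public static void main(String[] args) {
    SearchRuns.Builder request = SearchRuns.newBuilder().addExperimentIds("0");
    while (true) {
      SearchRuns.Response response = search(request.build());
      System.out.println("fetched " + response.getRunsCount() + " runs");
      // Stop once the server stops returning a continuation token.
      if (!response.hasNextPageToken()) {
        break;
      }
      request.setPageToken(response.getNextPageToken());
    }
  }
}
```
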
-     * ID of the run from which to retrieve the param value.
+     * List of experiment IDs to search over.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public com.google.protobuf.ProtocolStringList + getExperimentIdsList() { + return experimentIds_; } /** *
-     * ID of the run from which to retrieve the param value.
+     * List of experiment IDs to search over.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runUuid_ = s; - } - return s; - } + public int getExperimentIdsCount() { + return experimentIds_.size(); } /** *
-     * ID of the run from which to retrieve the param value.
+     * List of experiment IDs to search over.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; - if (ref instanceof java.lang.String) { + public java.lang.String getExperimentIds(int index) { + return experimentIds_.get(index); + } + /** + *
+     * List of experiment IDs to search over.
+     * 
+ * + * repeated string experiment_ids = 1; + */ + public com.google.protobuf.ByteString + getExperimentIdsBytes(int index) { + return experimentIds_.getByteString(index); + } + + public static final int FILTER_FIELD_NUMBER = 4; + private volatile java.lang.Object filter_; + /** + *
+     * A filter expression over params, metrics, and tags that allows returning a subset of
+     * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+     * between a param, metric, or tag and a constant.
+     * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+     * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+     * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+     * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+     * 
+ * + * optional string filter = 4; + */ + public boolean hasFilter() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+     * A filter expression over params, metrics, and tags that allows returning a subset of
+     * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+     * between a param, metric, or tag and a constant.
+     * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+     * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+     * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+     * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+     * 
+ * + * optional string filter = 4; + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + filter_ = s; + } + return s; + } + } + /** + *
+     * A filter expression over params, metrics, and tags that allows returning a subset of
+     * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+     * between a param, metric, or tag and a constant.
+     * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+     * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+     * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+     * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
+     * 
+ * + * optional string filter = 4; + */ + public com.google.protobuf.ByteString + getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - runUuid_ = b; + filter_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int PARAM_NAME_FIELD_NUMBER = 2; - private volatile java.lang.Object paramName_; + public static final int RUN_VIEW_TYPE_FIELD_NUMBER = 3; + private int runViewType_; /** *
-     * Name of the param.
+     * Whether to display only active, only deleted, or all runs.
+     * Defaults to only active runs.
      * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; */ - public boolean hasParamName() { + public boolean hasRunViewType() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * Name of the param.
+     * Whether to display only active, only deleted, or all runs.
+     * Defaults to only active runs.
+     * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + */ + public org.mlflow.api.proto.Service.ViewType getRunViewType() { + @SuppressWarnings("deprecation") + org.mlflow.api.proto.Service.ViewType result = org.mlflow.api.proto.Service.ViewType.valueOf(runViewType_); + return result == null ? org.mlflow.api.proto.Service.ViewType.ACTIVE_ONLY : result; + } + + public static final int MAX_RESULTS_FIELD_NUMBER = 5; + private int maxResults_; + /** + *
+     * Maximum number of runs desired. Max threshold is 50000.
+     * 
+ * + * optional int32 max_results = 5 [default = 1000]; + */ + public boolean hasMaxResults() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + *
+     * Maximum number of runs desired. Max threshold is 50000.
+     * 
+ * + * optional int32 max_results = 5 [default = 1000]; + */ + public int getMaxResults() { + return maxResults_; + } + + public static final int ORDER_BY_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList orderBy_; + /** + *
+     * List of columns to be ordered by, including attributes, params, metrics, and tags with an
+     * optional "DESC" or "ASC" annotation, where "ASC" is the default.
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+     * (and this is the default ordering criterion if order_by is not provided).
+     * 
+ * + * repeated string order_by = 6; + */ + public com.google.protobuf.ProtocolStringList + getOrderByList() { + return orderBy_; + } + /** + *
+     * List of columns to be ordered by, including attributes, params, metrics, and tags with an
+     * optional "DESC" or "ASC" annotation, where "ASC" is the default.
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+     * (and this is the default ordering criterion if order_by is not provided).
+     * 
+ * + * repeated string order_by = 6; + */ + public int getOrderByCount() { + return orderBy_.size(); + } + /** + *
+     * List of columns to be ordered by, including attributes, params, metrics, and tags with an
+     * optional "DESC" or "ASC" annotation, where "ASC" is the default.
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+     * (and this is the default ordering criterion if order_by is not provided).
+     * 
+ * + * repeated string order_by = 6; + */ + public java.lang.String getOrderBy(int index) { + return orderBy_.get(index); + } + /** + *
+     * List of columns to be ordered by, including attributes, params, metrics, and tags with an
+     * optional "DESC" or "ASC" annotation, where "ASC" is the default.
+     * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+     * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+     * (and this is the default ordering criterion if order_by is not provided).
      * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * repeated string order_by = 6; + */ + public com.google.protobuf.ByteString + getOrderByBytes(int index) { + return orderBy_.getByteString(index); + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 7; + private volatile java.lang.Object pageToken_; + /** + * optional string page_token = 7; + */ + public boolean hasPageToken() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string page_token = 7; */ - public java.lang.String getParamName() { - java.lang.Object ref = paramName_; + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -32634,26 +33378,22 @@ public java.lang.String getParamName() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - paramName_ = s; + pageToken_ = s; } return s; } } /** - *
-     * Name of the param.
-     * 
- * - * optional string param_name = 2 [(.validate_required) = true]; + * optional string page_token = 7; */ public com.google.protobuf.ByteString - getParamNameBytes() { - java.lang.Object ref = paramName_; + getPageTokenBytes() { + java.lang.Object ref = pageToken_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - paramName_ = b; + pageToken_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -32674,11 +33414,23 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); + for (int i = 0; i < experimentIds_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, experimentIds_.getRaw(i)); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, paramName_); + output.writeEnum(3, runViewType_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, filter_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt32(5, maxResults_); + } + for (int i = 0; i < orderBy_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, orderBy_.getRaw(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 7, pageToken_); } unknownFields.writeTo(output); } @@ -32689,11 +33441,35 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); + { + int dataSize = 0; + for (int i = 0; i < experimentIds_.size(); i++) { + dataSize += computeStringSizeNoTag(experimentIds_.getRaw(i)); + } + size += dataSize; + size += 1 * getExperimentIdsList().size(); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, paramName_); + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, runViewType_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, filter_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(5, maxResults_); + } + { + int dataSize = 0; + for (int i = 0; i < orderBy_.size(); i++) { + dataSize += computeStringSizeNoTag(orderBy_.getRaw(i)); + } + size += dataSize; + size += 1 * getOrderByList().size(); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, pageToken_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -32705,21 +33481,34 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetParam)) { + if (!(obj instanceof org.mlflow.api.proto.Service.SearchRuns)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetParam other = (org.mlflow.api.proto.Service.GetParam) obj; + org.mlflow.api.proto.Service.SearchRuns other = (org.mlflow.api.proto.Service.SearchRuns) obj; boolean result = true; - result = result && (hasRunUuid() == other.hasRunUuid()); - if (hasRunUuid()) { - 
result = result && getRunUuid() - .equals(other.getRunUuid()); + result = result && getExperimentIdsList() + .equals(other.getExperimentIdsList()); + result = result && (hasFilter() == other.hasFilter()); + if (hasFilter()) { + result = result && getFilter() + .equals(other.getFilter()); + } + result = result && (hasRunViewType() == other.hasRunViewType()); + if (hasRunViewType()) { + result = result && runViewType_ == other.runViewType_; + } + result = result && (hasMaxResults() == other.hasMaxResults()); + if (hasMaxResults()) { + result = result && (getMaxResults() + == other.getMaxResults()); } - result = result && (hasParamName() == other.hasParamName()); - if (hasParamName()) { - result = result && getParamName() - .equals(other.getParamName()); + result = result && getOrderByList() + .equals(other.getOrderByList()); + result = result && (hasPageToken() == other.hasPageToken()); + if (hasPageToken()) { + result = result && getPageToken() + .equals(other.getPageToken()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -32732,82 +33521,98 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRunUuid()) { - hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; - hash = (53 * hash) + getRunUuid().hashCode(); + if (getExperimentIdsCount() > 0) { + hash = (37 * hash) + EXPERIMENT_IDS_FIELD_NUMBER; + hash = (53 * hash) + getExperimentIdsList().hashCode(); + } + if (hasFilter()) { + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + } + if (hasRunViewType()) { + hash = (37 * hash) + RUN_VIEW_TYPE_FIELD_NUMBER; + hash = (53 * hash) + runViewType_; + } + if (hasMaxResults()) { + hash = (37 * hash) + MAX_RESULTS_FIELD_NUMBER; + hash = (53 * hash) + getMaxResults(); } - if (hasParamName()) { - hash = (37 * hash) + PARAM_NAME_FIELD_NUMBER; - hash = (53 * hash) + getParamName().hashCode(); + if (getOrderByCount() > 0) { + hash = (37 * hash) + ORDER_BY_FIELD_NUMBER; + hash = (53 * hash) + getOrderByList().hashCode(); + } + if (hasPageToken()) { + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam parseFrom(byte[] data) + public static 
org.mlflow.api.proto.Service.SearchRuns parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.SearchRuns parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.SearchRuns parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetParam parseDelimitedFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetParam parseFrom( + public static org.mlflow.api.proto.Service.SearchRuns parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -32820,7 +33625,7 @@ public static org.mlflow.api.proto.Service.GetParam parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetParam prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.SearchRuns prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -32836,26 +33641,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetParam} + * Protobuf type {@code mlflow.SearchRuns} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetParam) - org.mlflow.api.proto.Service.GetParamOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.SearchRuns) + org.mlflow.api.proto.Service.SearchRunsOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_descriptor; + return 
org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetParam.class, org.mlflow.api.proto.Service.GetParam.Builder.class); + org.mlflow.api.proto.Service.SearchRuns.class, org.mlflow.api.proto.Service.SearchRuns.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetParam.newBuilder() + // Construct using org.mlflow.api.proto.Service.SearchRuns.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -32873,27 +33678,35 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + experimentIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); - paramName_ = ""; + filter_ = ""; bitField0_ = (bitField0_ & ~0x00000002); + runViewType_ = 1; + bitField0_ = (bitField0_ & ~0x00000004); + maxResults_ = 1000; + bitField0_ = (bitField0_ & ~0x00000008); + orderBy_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + pageToken_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetParam_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetParam.getDefaultInstance(); + public org.mlflow.api.proto.Service.SearchRuns getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.SearchRuns.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam build() { - org.mlflow.api.proto.Service.GetParam result = buildPartial(); + public org.mlflow.api.proto.Service.SearchRuns build() { + org.mlflow.api.proto.Service.SearchRuns result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -32901,18 +33714,36 @@ public org.mlflow.api.proto.Service.GetParam build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetParam buildPartial() { - org.mlflow.api.proto.Service.GetParam result = new org.mlflow.api.proto.Service.GetParam(this); + public org.mlflow.api.proto.Service.SearchRuns buildPartial() { + org.mlflow.api.proto.Service.SearchRuns result = new org.mlflow.api.proto.Service.SearchRuns(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + experimentIds_ = experimentIds_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); } - result.runUuid_ = runUuid_; + result.experimentIds_ = experimentIds_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.filter_ = filter_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } - result.paramName_ = paramName_; + result.runViewType_ = runViewType_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 
0x00000004; + } + result.maxResults_ = maxResults_; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + orderBy_ = orderBy_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.orderBy_ = orderBy_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000008; + } + result.pageToken_ = pageToken_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -32952,24 +33783,50 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetParam) { - return mergeFrom((org.mlflow.api.proto.Service.GetParam)other); + if (other instanceof org.mlflow.api.proto.Service.SearchRuns) { + return mergeFrom((org.mlflow.api.proto.Service.SearchRuns)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetParam other) { - if (other == org.mlflow.api.proto.Service.GetParam.getDefaultInstance()) return this; - if (other.hasRunUuid()) { - bitField0_ |= 0x00000001; - runUuid_ = other.runUuid_; + public Builder mergeFrom(org.mlflow.api.proto.Service.SearchRuns other) { + if (other == org.mlflow.api.proto.Service.SearchRuns.getDefaultInstance()) return this; + if (!other.experimentIds_.isEmpty()) { + if (experimentIds_.isEmpty()) { + experimentIds_ = other.experimentIds_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureExperimentIdsIsMutable(); + experimentIds_.addAll(other.experimentIds_); + } onChanged(); } - if (other.hasParamName()) { + if (other.hasFilter()) { bitField0_ |= 0x00000002; - paramName_ = other.paramName_; + filter_ = other.filter_; + onChanged(); + } + if (other.hasRunViewType()) { + setRunViewType(other.getRunViewType()); + } + if (other.hasMaxResults()) { + setMaxResults(other.getMaxResults()); + } + if (!other.orderBy_.isEmpty()) { + if (orderBy_.isEmpty()) { + orderBy_ = other.orderBy_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureOrderByIsMutable(); + orderBy_.addAll(other.orderBy_); + } + onChanged(); + } + if (other.hasPageToken()) { + bitField0_ |= 0x00000020; + pageToken_ = other.pageToken_; onChanged(); } this.mergeUnknownFields(other.unknownFields); @@ -32987,11 +33844,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetParam parsedMessage = null; + org.mlflow.api.proto.Service.SearchRuns parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetParam) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.SearchRuns) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -33002,132 +33859,173 @@ public Builder mergeFrom( } private int bitField0_; - private java.lang.Object runUuid_ = ""; + private com.google.protobuf.LazyStringList experimentIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureExperimentIdsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + experimentIds_ = new com.google.protobuf.LazyStringArrayList(experimentIds_); + bitField0_ |= 0x00000001; + } + } /** *
-       * ID of the run from which to retrieve the param value.
+       * List of experiment IDs to search over.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public com.google.protobuf.ProtocolStringList + getExperimentIdsList() { + return experimentIds_.getUnmodifiableView(); } /** *
-       * ID of the run from which to retrieve the param value.
+       * List of experiment IDs to search over.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runUuid_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } + public int getExperimentIdsCount() { + return experimentIds_.size(); + } + /** + *
+       * List of experiment IDs to search over.
+       * 
+ * + * repeated string experiment_ids = 1; + */ + public java.lang.String getExperimentIds(int index) { + return experimentIds_.get(index); } /** *
-       * ID of the run from which to retrieve the param value.
+       * List of experiment IDs to search over.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runUuid_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + getExperimentIdsBytes(int index) { + return experimentIds_.getByteString(index); } /** *
-       * ID of the run from which to retrieve the param value.
+       * List of experiment IDs to search over.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public Builder setRunUuid( + public Builder setExperimentIds( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureExperimentIdsIsMutable(); + experimentIds_.set(index, value); + onChanged(); + return this; + } + /** + *
+       * List of experiment IDs to search over.
+       * 
+ * + * repeated string experiment_ids = 1; + */ + public Builder addExperimentIds( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; - runUuid_ = value; + ensureExperimentIdsIsMutable(); + experimentIds_.add(value); onChanged(); return this; } /** *
-       * ID of the run from which to retrieve the param value.
+       * List of experiment IDs to search over.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public Builder clearRunUuid() { + public Builder addAllExperimentIds( + java.lang.Iterable values) { + ensureExperimentIdsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, experimentIds_); + onChanged(); + return this; + } + /** + *
+       * List of experiment IDs to search over.
+       * 
+ * + * repeated string experiment_ids = 1; + */ + public Builder clearExperimentIds() { + experimentIds_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); - runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * ID of the run from which to retrieve the param value.
+       * List of experiment IDs to search over.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated string experiment_ids = 1; */ - public Builder setRunUuidBytes( + public Builder addExperimentIdsBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; - runUuid_ = value; + ensureExperimentIdsIsMutable(); + experimentIds_.add(value); onChanged(); return this; } - private java.lang.Object paramName_ = ""; + private java.lang.Object filter_ = ""; /** *
-       * Name of the param.
+       * A filter expression over params, metrics, and tags that allows returning a subset of
+       * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+       * between a param, metric, or tag and a constant.
+       * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+       * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+       * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+       * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
        * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional string filter = 4; */ - public boolean hasParamName() { + public boolean hasFilter() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * Name of the param.
+       * A filter expression over params, metrics, and tags that allows returning a subset of
+       * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+       * between a param, metric, or tag and a constant.
+       * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+       * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+       * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+       * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
        * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional string filter = 4; */ - public java.lang.String getParamName() { - java.lang.Object ref = paramName_; + public java.lang.String getFilter() { + java.lang.Object ref = filter_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - paramName_ = s; + filter_ = s; } return s; } else { @@ -33136,19 +34034,25 @@ public java.lang.String getParamName() { } /** *
-       * Name of the param.
+       * A filter expression over params, metrics, and tags that allows returning a subset of
+       * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+       * between a param, metric, or tag and a constant.
+       * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+       * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+       * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+       * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
        * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional string filter = 4; */ public com.google.protobuf.ByteString - getParamNameBytes() { - java.lang.Object ref = paramName_; + getFilterBytes() { + java.lang.Object ref = filter_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - paramName_ = b; + filter_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -33156,1092 +34060,414 @@ public java.lang.String getParamName() { } /** *
-       * Name of the param.
+       * A filter expression over params, metrics, and tags that allows returning a subset of
+       * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+       * between a param, metric, or tag and a constant.
+       * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+       * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+       * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+       * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
        * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional string filter = 4; */ - public Builder setParamName( + public Builder setFilter( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - paramName_ = value; + filter_ = value; onChanged(); return this; } /** *
-       * Name of the param.
+       * A filter expression over params, metrics, and tags that allows returning a subset of
+       * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+       * between a param, metric, or tag and a constant.
+       * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+       * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+       * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+       * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
        * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional string filter = 4; */ - public Builder clearParamName() { + public Builder clearFilter() { bitField0_ = (bitField0_ & ~0x00000002); - paramName_ = getDefaultInstance().getParamName(); + filter_ = getDefaultInstance().getFilter(); onChanged(); return this; } /** *
-       * Name of the param.
+       * A filter expression over params, metrics, and tags that allows returning a subset of
+       * runs. The syntax is a subset of SQL that supports ANDing together binary operations
+       * between a param, metric, or tag and a constant.
+       * Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'``
+       * You can reference columns whose names contain special characters (hyphen, space, period, etc.) by using double quotes:
+       * ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'``
+       * Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``.
        * 
* - * optional string param_name = 2 [(.validate_required) = true]; + * optional string filter = 4; */ - public Builder setParamNameBytes( + public Builder setFilterBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - paramName_ = value; + filter_ = value; onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.GetParam) - } - - // @@protoc_insertion_point(class_scope:mlflow.GetParam) - private static final org.mlflow.api.proto.Service.GetParam DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetParam(); - } - - public static org.mlflow.api.proto.Service.GetParam getDefaultInstance() { - return DEFAULT_INSTANCE; - } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetParam parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetParam(input, extensionRegistry); + private int runViewType_ = 1; + /** + *
+       * Whether to display only active, only deleted, or all runs.
+       * Defaults to only active runs.
+       * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + */ + public boolean hasRunViewType() { + return ((bitField0_ & 0x00000004) == 0x00000004); } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.GetParam getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface SearchExpressionOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.SearchExpression) - com.google.protobuf.MessageOrBuilder { - - /** - *
-     * A metric search expression.
-     * 
- * - * optional .mlflow.MetricSearchExpression metric = 1; - */ - boolean hasMetric(); - /** - *
-     * A metric search expression.
-     * 
- * - * optional .mlflow.MetricSearchExpression metric = 1; - */ - org.mlflow.api.proto.Service.MetricSearchExpression getMetric(); - /** - *
-     * A metric search expression.
-     * 
- * - * optional .mlflow.MetricSearchExpression metric = 1; - */ - org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder getMetricOrBuilder(); - - /** - *
-     * A parameter search expression.
-     * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; - */ - boolean hasParameter(); - /** - *
-     * A parameter search expression.
-     * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; - */ - org.mlflow.api.proto.Service.ParameterSearchExpression getParameter(); - /** - *
-     * A parameter search expression.
-     * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; - */ - org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder getParameterOrBuilder(); - - public org.mlflow.api.proto.Service.SearchExpression.ExpressionCase getExpressionCase(); - } - /** - * Protobuf type {@code mlflow.SearchExpression} - */ - public static final class SearchExpression extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.SearchExpression) - SearchExpressionOrBuilder { - private static final long serialVersionUID = 0L; - // Use SearchExpression.newBuilder() to construct. - private SearchExpression(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private SearchExpression() { - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SearchExpression( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); + /** + *
+       * Whether to display only active, only deleted, or all runs.
+       * Defaults to only active runs.
+       * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + */ + public org.mlflow.api.proto.Service.ViewType getRunViewType() { + @SuppressWarnings("deprecation") + org.mlflow.api.proto.Service.ViewType result = org.mlflow.api.proto.Service.ViewType.valueOf(runViewType_); + return result == null ? org.mlflow.api.proto.Service.ViewType.ACTIVE_ONLY : result; } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - org.mlflow.api.proto.Service.MetricSearchExpression.Builder subBuilder = null; - if (expressionCase_ == 1) { - subBuilder = ((org.mlflow.api.proto.Service.MetricSearchExpression) expression_).toBuilder(); - } - expression_ = - input.readMessage(org.mlflow.api.proto.Service.MetricSearchExpression.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom((org.mlflow.api.proto.Service.MetricSearchExpression) expression_); - expression_ = subBuilder.buildPartial(); - } - expressionCase_ = 1; - break; - } - case 18: { - org.mlflow.api.proto.Service.ParameterSearchExpression.Builder subBuilder = null; - if (expressionCase_ == 2) { - subBuilder = ((org.mlflow.api.proto.Service.ParameterSearchExpression) expression_).toBuilder(); - } - expression_ = - input.readMessage(org.mlflow.api.proto.Service.ParameterSearchExpression.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom((org.mlflow.api.proto.Service.ParameterSearchExpression) expression_); - expression_ = subBuilder.buildPartial(); - } - expressionCase_ = 2; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } + /** + *
+       * Whether to display only active, only deleted, or all runs.
+       * Defaults to only active runs.
+       * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + */ + public Builder setRunViewType(org.mlflow.api.proto.Service.ViewType value) { + if (value == null) { + throw new NullPointerException(); } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchExpression_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchExpression_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.SearchExpression.class, org.mlflow.api.proto.Service.SearchExpression.Builder.class); - } - - private int bitField0_; - private int expressionCase_ = 0; - private java.lang.Object expression_; - public enum ExpressionCase - implements com.google.protobuf.Internal.EnumLite { - METRIC(1), - PARAMETER(2), - EXPRESSION_NOT_SET(0); - private final int value; - private ExpressionCase(int value) { - this.value = value; + bitField0_ |= 0x00000004; + runViewType_ = value.getNumber(); + onChanged(); + return this; } /** - * @deprecated Use {@link #forNumber(int)} instead. + *
+       * Whether to display only active, only deleted, or all runs.
+       * Defaults to only active runs.
+       * 
+ * + * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; */ - @java.lang.Deprecated - public static ExpressionCase valueOf(int value) { - return forNumber(value); + public Builder clearRunViewType() { + bitField0_ = (bitField0_ & ~0x00000004); + runViewType_ = 1; + onChanged(); + return this; } - public static ExpressionCase forNumber(int value) { - switch (value) { - case 1: return METRIC; - case 2: return PARAMETER; - case 0: return EXPRESSION_NOT_SET; - default: return null; - } - } - public int getNumber() { - return this.value; - } - }; - - public ExpressionCase - getExpressionCase() { - return ExpressionCase.forNumber( - expressionCase_); - } - - public static final int METRIC_FIELD_NUMBER = 1; - /** - *
-     * A metric search expression.
-     * 
- * - * optional .mlflow.MetricSearchExpression metric = 1; - */ - public boolean hasMetric() { - return expressionCase_ == 1; - } - /** - *
-     * A metric search expression.
-     * 
- * - * optional .mlflow.MetricSearchExpression metric = 1; - */ - public org.mlflow.api.proto.Service.MetricSearchExpression getMetric() { - if (expressionCase_ == 1) { - return (org.mlflow.api.proto.Service.MetricSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); - } - /** - *
-     * A metric search expression.
-     * 
- * - * optional .mlflow.MetricSearchExpression metric = 1; - */ - public org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder getMetricOrBuilder() { - if (expressionCase_ == 1) { - return (org.mlflow.api.proto.Service.MetricSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); - } - - public static final int PARAMETER_FIELD_NUMBER = 2; - /** - *
-     * A parameter search expression.
-     * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; - */ - public boolean hasParameter() { - return expressionCase_ == 2; - } - /** - *
-     * A parameter search expression.
-     * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; - */ - public org.mlflow.api.proto.Service.ParameterSearchExpression getParameter() { - if (expressionCase_ == 2) { - return (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); - } - /** - *
-     * A parameter search expression.
-     * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; - */ - public org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder getParameterOrBuilder() { - if (expressionCase_ == 2) { - return (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (expressionCase_ == 1) { - output.writeMessage(1, (org.mlflow.api.proto.Service.MetricSearchExpression) expression_); - } - if (expressionCase_ == 2) { - output.writeMessage(2, (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (expressionCase_ == 1) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, (org.mlflow.api.proto.Service.MetricSearchExpression) expression_); - } - if (expressionCase_ == 2) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.SearchExpression)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.SearchExpression other = (org.mlflow.api.proto.Service.SearchExpression) obj; - - boolean result = true; - result = result && getExpressionCase().equals( - other.getExpressionCase()); - if (!result) return false; - switch (expressionCase_) { - case 1: - result = result && getMetric() - .equals(other.getMetric()); - break; - case 2: - result = result && getParameter() - .equals(other.getParameter()); - break; - case 0: - default: - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - switch (expressionCase_) { - case 1: - hash = (37 * hash) + METRIC_FIELD_NUMBER; - hash = (53 * hash) + getMetric().hashCode(); - break; - case 2: - hash = (37 * hash) + PARAMETER_FIELD_NUMBER; - hash = (53 * hash) + getParameter().hashCode(); - break; - case 0: - default: - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - 
com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchExpression parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchExpression parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchExpression parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.SearchExpression prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.SearchExpression} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.SearchExpression) - org.mlflow.api.proto.Service.SearchExpressionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchExpression_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchExpression_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.SearchExpression.class, org.mlflow.api.proto.Service.SearchExpression.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.SearchExpression.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - expressionCase_ = 0; - expression_ = null; - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchExpression_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchExpression getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.SearchExpression.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchExpression build() { - org.mlflow.api.proto.Service.SearchExpression result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchExpression buildPartial() { - org.mlflow.api.proto.Service.SearchExpression result = new org.mlflow.api.proto.Service.SearchExpression(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (expressionCase_ == 1) { - if (metricBuilder_ == null) { - result.expression_ = expression_; - } else { - result.expression_ = metricBuilder_.build(); - } - } - if (expressionCase_ == 2) { - if (parameterBuilder_ == null) { - result.expression_ = expression_; - } else { - result.expression_ = parameterBuilder_.build(); - } - } - result.bitField0_ = to_bitField0_; - result.expressionCase_ = expressionCase_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - 
com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.SearchExpression) { - return mergeFrom((org.mlflow.api.proto.Service.SearchExpression)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.mlflow.api.proto.Service.SearchExpression other) { - if (other == org.mlflow.api.proto.Service.SearchExpression.getDefaultInstance()) return this; - switch (other.getExpressionCase()) { - case METRIC: { - mergeMetric(other.getMetric()); - break; - } - case PARAMETER: { - mergeParameter(other.getParameter()); - break; - } - case EXPRESSION_NOT_SET: { - break; - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.SearchExpression parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.SearchExpression) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int expressionCase_ = 0; - private java.lang.Object expression_; - public ExpressionCase - getExpressionCase() { - return ExpressionCase.forNumber( - expressionCase_); - } - - public Builder clearExpression() { - expressionCase_ = 0; - expression_ = null; - onChanged(); - return this; - } - - private int bitField0_; - - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.MetricSearchExpression, org.mlflow.api.proto.Service.MetricSearchExpression.Builder, org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder> metricBuilder_; + private int maxResults_ = 1000; /** *
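// --- Editor's example (illustrative sketch, not part of the generated file) ---
// Every message in this file, including the SearchRuns message this diff introduces,
// exposes the same static parseFrom(...) overloads and newBuilder() factory shown
// above. A minimal serialize/parse round trip (the method name is hypothetical):
static org.mlflow.api.proto.Service.SearchRuns roundTripExample(
    org.mlflow.api.proto.Service.SearchRuns original)
    throws com.google.protobuf.InvalidProtocolBufferException {
  byte[] wire = original.toByteArray();  // protobuf wire format
  return org.mlflow.api.proto.Service.SearchRuns.parseFrom(wire);
}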
-       * A metric search expression.
+       * Maximum number of runs desired. The maximum threshold is 50000.
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * optional int32 max_results = 5 [default = 1000]; */ - public boolean hasMetric() { - return expressionCase_ == 1; + public boolean hasMaxResults() { + return ((bitField0_ & 0x00000008) == 0x00000008); } /** *
-       * A metric search expression.
+       * Maximum number of runs desired. The maximum threshold is 50000.
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * optional int32 max_results = 5 [default = 1000]; */ - public org.mlflow.api.proto.Service.MetricSearchExpression getMetric() { - if (metricBuilder_ == null) { - if (expressionCase_ == 1) { - return (org.mlflow.api.proto.Service.MetricSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); - } else { - if (expressionCase_ == 1) { - return metricBuilder_.getMessage(); - } - return org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); - } + public int getMaxResults() { + return maxResults_; } /** *
-       * A metric search expression.
+       * Maximum number of runs desired. The maximum threshold is 50000.
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * optional int32 max_results = 5 [default = 1000]; */ - public Builder setMetric(org.mlflow.api.proto.Service.MetricSearchExpression value) { - if (metricBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - expression_ = value; - onChanged(); - } else { - metricBuilder_.setMessage(value); - } - expressionCase_ = 1; + public Builder setMaxResults(int value) { + bitField0_ |= 0x00000008; + maxResults_ = value; + onChanged(); return this; } /** *
-       * A metric search expression.
+       * Maximum number of runs desired. The maximum threshold is 50000.
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * optional int32 max_results = 5 [default = 1000]; */ - public Builder setMetric( - org.mlflow.api.proto.Service.MetricSearchExpression.Builder builderForValue) { - if (metricBuilder_ == null) { - expression_ = builderForValue.build(); - onChanged(); - } else { - metricBuilder_.setMessage(builderForValue.build()); - } - expressionCase_ = 1; + public Builder clearMaxResults() { + bitField0_ = (bitField0_ & ~0x00000008); + maxResults_ = 1000; + onChanged(); return this; } + + private com.google.protobuf.LazyStringList orderBy_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureOrderByIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + orderBy_ = new com.google.protobuf.LazyStringArrayList(orderBy_); + bitField0_ |= 0x00000010; + } + } /** *
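// --- Editor's example (illustrative sketch, not part of the generated file) ---
// The hasMaxResults/getMaxResults/setMaxResults/clearMaxResults accessors above follow
// the usual proto2 optional-field pattern: a bit in bitField0_ tracks presence, and
// clearing the field restores the declared default of 1000. For instance:
static void maxResultsExample() {
  org.mlflow.api.proto.Service.SearchRuns.Builder b =
      org.mlflow.api.proto.Service.SearchRuns.newBuilder();
  b.setMaxResults(50000);                                  // the documented upper bound
  assert b.hasMaxResults() && b.getMaxResults() == 50000;
  b.clearMaxResults();                                     // presence bit cleared,
  assert !b.hasMaxResults() && b.getMaxResults() == 1000;  // default restored
}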
-       * A metric search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * repeated string order_by = 6; */ - public Builder mergeMetric(org.mlflow.api.proto.Service.MetricSearchExpression value) { - if (metricBuilder_ == null) { - if (expressionCase_ == 1 && - expression_ != org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance()) { - expression_ = org.mlflow.api.proto.Service.MetricSearchExpression.newBuilder((org.mlflow.api.proto.Service.MetricSearchExpression) expression_) - .mergeFrom(value).buildPartial(); - } else { - expression_ = value; - } - onChanged(); - } else { - if (expressionCase_ == 1) { - metricBuilder_.mergeFrom(value); - } - metricBuilder_.setMessage(value); - } - expressionCase_ = 1; - return this; + public com.google.protobuf.ProtocolStringList + getOrderByList() { + return orderBy_.getUnmodifiableView(); } /** *
-       * A metric search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * repeated string order_by = 6; */ - public Builder clearMetric() { - if (metricBuilder_ == null) { - if (expressionCase_ == 1) { - expressionCase_ = 0; - expression_ = null; - onChanged(); - } - } else { - if (expressionCase_ == 1) { - expressionCase_ = 0; - expression_ = null; - } - metricBuilder_.clear(); - } - return this; + public int getOrderByCount() { + return orderBy_.size(); } /** *
-       * A metric search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * repeated string order_by = 6; */ - public org.mlflow.api.proto.Service.MetricSearchExpression.Builder getMetricBuilder() { - return getMetricFieldBuilder().getBuilder(); + public java.lang.String getOrderBy(int index) { + return orderBy_.get(index); } /** *
-       * A metric search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * repeated string order_by = 6; */ - public org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder getMetricOrBuilder() { - if ((expressionCase_ == 1) && (metricBuilder_ != null)) { - return metricBuilder_.getMessageOrBuilder(); - } else { - if (expressionCase_ == 1) { - return (org.mlflow.api.proto.Service.MetricSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); - } + public com.google.protobuf.ByteString + getOrderByBytes(int index) { + return orderBy_.getByteString(index); } /** *
-       * A metric search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.MetricSearchExpression metric = 1; + * repeated string order_by = 6; */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.MetricSearchExpression, org.mlflow.api.proto.Service.MetricSearchExpression.Builder, org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder> - getMetricFieldBuilder() { - if (metricBuilder_ == null) { - if (!(expressionCase_ == 1)) { - expression_ = org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); - } - metricBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.MetricSearchExpression, org.mlflow.api.proto.Service.MetricSearchExpression.Builder, org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder>( - (org.mlflow.api.proto.Service.MetricSearchExpression) expression_, - getParentForChildren(), - isClean()); - expression_ = null; - } - expressionCase_ = 1; - onChanged();; - return metricBuilder_; + public Builder setOrderBy( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderByIsMutable(); + orderBy_.set(index, value); + onChanged(); + return this; } - - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.ParameterSearchExpression, org.mlflow.api.proto.Service.ParameterSearchExpression.Builder, org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder> parameterBuilder_; /** *
-       * A parameter search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.ParameterSearchExpression parameter = 2; + * repeated string order_by = 6; */ - public boolean hasParameter() { - return expressionCase_ == 2; + public Builder addOrderBy( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderByIsMutable(); + orderBy_.add(value); + onChanged(); + return this; } /** *
-       * A parameter search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.ParameterSearchExpression parameter = 2; + * repeated string order_by = 6; */ - public org.mlflow.api.proto.Service.ParameterSearchExpression getParameter() { - if (parameterBuilder_ == null) { - if (expressionCase_ == 2) { - return (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); - } else { - if (expressionCase_ == 2) { - return parameterBuilder_.getMessage(); - } - return org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); - } + public Builder addAllOrderBy( + java.lang.Iterable values) { + ensureOrderByIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, orderBy_); + onChanged(); + return this; } /** *
-       * A parameter search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.ParameterSearchExpression parameter = 2; + * repeated string order_by = 6; */ - public Builder setParameter(org.mlflow.api.proto.Service.ParameterSearchExpression value) { - if (parameterBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - expression_ = value; - onChanged(); - } else { - parameterBuilder_.setMessage(value); - } - expressionCase_ = 2; + public Builder clearOrderBy() { + orderBy_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); return this; } /** *
-       * A parameter search expression.
+       * List of columns to order by, including attributes, params, metrics, and tags, each with an
+       * optional "DESC" or "ASC" annotation ("ASC" is the default).
+       * Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"]
+       * Ties are broken by start_time DESC, followed by run_id, for runs with the same start time
+       * (this is also the default ordering when order_by is not provided).
        * 
* - * optional .mlflow.ParameterSearchExpression parameter = 2; + * repeated string order_by = 6; */ - public Builder setParameter( - org.mlflow.api.proto.Service.ParameterSearchExpression.Builder builderForValue) { - if (parameterBuilder_ == null) { - expression_ = builderForValue.build(); - onChanged(); - } else { - parameterBuilder_.setMessage(builderForValue.build()); - } - expressionCase_ = 2; + public Builder addOrderByBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderByIsMutable(); + orderBy_.add(value); + onChanged(); return this; } + + private java.lang.Object pageToken_ = ""; /** - *
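// --- Editor's example (illustrative sketch, not part of the generated file) ---
// order_by is a repeated string field, so the builder gains list-style accessors
// (addOrderBy, addAllOrderBy, setOrderBy(index, value), clearOrderBy) backed by the
// LazyStringList above. Entries use the documented "column [ASC|DESC]" form:
static void orderByExample() {
  org.mlflow.api.proto.Service.SearchRuns.Builder b =
      org.mlflow.api.proto.Service.SearchRuns.newBuilder()
          .addOrderBy("metrics.rmse ASC")
          .addOrderBy("params.input DESC");
  assert b.getOrderByCount() == 2;
  assert "metrics.rmse ASC".equals(b.getOrderBy(0));
}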
-       * A parameter search expression.
-       * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; + * optional string page_token = 7; */ - public Builder mergeParameter(org.mlflow.api.proto.Service.ParameterSearchExpression value) { - if (parameterBuilder_ == null) { - if (expressionCase_ == 2 && - expression_ != org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance()) { - expression_ = org.mlflow.api.proto.Service.ParameterSearchExpression.newBuilder((org.mlflow.api.proto.Service.ParameterSearchExpression) expression_) - .mergeFrom(value).buildPartial(); - } else { - expression_ = value; + public boolean hasPageToken() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional string page_token = 7; + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + pageToken_ = s; } - onChanged(); + return s; } else { - if (expressionCase_ == 2) { - parameterBuilder_.mergeFrom(value); - } - parameterBuilder_.setMessage(value); + return (java.lang.String) ref; } - expressionCase_ = 2; - return this; } /** - *
-       * A parameter search expression.
-       * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; + * optional string page_token = 7; */ - public Builder clearParameter() { - if (parameterBuilder_ == null) { - if (expressionCase_ == 2) { - expressionCase_ = 0; - expression_ = null; - onChanged(); - } + public com.google.protobuf.ByteString + getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + pageToken_ = b; + return b; } else { - if (expressionCase_ == 2) { - expressionCase_ = 0; - expression_ = null; - } - parameterBuilder_.clear(); + return (com.google.protobuf.ByteString) ref; } - return this; } /** - *
-       * A parameter search expression.
-       * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; + * optional string page_token = 7; */ - public org.mlflow.api.proto.Service.ParameterSearchExpression.Builder getParameterBuilder() { - return getParameterFieldBuilder().getBuilder(); + public Builder setPageToken( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + pageToken_ = value; + onChanged(); + return this; } /** - *
-       * A parameter search expression.
-       * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; + * optional string page_token = 7; */ - public org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder getParameterOrBuilder() { - if ((expressionCase_ == 2) && (parameterBuilder_ != null)) { - return parameterBuilder_.getMessageOrBuilder(); - } else { - if (expressionCase_ == 2) { - return (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_; - } - return org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); - } + public Builder clearPageToken() { + bitField0_ = (bitField0_ & ~0x00000020); + pageToken_ = getDefaultInstance().getPageToken(); + onChanged(); + return this; } /** - *
-       * A parameter search expression.
-       * 
- * - * optional .mlflow.ParameterSearchExpression parameter = 2; + * optional string page_token = 7; */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.ParameterSearchExpression, org.mlflow.api.proto.Service.ParameterSearchExpression.Builder, org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder> - getParameterFieldBuilder() { - if (parameterBuilder_ == null) { - if (!(expressionCase_ == 2)) { - expression_ = org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); - } - parameterBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.ParameterSearchExpression, org.mlflow.api.proto.Service.ParameterSearchExpression.Builder, org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder>( - (org.mlflow.api.proto.Service.ParameterSearchExpression) expression_, - getParentForChildren(), - isClean()); - expression_ = null; - } - expressionCase_ = 2; - onChanged();; - return parameterBuilder_; + public Builder setPageTokenBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + pageToken_ = value; + onChanged(); + return this; } @java.lang.Override public final Builder setUnknownFields( @@ -34256,145 +34482,147 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.SearchExpression) + // @@protoc_insertion_point(builder_scope:mlflow.SearchRuns) } - // @@protoc_insertion_point(class_scope:mlflow.SearchExpression) - private static final org.mlflow.api.proto.Service.SearchExpression DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.SearchRuns) + private static final org.mlflow.api.proto.Service.SearchRuns DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.SearchExpression(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.SearchRuns(); } - public static org.mlflow.api.proto.Service.SearchExpression getDefaultInstance() { + public static org.mlflow.api.proto.Service.SearchRuns getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public SearchExpression parsePartialFrom( + public SearchRuns parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SearchExpression(input, extensionRegistry); + return new SearchRuns(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.SearchExpression getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.SearchRuns getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface MetricSearchExpressionOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.MetricSearchExpression) + public interface ListArtifactsOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.ListArtifacts) com.google.protobuf.MessageOrBuilder { /** *
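// --- Editor's example (illustrative sketch, not part of the generated file) ---
// page_token carries an opaque pagination cursor. A follow-up request is typically the
// previous request with the token swapped in; how the token is obtained (normally from
// the server's response) is outside this message. toBuilder() copies all set fields:
static org.mlflow.api.proto.Service.SearchRuns nextPageExample(
    org.mlflow.api.proto.Service.SearchRuns previous, String tokenFromServer) {
  return previous.toBuilder().setPageToken(tokenFromServer).build();
}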
-     * :ref:`mlflowMetric` key for search.
+     * ID of the run whose artifacts to list. Must be provided.
      * 
* - * optional string key = 1; + * optional string run_id = 3; */ - boolean hasKey(); + boolean hasRunId(); /** *
-     * :ref:`mlflowMetric` key for search.
+     * ID of the run whose artifacts to list. Must be provided.
      * 
* - * optional string key = 1; + * optional string run_id = 3; */ - java.lang.String getKey(); + java.lang.String getRunId(); /** *
-     * :ref:`mlflowMetric` key for search.
+     * ID of the run whose artifacts to list. Must be provided.
      * 
* - * optional string key = 1; + * optional string run_id = 3; */ com.google.protobuf.ByteString - getKeyBytes(); + getRunIdBytes(); /** *
-     * [Deprecated in 0.7.0, to be removed in future version] 
-     * Float clause for comparison. Use 'double' instead.
+     * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional .mlflow.FloatClause float = 2; + * optional string run_uuid = 1; */ - boolean hasFloat(); + boolean hasRunUuid(); /** *
-     * [Deprecated in 0.7.0, to be removed in future version] 
-     * Float clause for comparison. Use 'double' instead.
+     * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional .mlflow.FloatClause float = 2; + * optional string run_uuid = 1; */ - org.mlflow.api.proto.Service.FloatClause getFloat(); + java.lang.String getRunUuid(); /** *
-     * [Deprecated in 0.7.0, to be removed in future version] 
-     * Float clause for comparison. Use 'double' instead.
+     * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+     * be removed in a future MLflow version.
      * 
* - * optional .mlflow.FloatClause float = 2; + * optional string run_uuid = 1; */ - org.mlflow.api.proto.Service.FloatClauseOrBuilder getFloatOrBuilder(); + com.google.protobuf.ByteString + getRunUuidBytes(); /** *
-     * Double clause of comparison
+     * Filter artifacts matching this path (a relative path from the root artifact directory).
      * 
* - * optional .mlflow.DoubleClause double = 3; + * optional string path = 2; */ - boolean hasDouble(); + boolean hasPath(); /** *
-     * Double clause of comparison
+     * Filter artifacts matching this path (a relative path from the root artifact directory).
      * 
* - * optional .mlflow.DoubleClause double = 3; + * optional string path = 2; */ - org.mlflow.api.proto.Service.DoubleClause getDouble(); + java.lang.String getPath(); /** *
-     * Double clause of comparison
+     * Filter artifacts matching this path (a relative path from the root artifact directory).
      * 
* - * optional .mlflow.DoubleClause double = 3; + * optional string path = 2; */ - org.mlflow.api.proto.Service.DoubleClauseOrBuilder getDoubleOrBuilder(); - - public org.mlflow.api.proto.Service.MetricSearchExpression.ClauseCase getClauseCase(); + com.google.protobuf.ByteString + getPathBytes(); } /** - * Protobuf type {@code mlflow.MetricSearchExpression} + * Protobuf type {@code mlflow.ListArtifacts} */ - public static final class MetricSearchExpression extends + public static final class ListArtifacts extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.MetricSearchExpression) - MetricSearchExpressionOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.ListArtifacts) + ListArtifactsOrBuilder { private static final long serialVersionUID = 0L; - // Use MetricSearchExpression.newBuilder() to construct. - private MetricSearchExpression(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use ListArtifacts.newBuilder() to construct. + private ListArtifacts(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private MetricSearchExpression() { - key_ = ""; + private ListArtifacts() { + runId_ = ""; + runUuid_ = ""; + path_ = ""; } @java.lang.Override @@ -34402,7 +34630,7 @@ private MetricSearchExpression() { getUnknownFields() { return this.unknownFields; } - private MetricSearchExpression( + private ListArtifacts( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -34423,36 +34651,20 @@ private MetricSearchExpression( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - key_ = bs; + bitField0_ |= 0x00000002; + runUuid_ = bs; break; } case 18: { - org.mlflow.api.proto.Service.FloatClause.Builder subBuilder = null; - if (clauseCase_ == 2) { - subBuilder = ((org.mlflow.api.proto.Service.FloatClause) clause_).toBuilder(); - } - clause_ = - input.readMessage(org.mlflow.api.proto.Service.FloatClause.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom((org.mlflow.api.proto.Service.FloatClause) clause_); - clause_ = subBuilder.buildPartial(); - } - clauseCase_ = 2; + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000004; + path_ = bs; break; } case 26: { - org.mlflow.api.proto.Service.DoubleClause.Builder subBuilder = null; - if (clauseCase_ == 3) { - subBuilder = ((org.mlflow.api.proto.Service.DoubleClause) clause_).toBuilder(); - } - clause_ = - input.readMessage(org.mlflow.api.proto.Service.DoubleClause.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom((org.mlflow.api.proto.Service.DoubleClause) clause_); - clause_ = subBuilder.buildPartial(); - } - clauseCase_ = 3; + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + runId_ = bs; break; } default: { @@ -34476,1372 +34688,1291 @@ private MetricSearchExpression( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_MetricSearchExpression_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_MetricSearchExpression_fieldAccessorTable + return 
org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.MetricSearchExpression.class, org.mlflow.api.proto.Service.MetricSearchExpression.Builder.class); + org.mlflow.api.proto.Service.ListArtifacts.class, org.mlflow.api.proto.Service.ListArtifacts.Builder.class); } - private int bitField0_; - private int clauseCase_ = 0; - private java.lang.Object clause_; - public enum ClauseCase - implements com.google.protobuf.Internal.EnumLite { - FLOAT(2), - DOUBLE(3), - CLAUSE_NOT_SET(0); - private final int value; - private ClauseCase(int value) { - this.value = value; - } + public interface ResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.ListArtifacts.Response) + com.google.protobuf.MessageOrBuilder { + /** - * @deprecated Use {@link #forNumber(int)} instead. + *
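// --- Editor's example (illustrative sketch, not part of the generated file) ---
// A ListArtifacts request identifies the run via run_id (run_uuid is the deprecated
// alias) and can optionally scope the listing with path. The values here are made up:
static org.mlflow.api.proto.Service.ListArtifacts listArtifactsRequestExample() {
  return org.mlflow.api.proto.Service.ListArtifacts.newBuilder()
      .setRunId("0123456789abcdef")   // hypothetical run ID
      .setPath("model")               // relative to the run's root artifact directory
      .build();
}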
+       * Root artifact directory for the run.
+       * 
+ * + * optional string root_uri = 1; */ - @java.lang.Deprecated - public static ClauseCase valueOf(int value) { - return forNumber(value); - } - - public static ClauseCase forNumber(int value) { - switch (value) { - case 2: return FLOAT; - case 3: return DOUBLE; - case 0: return CLAUSE_NOT_SET; - default: return null; - } - } - public int getNumber() { - return this.value; - } - }; - - public ClauseCase - getClauseCase() { - return ClauseCase.forNumber( - clauseCase_); - } + boolean hasRootUri(); + /** + *
+       * Root artifact directory for the run.
+       * 
+ * + * optional string root_uri = 1; + */ + java.lang.String getRootUri(); + /** + *
+       * Root artifact directory for the run.
+       * 
+ * + * optional string root_uri = 1; + */ + com.google.protobuf.ByteString + getRootUriBytes(); - public static final int KEY_FIELD_NUMBER = 1; - private volatile java.lang.Object key_; - /** - *
-     * :ref:`mlflowMetric` key for search.
-     * 
- * - * optional string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + java.util.List + getFilesList(); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + org.mlflow.api.proto.Service.FileInfo getFiles(int index); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + int getFilesCount(); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + java.util.List + getFilesOrBuilderList(); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + org.mlflow.api.proto.Service.FileInfoOrBuilder getFilesOrBuilder( + int index); } /** - *
-     * :ref:`mlflowMetric` key for search.
-     * 
- * - * optional string key = 1; + * Protobuf type {@code mlflow.ListArtifacts.Response} */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; + public static final class Response extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:mlflow.ListArtifacts.Response) + ResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use Response.newBuilder() to construct. + private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); } - } - /** - *
-     * :ref:`mlflowMetric` key for search.
-     * 
- * - * optional string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + private Response() { + rootUri_ = ""; + files_ = java.util.Collections.emptyList(); } - } - public static final int FLOAT_FIELD_NUMBER = 2; - /** - *
-     * [Deprecated in 0.7.0, to be removed in future version] 
-     * Float clause for comparison. Use 'double' instead.
-     * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public boolean hasFloat() { - return clauseCase_ == 2; - } - /** - *
-     * [Deprecated in 0.7.0, to be removed in future version] 
-     * Float clause for comparison. Use 'double' instead.
-     * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public org.mlflow.api.proto.Service.FloatClause getFloat() { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.FloatClause) clause_; - } - return org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); - } - /** - *
-     * [Deprecated in 0.7.0, to be removed in future version] 
-     * Float clause for comparison. Use 'double' instead.
-     * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public org.mlflow.api.proto.Service.FloatClauseOrBuilder getFloatOrBuilder() { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.FloatClause) clause_; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; } - return org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); - } - - public static final int DOUBLE_FIELD_NUMBER = 3; - /** - *
-     * Double clause of comparison
-     * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public boolean hasDouble() { - return clauseCase_ == 3; - } - /** - *
-     * Double clause of comparison
-     * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public org.mlflow.api.proto.Service.DoubleClause getDouble() { - if (clauseCase_ == 3) { - return (org.mlflow.api.proto.Service.DoubleClause) clause_; + private Response( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + rootUri_ = bs; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + files_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + files_.add( + input.readMessage(org.mlflow.api.proto.Service.FileInfo.PARSER, extensionRegistry)); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + files_ = java.util.Collections.unmodifiableList(files_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } } - return org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); - } - /** - *
-     * Double clause of comparison
-     * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public org.mlflow.api.proto.Service.DoubleClauseOrBuilder getDoubleOrBuilder() { - if (clauseCase_ == 3) { - return (org.mlflow.api.proto.Service.DoubleClause) clause_; + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_descriptor; } - return org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - memoizedIsInitialized = 1; - return true; - } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.mlflow.api.proto.Service.ListArtifacts.Response.class, org.mlflow.api.proto.Service.ListArtifacts.Response.Builder.class); + } - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, key_); + private int bitField0_; + public static final int ROOT_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object rootUri_; + /** + *
+       * Root artifact directory for the run.
+       * 
+ * + * optional string root_uri = 1; + */ + public boolean hasRootUri() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - if (clauseCase_ == 2) { - output.writeMessage(2, (org.mlflow.api.proto.Service.FloatClause) clause_); + /** + *
+       * Root artifact directory for the run.
+       * 
+ * + * optional string root_uri = 1; + */ + public java.lang.String getRootUri() { + java.lang.Object ref = rootUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rootUri_ = s; + } + return s; + } } - if (clauseCase_ == 3) { - output.writeMessage(3, (org.mlflow.api.proto.Service.DoubleClause) clause_); + /** + *
+       * Root artifact directory for the run.
+       * 
+ * + * optional string root_uri = 1; + */ + public com.google.protobuf.ByteString + getRootUriBytes() { + java.lang.Object ref = rootUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, key_); + public static final int FILES_FIELD_NUMBER = 2; + private java.util.List files_; + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public java.util.List getFilesList() { + return files_; } - if (clauseCase_ == 2) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, (org.mlflow.api.proto.Service.FloatClause) clause_); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public java.util.List + getFilesOrBuilderList() { + return files_; } - if (clauseCase_ == 3) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, (org.mlflow.api.proto.Service.DoubleClause) clause_); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public int getFilesCount() { + return files_.size(); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfo getFiles(int index) { + return files_.get(index); } - if (!(obj instanceof org.mlflow.api.proto.Service.MetricSearchExpression)) { - return super.equals(obj); + /** + *
+       * File location and metadata for artifacts.
+       * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfoOrBuilder getFilesOrBuilder( + int index) { + return files_.get(index); } - org.mlflow.api.proto.Service.MetricSearchExpression other = (org.mlflow.api.proto.Service.MetricSearchExpression) obj; - boolean result = true; - result = result && (hasKey() == other.hasKey()); - if (hasKey()) { - result = result && getKey() - .equals(other.getKey()); + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; } - result = result && getClauseCase().equals( - other.getClauseCase()); - if (!result) return false; - switch (clauseCase_) { - case 2: - result = result && getFloat() - .equals(other.getFloat()); - break; - case 3: - result = result && getDouble() - .equals(other.getDouble()); - break; - case 0: - default: + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, rootUri_); + } + for (int i = 0; i < files_.size(); i++) { + output.writeMessage(2, files_.get(i)); + } + unknownFields.writeTo(output); } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, rootUri_); + } + for (int i = 0; i < files_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, files_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasKey()) { - hash = (37 * hash) + KEY_FIELD_NUMBER; - hash = (53 * hash) + getKey().hashCode(); + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.mlflow.api.proto.Service.ListArtifacts.Response)) { + return super.equals(obj); + } + org.mlflow.api.proto.Service.ListArtifacts.Response other = (org.mlflow.api.proto.Service.ListArtifacts.Response) obj; + + boolean result = true; + result = result && (hasRootUri() == other.hasRootUri()); + if (hasRootUri()) { + result = result && getRootUri() + .equals(other.getRootUri()); + } + result = result && getFilesList() + .equals(other.getFilesList()); + result = result && unknownFields.equals(other.unknownFields); + return result; } - switch (clauseCase_) { - case 2: - hash = (37 * hash) + FLOAT_FIELD_NUMBER; - hash = (53 * hash) + getFloat().hashCode(); - break; - case 3: - hash = (37 * hash) + DOUBLE_FIELD_NUMBER; - hash = (53 * hash) + getDouble().hashCode(); - break; - case 0: - default: + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRootUri()) { + hash = (37 * hash) + ROOT_URI_FIELD_NUMBER; + hash = (53 * hash) + getRootUri().hashCode(); + } + if (getFilesCount() > 0) { + hash = (37 * hash) + 
FILES_FIELD_NUMBER; + hash = (53 * hash) + getFilesList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.MetricSearchExpression parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder 
newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.MetricSearchExpression prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.MetricSearchExpression} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.MetricSearchExpression) - org.mlflow.api.proto.Service.MetricSearchExpressionOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_MetricSearchExpression_descriptor; + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_MetricSearchExpression_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.MetricSearchExpression.class, org.mlflow.api.proto.Service.MetricSearchExpression.Builder.class); + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - - // Construct using org.mlflow.api.proto.Service.MetricSearchExpression.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - @java.lang.Override - public Builder clear() { - super.clear(); - key_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - clauseCase_ = 0; - clause_ = null; - return this; + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - - @java.lang.Override - public 
com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_MetricSearchExpression_descriptor; + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); } - - @java.lang.Override - public org.mlflow.api.proto.Service.MetricSearchExpression getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance(); + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); } - - @java.lang.Override - public org.mlflow.api.proto.Service.MetricSearchExpression build() { - org.mlflow.api.proto.Service.MetricSearchExpression result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); } - - @java.lang.Override - public org.mlflow.api.proto.Service.MetricSearchExpression buildPartial() { - org.mlflow.api.proto.Service.MetricSearchExpression result = new org.mlflow.api.proto.Service.MetricSearchExpression(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.key_ = key_; - if (clauseCase_ == 2) { - if (floatBuilder_ == null) { - result.clause_ = clause_; - } else { - result.clause_ = floatBuilder_.build(); - } - } - if (clauseCase_ == 3) { - if (doubleBuilder_ == null) { - result.clause_ = clause_; - } else { - result.clause_ = doubleBuilder_.build(); - } - } - result.bitField0_ = to_bitField0_; - result.clauseCase_ = clauseCase_; - onBuilt(); - return result; + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); + public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); } + @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); + 
public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); + public static Builder newBuilder(org.mlflow.api.proto.Service.ListArtifacts.Response prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); } + @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.MetricSearchExpression) { - return mergeFrom((org.mlflow.api.proto.Service.MetricSearchExpression)other); - } else { - super.mergeFrom(other); - return this; + /** + * Protobuf type {@code mlflow.ListArtifacts.Response} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:mlflow.ListArtifacts.Response) + org.mlflow.api.proto.Service.ListArtifacts.ResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_descriptor; } - } - public Builder mergeFrom(org.mlflow.api.proto.Service.MetricSearchExpression other) { - if (other == org.mlflow.api.proto.Service.MetricSearchExpression.getDefaultInstance()) return this; - if (other.hasKey()) { - bitField0_ |= 0x00000001; - key_ = other.key_; - onChanged(); - } - switch (other.getClauseCase()) { - case FLOAT: { - mergeFloat(other.getFloat()); - break; - } - case DOUBLE: { - mergeDouble(other.getDouble()); - break; - } - case CLAUSE_NOT_SET: { - break; - } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.mlflow.api.proto.Service.ListArtifacts.Response.class, org.mlflow.api.proto.Service.ListArtifacts.Response.Builder.class); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - @java.lang.Override - public final boolean isInitialized() { - return true; - } + // Construct using org.mlflow.api.proto.Service.ListArtifacts.Response.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.MetricSearchExpression parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - 
parsedMessage = (org.mlflow.api.proto.Service.MetricSearchExpression) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getFilesFieldBuilder(); } } - return this; - } - private int clauseCase_ = 0; - private java.lang.Object clause_; - public ClauseCase - getClauseCase() { - return ClauseCase.forNumber( - clauseCase_); - } + @java.lang.Override + public Builder clear() { + super.clear(); + rootUri_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (filesBuilder_ == null) { + files_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + filesBuilder_.clear(); + } + return this; + } - public Builder clearClause() { - clauseCase_ = 0; - clause_ = null; - onChanged(); - return this; - } + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_descriptor; + } - private int bitField0_; + @java.lang.Override + public org.mlflow.api.proto.Service.ListArtifacts.Response getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.ListArtifacts.Response.getDefaultInstance(); + } - private java.lang.Object key_ = ""; - /** - *
-       * :ref:`mlflowMetric` key for search.
-       * 
- * - * optional string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * :ref:`mlflowMetric` key for search.
-       * 
- * - * optional string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; + @java.lang.Override + public org.mlflow.api.proto.Service.ListArtifacts.Response build() { + org.mlflow.api.proto.Service.ListArtifacts.Response result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * :ref:`mlflowMetric` key for search.
-       * 
- * - * optional string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + return result; } - } - /** - *
-       * :ref:`mlflowMetric` key for search.
-       * 
- * - * optional string key = 1; - */ - public Builder setKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - /** - *
-       * :ref:`mlflowMetric` key for search.
-       * 
- * - * optional string key = 1; - */ - public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000001); - key_ = getDefaultInstance().getKey(); - onChanged(); - return this; - } - /** - *
-       * :ref:`mlflowMetric` key for search.
-       * 
- * - * optional string key = 1; - */ - public Builder setKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.FloatClause, org.mlflow.api.proto.Service.FloatClause.Builder, org.mlflow.api.proto.Service.FloatClauseOrBuilder> floatBuilder_; - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public boolean hasFloat() { - return clauseCase_ == 2; - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public org.mlflow.api.proto.Service.FloatClause getFloat() { - if (floatBuilder_ == null) { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.FloatClause) clause_; + @java.lang.Override + public org.mlflow.api.proto.Service.ListArtifacts.Response buildPartial() { + org.mlflow.api.proto.Service.ListArtifacts.Response result = new org.mlflow.api.proto.Service.ListArtifacts.Response(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; } - return org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); - } else { - if (clauseCase_ == 2) { - return floatBuilder_.getMessage(); + result.rootUri_ = rootUri_; + if (filesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + files_ = java.util.Collections.unmodifiableList(files_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.files_ = files_; + } else { + result.files_ = filesBuilder_.build(); } - return org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public Builder setFloat(org.mlflow.api.proto.Service.FloatClause value) { - if (floatBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - clause_ = value; - onChanged(); - } else { - floatBuilder_.setMessage(value); + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); } - clauseCase_ = 2; - return this; - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public Builder setFloat( - org.mlflow.api.proto.Service.FloatClause.Builder builderForValue) { - if (floatBuilder_ == null) { - clause_ = builderForValue.build(); - onChanged(); - } else { - floatBuilder_.setMessage(builderForValue.build()); + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); } - clauseCase_ = 2; - return this; - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public Builder mergeFloat(org.mlflow.api.proto.Service.FloatClause value) { - if (floatBuilder_ == null) { - if (clauseCase_ == 2 && - clause_ != org.mlflow.api.proto.Service.FloatClause.getDefaultInstance()) { - clause_ = org.mlflow.api.proto.Service.FloatClause.newBuilder((org.mlflow.api.proto.Service.FloatClause) clause_) - .mergeFrom(value).buildPartial(); - } else { - clause_ = value; - } - onChanged(); - } else { - if (clauseCase_ == 2) { - floatBuilder_.mergeFrom(value); - } - floatBuilder_.setMessage(value); + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); } - clauseCase_ = 2; - return this; - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public Builder clearFloat() { - if (floatBuilder_ == null) { - if (clauseCase_ == 2) { - clauseCase_ = 0; - clause_ = null; - onChanged(); - } - } else { - if (clauseCase_ == 2) { - clauseCase_ = 0; - clause_ = null; - } - floatBuilder_.clear(); + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); } - return this; - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public org.mlflow.api.proto.Service.FloatClause.Builder getFloatBuilder() { - return getFloatFieldBuilder().getBuilder(); - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - public org.mlflow.api.proto.Service.FloatClauseOrBuilder getFloatOrBuilder() { - if ((clauseCase_ == 2) && (floatBuilder_ != null)) { - return floatBuilder_.getMessageOrBuilder(); - } else { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.FloatClause) clause_; - } - return org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); } - } - /** - *
-       * [Deprecated in 0.7.0, to be removed in future version] 
-       * Float clause for comparison. Use 'double' instead.
-       * 
- * - * optional .mlflow.FloatClause float = 2; - */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.FloatClause, org.mlflow.api.proto.Service.FloatClause.Builder, org.mlflow.api.proto.Service.FloatClauseOrBuilder> - getFloatFieldBuilder() { - if (floatBuilder_ == null) { - if (!(clauseCase_ == 2)) { - clause_ = org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); - } - floatBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.FloatClause, org.mlflow.api.proto.Service.FloatClause.Builder, org.mlflow.api.proto.Service.FloatClauseOrBuilder>( - (org.mlflow.api.proto.Service.FloatClause) clause_, - getParentForChildren(), - isClean()); - clause_ = null; + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.mlflow.api.proto.Service.ListArtifacts.Response) { + return mergeFrom((org.mlflow.api.proto.Service.ListArtifacts.Response)other); + } else { + super.mergeFrom(other); + return this; + } } - clauseCase_ = 2; - onChanged();; - return floatBuilder_; - } - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.DoubleClause, org.mlflow.api.proto.Service.DoubleClause.Builder, org.mlflow.api.proto.Service.DoubleClauseOrBuilder> doubleBuilder_; - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public boolean hasDouble() { - return clauseCase_ == 3; - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public org.mlflow.api.proto.Service.DoubleClause getDouble() { - if (doubleBuilder_ == null) { - if (clauseCase_ == 3) { - return (org.mlflow.api.proto.Service.DoubleClause) clause_; + public Builder mergeFrom(org.mlflow.api.proto.Service.ListArtifacts.Response other) { + if (other == org.mlflow.api.proto.Service.ListArtifacts.Response.getDefaultInstance()) return this; + if (other.hasRootUri()) { + bitField0_ |= 0x00000001; + rootUri_ = other.rootUri_; + onChanged(); } - return org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); - } else { - if (clauseCase_ == 3) { - return doubleBuilder_.getMessage(); + if (filesBuilder_ == null) { + if (!other.files_.isEmpty()) { + if (files_.isEmpty()) { + files_ = other.files_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureFilesIsMutable(); + files_.addAll(other.files_); + } + onChanged(); + } + } else { + if (!other.files_.isEmpty()) { + if (filesBuilder_.isEmpty()) { + filesBuilder_.dispose(); + filesBuilder_ = null; + files_ = other.files_; + bitField0_ = (bitField0_ & ~0x00000002); + filesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getFilesFieldBuilder() : null; + } else { + filesBuilder_.addAllMessages(other.files_); + } + } } - return org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; } - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public Builder setDouble(org.mlflow.api.proto.Service.DoubleClause value) { - if (doubleBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.mlflow.api.proto.Service.ListArtifacts.Response parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.mlflow.api.proto.Service.ListArtifacts.Response) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } } - clause_ = value; - onChanged(); - } else { - doubleBuilder_.setMessage(value); + return this; } - clauseCase_ = 3; - return this; - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public Builder setDouble( - org.mlflow.api.proto.Service.DoubleClause.Builder builderForValue) { - if (doubleBuilder_ == null) { - clause_ = builderForValue.build(); - onChanged(); - } else { - doubleBuilder_.setMessage(builderForValue.build()); + private int bitField0_; + + private java.lang.Object rootUri_ = ""; + /** + *
+         * Root artifact directory for the run.
+         * &lt;/pre&gt;
+         *
+         * &lt;code&gt;optional string root_uri = 1;&lt;/code&gt;
+         */
+        public boolean hasRootUri() {
+          return ((bitField0_ &amp; 0x00000001) == 0x00000001);
+        }
-        clauseCase_ = 3;
-        return this;
-      }
-      /**
-       * &lt;pre&gt;
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public Builder mergeDouble(org.mlflow.api.proto.Service.DoubleClause value) { - if (doubleBuilder_ == null) { - if (clauseCase_ == 3 && - clause_ != org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance()) { - clause_ = org.mlflow.api.proto.Service.DoubleClause.newBuilder((org.mlflow.api.proto.Service.DoubleClause) clause_) - .mergeFrom(value).buildPartial(); + /** + *
+         * Root artifact directory for the run.
+         * 
+ * + * optional string root_uri = 1; + */ + public java.lang.String getRootUri() { + java.lang.Object ref = rootUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rootUri_ = s; + } + return s; } else { - clause_ = value; - } - onChanged(); - } else { - if (clauseCase_ == 3) { - doubleBuilder_.mergeFrom(value); + return (java.lang.String) ref; } - doubleBuilder_.setMessage(value); } - clauseCase_ = 3; - return this; - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public Builder clearDouble() { - if (doubleBuilder_ == null) { - if (clauseCase_ == 3) { - clauseCase_ = 0; - clause_ = null; - onChanged(); - } - } else { - if (clauseCase_ == 3) { - clauseCase_ = 0; - clause_ = null; + /** + *
+         * Root artifact directory for the run.
+         * 
+ * + * optional string root_uri = 1; + */ + public com.google.protobuf.ByteString + getRootUriBytes() { + java.lang.Object ref = rootUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - doubleBuilder_.clear(); } - return this; - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public org.mlflow.api.proto.Service.DoubleClause.Builder getDoubleBuilder() { - return getDoubleFieldBuilder().getBuilder(); - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - public org.mlflow.api.proto.Service.DoubleClauseOrBuilder getDoubleOrBuilder() { - if ((clauseCase_ == 3) && (doubleBuilder_ != null)) { - return doubleBuilder_.getMessageOrBuilder(); - } else { - if (clauseCase_ == 3) { - return (org.mlflow.api.proto.Service.DoubleClause) clause_; - } - return org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); + /** + *
+         * Root artifact directory for the run.
+         * 
+ * + * optional string root_uri = 1; + */ + public Builder setRootUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rootUri_ = value; + onChanged(); + return this; } - } - /** - *
-       * Double clause of comparison
-       * 
- * - * optional .mlflow.DoubleClause double = 3; - */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.DoubleClause, org.mlflow.api.proto.Service.DoubleClause.Builder, org.mlflow.api.proto.Service.DoubleClauseOrBuilder> - getDoubleFieldBuilder() { - if (doubleBuilder_ == null) { - if (!(clauseCase_ == 3)) { - clause_ = org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); - } - doubleBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.DoubleClause, org.mlflow.api.proto.Service.DoubleClause.Builder, org.mlflow.api.proto.Service.DoubleClauseOrBuilder>( - (org.mlflow.api.proto.Service.DoubleClause) clause_, - getParentForChildren(), - isClean()); - clause_ = null; + /** + *
+         * Root artifact directory for the run.
+         * 
+ * + * optional string root_uri = 1; + */ + public Builder clearRootUri() { + bitField0_ = (bitField0_ & ~0x00000001); + rootUri_ = getDefaultInstance().getRootUri(); + onChanged(); + return this; } - clauseCase_ = 3; - onChanged();; - return doubleBuilder_; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.MetricSearchExpression) - } - - // @@protoc_insertion_point(class_scope:mlflow.MetricSearchExpression) - private static final org.mlflow.api.proto.Service.MetricSearchExpression DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.MetricSearchExpression(); - } - - public static org.mlflow.api.proto.Service.MetricSearchExpression getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public MetricSearchExpression parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new MetricSearchExpression(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.MetricSearchExpression getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - + /** + *
+         * Root artifact directory for the run.
+         * 
+ * + * optional string root_uri = 1; + */ + public Builder setRootUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); } + bitField0_ |= 0x00000001; + rootUri_ = value; + onChanged(); + return this; + } - public interface ParameterSearchExpressionOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.ParameterSearchExpression) - com.google.protobuf.MessageOrBuilder { - - /** - *
-     * :ref:`mlflowParam` key for search.
-     * 
- * - * optional string key = 1; - */ - boolean hasKey(); - /** - *
-     * :ref:`mlflowParam` key for search.
-     * 
- * - * optional string key = 1; - */ - java.lang.String getKey(); - /** - *
-     * :ref:`mlflowParam` key for search.
-     * 
- * - * optional string key = 1; - */ - com.google.protobuf.ByteString - getKeyBytes(); - - /** - *
-     * String clause for comparison.
-     * 
- * - * optional .mlflow.StringClause string = 2; - */ - boolean hasString(); - /** - *
-     * String clause for comparison.
-     * 
- * - * optional .mlflow.StringClause string = 2; - */ - org.mlflow.api.proto.Service.StringClause getString(); - /** - *
-     * String clause for comparison.
-     * 
- * - * optional .mlflow.StringClause string = 2; - */ - org.mlflow.api.proto.Service.StringClauseOrBuilder getStringOrBuilder(); + private java.util.List files_ = + java.util.Collections.emptyList(); + private void ensureFilesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + files_ = new java.util.ArrayList(files_); + bitField0_ |= 0x00000002; + } + } - public org.mlflow.api.proto.Service.ParameterSearchExpression.ClauseCase getClauseCase(); - } - /** - * Protobuf type {@code mlflow.ParameterSearchExpression} - */ - public static final class ParameterSearchExpression extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.ParameterSearchExpression) - ParameterSearchExpressionOrBuilder { - private static final long serialVersionUID = 0L; - // Use ParameterSearchExpression.newBuilder() to construct. - private ParameterSearchExpression(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private ParameterSearchExpression() { - key_ = ""; - } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.FileInfo, org.mlflow.api.proto.Service.FileInfo.Builder, org.mlflow.api.proto.Service.FileInfoOrBuilder> filesBuilder_; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ParameterSearchExpression( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - key_ = bs; - break; - } - case 18: { - org.mlflow.api.proto.Service.StringClause.Builder subBuilder = null; - if (clauseCase_ == 2) { - subBuilder = ((org.mlflow.api.proto.Service.StringClause) clause_).toBuilder(); - } - clause_ = - input.readMessage(org.mlflow.api.proto.Service.StringClause.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom((org.mlflow.api.proto.Service.StringClause) clause_); - clause_ = subBuilder.buildPartial(); - } - clauseCase_ = 2; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public java.util.List getFilesList() { + if (filesBuilder_ == null) { + return java.util.Collections.unmodifiableList(files_); + } else { + return filesBuilder_.getMessageList(); } } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ParameterSearchExpression_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ParameterSearchExpression_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.ParameterSearchExpression.class, org.mlflow.api.proto.Service.ParameterSearchExpression.Builder.class); - } - - private int bitField0_; - private int clauseCase_ = 0; - private java.lang.Object clause_; - public enum ClauseCase - implements com.google.protobuf.Internal.EnumLite { - STRING(2), - CLAUSE_NOT_SET(0); - private final int value; - private ClauseCase(int value) { - this.value = value; - } - /** - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static ClauseCase valueOf(int value) { - return forNumber(value); - } - - public static ClauseCase forNumber(int value) { - switch (value) { - case 2: return STRING; - case 0: return CLAUSE_NOT_SET; - default: return null; + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public int getFilesCount() { + if (filesBuilder_ == null) { + return files_.size(); + } else { + return filesBuilder_.getCount(); + } } - } - public int getNumber() { - return this.value; - } - }; - - public ClauseCase - getClauseCase() { - return ClauseCase.forNumber( - clauseCase_); - } - - public static final int KEY_FIELD_NUMBER = 1; - private volatile java.lang.Object key_; - /** - *
-     * :ref:`mlflowParam` key for search.
-     * 
- * - * optional string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-     * :ref:`mlflowParam` key for search.
-     * 
- * - * optional string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfo getFiles(int index) { + if (filesBuilder_ == null) { + return files_.get(index); + } else { + return filesBuilder_.getMessage(index); + } } - return s; - } - } - /** - *
-     * :ref:`mlflowParam` key for search.
-     * 
- * - * optional string key = 1; + /** + *
+         * File location and metadata for artifacts.
+         * &lt;/pre&gt;
+         *
+         * &lt;code&gt;repeated .mlflow.FileInfo files = 2;&lt;/code&gt;
+         */
+        public Builder setFiles(
+            int index, org.mlflow.api.proto.Service.FileInfo value) {
+          if (filesBuilder_ == null) {
+            if (value == null) {
+              throw new NullPointerException();
+            }
+            ensureFilesIsMutable();
+            files_.set(index, value);
+            onChanged();
+          } else {
+            filesBuilder_.setMessage(index, value);
+          }
+          return this;
+        }
+        /**
+         * &lt;pre&gt;
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder setFiles( + int index, org.mlflow.api.proto.Service.FileInfo.Builder builderForValue) { + if (filesBuilder_ == null) { + ensureFilesIsMutable(); + files_.set(index, builderForValue.build()); + onChanged(); + } else { + filesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder addFiles(org.mlflow.api.proto.Service.FileInfo value) { + if (filesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFilesIsMutable(); + files_.add(value); + onChanged(); + } else { + filesBuilder_.addMessage(value); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder addFiles( + int index, org.mlflow.api.proto.Service.FileInfo value) { + if (filesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFilesIsMutable(); + files_.add(index, value); + onChanged(); + } else { + filesBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder addFiles( + org.mlflow.api.proto.Service.FileInfo.Builder builderForValue) { + if (filesBuilder_ == null) { + ensureFilesIsMutable(); + files_.add(builderForValue.build()); + onChanged(); + } else { + filesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder addFiles( + int index, org.mlflow.api.proto.Service.FileInfo.Builder builderForValue) { + if (filesBuilder_ == null) { + ensureFilesIsMutable(); + files_.add(index, builderForValue.build()); + onChanged(); + } else { + filesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * &lt;/pre&gt;
+         *
+         * &lt;code&gt;repeated .mlflow.FileInfo files = 2;&lt;/code&gt;
+         */
+        public Builder addAllFiles(
+            java.lang.Iterable&lt;? extends org.mlflow.api.proto.Service.FileInfo&gt; values) {
+          if (filesBuilder_ == null) {
+            ensureFilesIsMutable();
+            com.google.protobuf.AbstractMessageLite.Builder.addAll(
+                values, files_);
+            onChanged();
+          } else {
+            filesBuilder_.addAllMessages(values);
+          }
+          return this;
+        }
+        /**
+         * &lt;pre&gt;
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder clearFiles() { + if (filesBuilder_ == null) { + files_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + filesBuilder_.clear(); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public Builder removeFiles(int index) { + if (filesBuilder_ == null) { + ensureFilesIsMutable(); + files_.remove(index); + onChanged(); + } else { + filesBuilder_.remove(index); + } + return this; + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfo.Builder getFilesBuilder( + int index) { + return getFilesFieldBuilder().getBuilder(index); + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfoOrBuilder getFilesOrBuilder( + int index) { + if (filesBuilder_ == null) { + return files_.get(index); } else { + return filesBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+         * File location and metadata for artifacts.
+         * &lt;/pre&gt;
+         *
+         * &lt;code&gt;repeated .mlflow.FileInfo files = 2;&lt;/code&gt;
+         */
+        public java.util.List&lt;? extends org.mlflow.api.proto.Service.FileInfoOrBuilder&gt;
+            getFilesOrBuilderList() {
+          if (filesBuilder_ != null) {
+            return filesBuilder_.getMessageOrBuilderList();
+          } else {
+            return java.util.Collections.unmodifiableList(files_);
+          }
+        }
+        /**
+         * &lt;pre&gt;
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfo.Builder addFilesBuilder() { + return getFilesFieldBuilder().addBuilder( + org.mlflow.api.proto.Service.FileInfo.getDefaultInstance()); + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public org.mlflow.api.proto.Service.FileInfo.Builder addFilesBuilder( + int index) { + return getFilesFieldBuilder().addBuilder( + index, org.mlflow.api.proto.Service.FileInfo.getDefaultInstance()); + } + /** + *
+         * File location and metadata for artifacts.
+         * 
+ * + * repeated .mlflow.FileInfo files = 2; + */ + public java.util.List + getFilesBuilderList() { + return getFilesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.FileInfo, org.mlflow.api.proto.Service.FileInfo.Builder, org.mlflow.api.proto.Service.FileInfoOrBuilder> + getFilesFieldBuilder() { + if (filesBuilder_ == null) { + filesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.FileInfo, org.mlflow.api.proto.Service.FileInfo.Builder, org.mlflow.api.proto.Service.FileInfoOrBuilder>( + files_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + files_ = null; + } + return filesBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:mlflow.ListArtifacts.Response) + } + + // @@protoc_insertion_point(class_scope:mlflow.ListArtifacts.Response) + private static final org.mlflow.api.proto.Service.ListArtifacts.Response DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.ListArtifacts.Response(); + } + + public static org.mlflow.api.proto.Service.ListArtifacts.Response getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Response parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Response(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.mlflow.api.proto.Service.ListArtifacts.Response getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object runId_; + /** + *
+     * ID of the run whose artifacts to list. Must be provided.
+     * &lt;/pre&gt;
+     *
+     * &lt;code&gt;optional string run_id = 3;&lt;/code&gt;
+     */
+    public boolean hasRunId() {
+      return ((bitField0_ &amp; 0x00000001) == 0x00000001);
+    }
+    /**
+     * &lt;pre&gt;
+     * ID of the run whose artifacts to list. Must be provided.
+     * &lt;/pre&gt;
+     *
+     * &lt;code&gt;optional string run_id = 3;&lt;/code&gt;
+     */
+    public java.lang.String getRunId() {
+      java.lang.Object ref = runId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          runId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * &lt;pre&gt;
+     * ID of the run whose artifacts to list. Must be provided.
+     * 
+ * + * optional string run_id = 3; */ public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - key_ = b; + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int RUN_UUID_FIELD_NUMBER = 1; + private volatile java.lang.Object runUuid_; + /** + *
+     * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+     * be removed in a future MLflow version.
+     * &lt;/pre&gt;
+     *
+     * &lt;code&gt;optional string run_uuid = 1;&lt;/code&gt;
+     */
+    public boolean hasRunUuid() {
+      return ((bitField0_ &amp; 0x00000002) == 0x00000002);
+    }
+    /**
+     * &lt;pre&gt;
+     * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+     * be removed in a future MLflow version.
+     * 
+ * + * optional string run_uuid = 1; + */ + public java.lang.String getRunUuid() { + java.lang.Object ref = runUuid_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runUuid_ = s; + } + return s; + } + } + /** + *
+     * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+     * be removed in a future MLflow version.
+     * &lt;/pre&gt;
+     *
+     * &lt;code&gt;optional string run_uuid = 1;&lt;/code&gt;
+     */
+    public com.google.protobuf.ByteString
+        getRunUuidBytes() {
+      java.lang.Object ref = runUuid_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        runUuid_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int PATH_FIELD_NUMBER = 2;
+    private volatile java.lang.Object path_;
    /**
     * &lt;pre&gt;
-     * String clause for comparison.
+     * Filter artifacts matching this path (a relative path from the root artifact directory).
      * &lt;/pre&gt;
      *
-     * &lt;code&gt;optional .mlflow.StringClause string = 2;&lt;/code&gt;
+     * &lt;code&gt;optional string path = 2;&lt;/code&gt;
      */
-    public boolean hasString() {
-      return clauseCase_ == 2;
+    public boolean hasPath() {
+      return ((bitField0_ &amp; 0x00000004) == 0x00000004);
    }
    /**
     * &lt;pre&gt;
-     * String clause for comparison.
+     * Filter artifacts matching this path (a relative path from the root artifact directory).
      * 
* - * optional .mlflow.StringClause string = 2; + * optional string path = 2; */ - public org.mlflow.api.proto.Service.StringClause getString() { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.StringClause) clause_; + public java.lang.String getPath() { + java.lang.Object ref = path_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + path_ = s; + } + return s; } - return org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); } /** *
-     * String clause for comparison.
+     * Filter artifacts matching this path (a relative path from the root artifact directory).
      * 
* - * optional .mlflow.StringClause string = 2; + * optional string path = 2; */ - public org.mlflow.api.proto.Service.StringClauseOrBuilder getStringOrBuilder() { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.StringClause) clause_; + public com.google.protobuf.ByteString + getPathBytes() { + java.lang.Object ref = path_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + path_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - return org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); } private byte memoizedIsInitialized = -1; @@ -35858,11 +35989,14 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, key_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, path_); } - if (clauseCase_ == 2) { - output.writeMessage(2, (org.mlflow.api.proto.Service.StringClause) clause_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, runId_); } unknownFields.writeTo(output); } @@ -35873,12 +36007,14 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, key_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (clauseCase_ == 2) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, (org.mlflow.api.proto.Service.StringClause) clause_); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, path_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, runId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -35890,27 +36026,26 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.ParameterSearchExpression)) { + if (!(obj instanceof org.mlflow.api.proto.Service.ListArtifacts)) { return super.equals(obj); } - org.mlflow.api.proto.Service.ParameterSearchExpression other = (org.mlflow.api.proto.Service.ParameterSearchExpression) obj; + org.mlflow.api.proto.Service.ListArtifacts other = (org.mlflow.api.proto.Service.ListArtifacts) obj; boolean result = true; - result = result && (hasKey() == other.hasKey()); - if (hasKey()) { - result = result && getKey() - .equals(other.getKey()); + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } + result = result && (hasRunUuid() == other.hasRunUuid()); + if (hasRunUuid()) { + result = result && getRunUuid() + .equals(other.getRunUuid()); } - result = result && getClauseCase().equals( - other.getClauseCase()); - if (!result) return false; - switch (clauseCase_) { - case 2: - result = result && getString() - .equals(other.getString()); - break; - case 0: - default: + result 
= result && (hasPath() == other.hasPath()); + if (hasPath()) { + result = result && getPath() + .equals(other.getPath()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -35923,86 +36058,86 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasKey()) { - hash = (37 * hash) + KEY_FIELD_NUMBER; - hash = (53 * hash) + getKey().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } + if (hasRunUuid()) { + hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; + hash = (53 * hash) + getRunUuid().hashCode(); } - switch (clauseCase_) { - case 2: - hash = (37 * hash) + STRING_FIELD_NUMBER; - hash = (53 * hash) + getString().hashCode(); - break; - case 0: - default: + if (hasPath()) { + hash = (37 * hash) + PATH_FIELD_NUMBER; + hash = (53 * hash) + getPath().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, 
extensionRegistry); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.ListArtifacts parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseDelimitedFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( + public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -36015,7 +36150,7 @@ public static org.mlflow.api.proto.Service.ParameterSearchExpression parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.ParameterSearchExpression prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.ListArtifacts prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -36031,26 +36166,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.ParameterSearchExpression} + * Protobuf type {@code mlflow.ListArtifacts} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.ParameterSearchExpression) - org.mlflow.api.proto.Service.ParameterSearchExpressionOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.ListArtifacts) + org.mlflow.api.proto.Service.ListArtifactsOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ParameterSearchExpression_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ParameterSearchExpression_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.ParameterSearchExpression.class, org.mlflow.api.proto.Service.ParameterSearchExpression.Builder.class); + org.mlflow.api.proto.Service.ListArtifacts.class, org.mlflow.api.proto.Service.ListArtifacts.Builder.class); } - // Construct using org.mlflow.api.proto.Service.ParameterSearchExpression.newBuilder() + // Construct using org.mlflow.api.proto.Service.ListArtifacts.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -36068,27 +36203,29 @@ private void 
maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - key_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - clauseCase_ = 0; - clause_ = null; + runUuid_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + path_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ParameterSearchExpression_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.ParameterSearchExpression getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance(); + public org.mlflow.api.proto.Service.ListArtifacts getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.ListArtifacts.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.ParameterSearchExpression build() { - org.mlflow.api.proto.Service.ParameterSearchExpression result = buildPartial(); + public org.mlflow.api.proto.Service.ListArtifacts build() { + org.mlflow.api.proto.Service.ListArtifacts result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -36096,23 +36233,23 @@ public org.mlflow.api.proto.Service.ParameterSearchExpression build() { } @java.lang.Override - public org.mlflow.api.proto.Service.ParameterSearchExpression buildPartial() { - org.mlflow.api.proto.Service.ParameterSearchExpression result = new org.mlflow.api.proto.Service.ParameterSearchExpression(this); + public org.mlflow.api.proto.Service.ListArtifacts buildPartial() { + org.mlflow.api.proto.Service.ListArtifacts result = new org.mlflow.api.proto.Service.ListArtifacts(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.key_ = key_; - if (clauseCase_ == 2) { - if (stringBuilder_ == null) { - result.clause_ = clause_; - } else { - result.clause_ = stringBuilder_.build(); - } + result.runId_ = runId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.runUuid_ = runUuid_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; } + result.path_ = path_; result.bitField0_ = to_bitField0_; - result.clauseCase_ = clauseCase_; onBuilt(); return result; } @@ -36151,29 +36288,30 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.ParameterSearchExpression) { - return mergeFrom((org.mlflow.api.proto.Service.ParameterSearchExpression)other); + if (other instanceof org.mlflow.api.proto.Service.ListArtifacts) { + return mergeFrom((org.mlflow.api.proto.Service.ListArtifacts)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.ParameterSearchExpression other) { - if (other == org.mlflow.api.proto.Service.ParameterSearchExpression.getDefaultInstance()) return this; - if (other.hasKey()) { + public Builder mergeFrom(org.mlflow.api.proto.Service.ListArtifacts other) { + if (other == org.mlflow.api.proto.Service.ListArtifacts.getDefaultInstance()) return this; + if (other.hasRunId()) { bitField0_ |= 0x00000001; - key_ = other.key_; + runId_ = other.runId_; onChanged(); } - switch 
(other.getClauseCase()) { - case STRING: { - mergeString(other.getString()); - break; - } - case CLAUSE_NOT_SET: { - break; - } + if (other.hasRunUuid()) { + bitField0_ |= 0x00000002; + runUuid_ = other.runUuid_; + onChanged(); + } + if (other.hasPath()) { + bitField0_ |= 0x00000004; + path_ = other.path_; + onChanged(); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -36190,11 +36328,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.ParameterSearchExpression parsedMessage = null; + org.mlflow.api.proto.Service.ListArtifacts parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.ParameterSearchExpression) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.ListArtifacts) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -36203,49 +36341,34 @@ public Builder mergeFrom( } return this; } - private int clauseCase_ = 0; - private java.lang.Object clause_; - public ClauseCase - getClauseCase() { - return ClauseCase.forNumber( - clauseCase_); - } - - public Builder clearClause() { - clauseCase_ = 0; - clause_ = null; - onChanged(); - return this; - } - private int bitField0_; - private java.lang.Object key_ = ""; + private java.lang.Object runId_ = ""; /** *
-       * :ref:`mlflowParam` key for search.
+       * ID of the run whose artifacts to list. Must be provided.
        * 
* - * optional string key = 1; + * optional string run_id = 3; */ - public boolean hasKey() { + public boolean hasRunId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-       * :ref:`mlflowParam` key for search.
+       * ID of the run whose artifacts to list. Must be provided.
        * 
* - * optional string key = 1; + * optional string run_id = 3; */ - public java.lang.String getKey() { - java.lang.Object ref = key_; + public java.lang.String getRunId() { + java.lang.Object ref = runId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - key_ = s; + runId_ = s; } return s; } else { @@ -36254,19 +36377,19 @@ public java.lang.String getKey() { } /** *
-       * :ref:`mlflowParam` key for search.
+       * ID of the run whose artifacts to list. Must be provided.
        * 
* - * optional string key = 1; + * optional string run_id = 3; */ public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; + getRunIdBytes() { + java.lang.Object ref = runId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - key_ = b; + runId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -36274,222 +36397,256 @@ public java.lang.String getKey() { } /** *
-       * :ref:`mlflowParam` key for search.
+       * ID of the run whose artifacts to list. Must be provided.
        * 
* - * optional string key = 1; + * optional string run_id = 3; */ - public Builder setKey( + public Builder setRunId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - key_ = value; + runId_ = value; onChanged(); return this; } /** *
-       * :ref:`mlflowParam` key for search.
+       * ID of the run whose artifacts to list. Must be provided.
        * 
* - * optional string key = 1; + * optional string run_id = 3; */ - public Builder clearKey() { + public Builder clearRunId() { bitField0_ = (bitField0_ & ~0x00000001); - key_ = getDefaultInstance().getKey(); + runId_ = getDefaultInstance().getRunId(); onChanged(); return this; } /** *
-       * :ref:`mlflowParam` key for search.
+       * ID of the run whose artifacts to list. Must be provided.
        * 
* - * optional string key = 1; + * optional string run_id = 3; */ - public Builder setKeyBytes( + public Builder setRunIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - key_ = value; + runId_ = value; onChanged(); return this; } - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.StringClause, org.mlflow.api.proto.Service.StringClause.Builder, org.mlflow.api.proto.Service.StringClauseOrBuilder> stringBuilder_; + private java.lang.Object runUuid_ = ""; /** *
-       * String clause for comparison.
+       * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+       * be removed in a future MLflow version.
        * 
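+       * A consumer would typically prefer run_id and fall back to this
+       * deprecated field, e.g. (illustrative sketch; "msg" is a placeholder):
+       *   String id = msg.hasRunId() ? msg.getRunId() : msg.getRunUuid();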
* - * optional .mlflow.StringClause string = 2; + * optional string run_uuid = 1; */ - public boolean hasString() { - return clauseCase_ == 2; + public boolean hasRunUuid() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * String clause for comparison.
+       * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string run_uuid = 1; */ - public org.mlflow.api.proto.Service.StringClause getString() { - if (stringBuilder_ == null) { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.StringClause) clause_; + public java.lang.String getRunUuid() { + java.lang.Object ref = runUuid_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runUuid_ = s; } - return org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); + return s; } else { - if (clauseCase_ == 2) { - return stringBuilder_.getMessage(); - } - return org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); + return (java.lang.String) ref; } } /** *
-       * String clause for comparison.
+       * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string run_uuid = 1; */ - public Builder setString(org.mlflow.api.proto.Service.StringClause value) { - if (stringBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - clause_ = value; - onChanged(); + public com.google.protobuf.ByteString + getRunUuidBytes() { + java.lang.Object ref = runUuid_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runUuid_ = b; + return b; } else { - stringBuilder_.setMessage(value); + return (com.google.protobuf.ByteString) ref; } - clauseCase_ = 2; + } + /** + *
+       * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+       * be removed in a future MLflow version.
+       * 
+ * + * optional string run_uuid = 1; + */ + public Builder setRunUuid( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + runUuid_ = value; + onChanged(); return this; } /** *
-       * String clause for comparison.
+       * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string run_uuid = 1; */ - public Builder setString( - org.mlflow.api.proto.Service.StringClause.Builder builderForValue) { - if (stringBuilder_ == null) { - clause_ = builderForValue.build(); - onChanged(); - } else { - stringBuilder_.setMessage(builderForValue.build()); - } - clauseCase_ = 2; + public Builder clearRunUuid() { + bitField0_ = (bitField0_ & ~0x00000002); + runUuid_ = getDefaultInstance().getRunUuid(); + onChanged(); return this; } /** *
-       * String clause for comparison.
+       * [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will
+       * be removed in a future MLflow version.
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string run_uuid = 1; */ - public Builder mergeString(org.mlflow.api.proto.Service.StringClause value) { - if (stringBuilder_ == null) { - if (clauseCase_ == 2 && - clause_ != org.mlflow.api.proto.Service.StringClause.getDefaultInstance()) { - clause_ = org.mlflow.api.proto.Service.StringClause.newBuilder((org.mlflow.api.proto.Service.StringClause) clause_) - .mergeFrom(value).buildPartial(); - } else { - clause_ = value; + public Builder setRunUuidBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + runUuid_ = value; + onChanged(); + return this; + } + + private java.lang.Object path_ = ""; + /** + *
+       * Filter artifacts matching this path (a relative path from the root artifact directory).
+       * 
+ * + * optional string path = 2; + */ + public boolean hasPath() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + *
+       * Filter artifacts matching this path (a relative path from the root artifact directory).
+       * 
+ * + * optional string path = 2; + */ + public java.lang.String getPath() { + java.lang.Object ref = path_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + path_ = s; } - onChanged(); + return s; } else { - if (clauseCase_ == 2) { - stringBuilder_.mergeFrom(value); - } - stringBuilder_.setMessage(value); + return (java.lang.String) ref; } - clauseCase_ = 2; - return this; } /** *
-       * String clause for comparison.
+       * Filter artifacts matching this path (a relative path from the root artifact directory).
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string path = 2; */ - public Builder clearString() { - if (stringBuilder_ == null) { - if (clauseCase_ == 2) { - clauseCase_ = 0; - clause_ = null; - onChanged(); - } + public com.google.protobuf.ByteString + getPathBytes() { + java.lang.Object ref = path_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + path_ = b; + return b; } else { - if (clauseCase_ == 2) { - clauseCase_ = 0; - clause_ = null; - } - stringBuilder_.clear(); + return (com.google.protobuf.ByteString) ref; } - return this; } /** *
-       * String clause for comparison.
+       * Filter artifacts matching this path (a relative path from the root artifact directory).
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string path = 2; */ - public org.mlflow.api.proto.Service.StringClause.Builder getStringBuilder() { - return getStringFieldBuilder().getBuilder(); + public Builder setPath( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + path_ = value; + onChanged(); + return this; } /** *
-       * String clause for comparison.
+       * Filter artifacts matching this path (a relative path from the root artifact directory).
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string path = 2; */ - public org.mlflow.api.proto.Service.StringClauseOrBuilder getStringOrBuilder() { - if ((clauseCase_ == 2) && (stringBuilder_ != null)) { - return stringBuilder_.getMessageOrBuilder(); - } else { - if (clauseCase_ == 2) { - return (org.mlflow.api.proto.Service.StringClause) clause_; - } - return org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); - } + public Builder clearPath() { + bitField0_ = (bitField0_ & ~0x00000004); + path_ = getDefaultInstance().getPath(); + onChanged(); + return this; } /** *
-       * String clause for comparison.
+       * Filter artifacts matching this path (a relative path from the root artifact directory).
        * 
* - * optional .mlflow.StringClause string = 2; + * optional string path = 2; */ - private com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.StringClause, org.mlflow.api.proto.Service.StringClause.Builder, org.mlflow.api.proto.Service.StringClauseOrBuilder> - getStringFieldBuilder() { - if (stringBuilder_ == null) { - if (!(clauseCase_ == 2)) { - clause_ = org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); - } - stringBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - org.mlflow.api.proto.Service.StringClause, org.mlflow.api.proto.Service.StringClause.Builder, org.mlflow.api.proto.Service.StringClauseOrBuilder>( - (org.mlflow.api.proto.Service.StringClause) clause_, - getParentForChildren(), - isClean()); - clause_ = null; - } - clauseCase_ = 2; - onChanged();; - return stringBuilder_; + public Builder setPathBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + path_ = value; + onChanged(); + return this; } @java.lang.Override public final Builder setUnknownFields( @@ -36504,117 +36661,130 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.ParameterSearchExpression) + // @@protoc_insertion_point(builder_scope:mlflow.ListArtifacts) } - // @@protoc_insertion_point(class_scope:mlflow.ParameterSearchExpression) - private static final org.mlflow.api.proto.Service.ParameterSearchExpression DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.ListArtifacts) + private static final org.mlflow.api.proto.Service.ListArtifacts DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.ParameterSearchExpression(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.ListArtifacts(); } - public static org.mlflow.api.proto.Service.ParameterSearchExpression getDefaultInstance() { + public static org.mlflow.api.proto.Service.ListArtifacts getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public ParameterSearchExpression parsePartialFrom( + public ListArtifacts parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new ParameterSearchExpression(input, extensionRegistry); + return new ListArtifacts(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.ParameterSearchExpression getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.ListArtifacts getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface StringClauseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.StringClause) + public interface FileInfoOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.FileInfo) com.google.protobuf.MessageOrBuilder { /** *
-     * OneOf ("==", "!=", "~")
+     * Path relative to the run's root artifact directory.
      * 
* - * optional string comparator = 1; + * optional string path = 1; */ - boolean hasComparator(); + boolean hasPath(); /** *
-     * OneOf ("==", "!=", "~")
+     * Path relative to the run's root artifact directory.
      * 
* - * optional string comparator = 1; + * optional string path = 1; */ - java.lang.String getComparator(); + java.lang.String getPath(); /** *
-     * OneOf ("==", "!=", "~")
+     * Path relative to the run's root artifact directory.
      * 
* - * optional string comparator = 1; + * optional string path = 1; */ com.google.protobuf.ByteString - getComparatorBytes(); + getPathBytes(); /** *
-     * String value for comparison.
+     * Whether the path is a directory.
      * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - boolean hasValue(); + boolean hasIsDir(); + /** + *
+     * Whether the path is a directory.
+     * 
+ * + * optional bool is_dir = 2; + */ + boolean getIsDir(); + /** *
-     * String value for comparison.
+     * Size in bytes. Unset for directories.
      * 
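+     * Since directories leave this field unset, callers should guard reads
+     * with hasFileSize(), e.g. (illustrative sketch; "info" is a placeholder):
+     *   long size = info.hasFileSize() ? info.getFileSize() : 0L;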
* - * optional string value = 2; + * optional int64 file_size = 3; */ - java.lang.String getValue(); + boolean hasFileSize(); /** *
-     * String value for comparison.
+     * Size in bytes. Unset for directories.
      * 
* - * optional string value = 2; + * optional int64 file_size = 3; */ - com.google.protobuf.ByteString - getValueBytes(); + long getFileSize(); } /** - * Protobuf type {@code mlflow.StringClause} + *
+   * Metadata of a single artifact file or directory.
+   * 
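+   * Illustrative construction, as a sketch only (the setters are the
+   * generated builder methods defined below; the values are placeholders):
+   *   FileInfo file = FileInfo.newBuilder()
+   *       .setPath("model/MLmodel")
+   *       .setIsDir(false)
+   *       .setFileSize(512L)
+   *       .build();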
+ * + * Protobuf type {@code mlflow.FileInfo} */ - public static final class StringClause extends + public static final class FileInfo extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.StringClause) - StringClauseOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.FileInfo) + FileInfoOrBuilder { private static final long serialVersionUID = 0L; - // Use StringClause.newBuilder() to construct. - private StringClause(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use FileInfo.newBuilder() to construct. + private FileInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private StringClause() { - comparator_ = ""; - value_ = ""; + private FileInfo() { + path_ = ""; + isDir_ = false; + fileSize_ = 0L; } @java.lang.Override @@ -36622,7 +36792,7 @@ private StringClause() { getUnknownFields() { return this.unknownFields; } - private StringClause( + private FileInfo( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -36644,13 +36814,17 @@ private StringClause( case 10: { com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - comparator_ = bs; + path_ = bs; break; } - case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); + case 16: { bitField0_ |= 0x00000002; - value_ = bs; + isDir_ = input.readBool(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + fileSize_ = input.readInt64(); break; } default: { @@ -36674,39 +36848,39 @@ private StringClause( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_StringClause_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_StringClause_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.StringClause.class, org.mlflow.api.proto.Service.StringClause.Builder.class); + org.mlflow.api.proto.Service.FileInfo.class, org.mlflow.api.proto.Service.FileInfo.Builder.class); } private int bitField0_; - public static final int COMPARATOR_FIELD_NUMBER = 1; - private volatile java.lang.Object comparator_; + public static final int PATH_FIELD_NUMBER = 1; + private volatile java.lang.Object path_; /** *
-     * OneOf ("==", "!=", "~")
+     * Path relative to the run's root artifact directory.
      * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public boolean hasComparator() { + public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-     * OneOf ("==", "!=", "~")
+     * Path relative to the run's root artifact directory.
      * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public java.lang.String getComparator() { - java.lang.Object ref = comparator_; + public java.lang.String getPath() { + java.lang.Object ref = path_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -36714,84 +36888,76 @@ public java.lang.String getComparator() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - comparator_ = s; + path_ = s; } return s; } } /** *
-     * OneOf ("==", "!=", "~")
+     * Path relative to the run's root artifact directory.
      * 
* - * optional string comparator = 1; + * optional string path = 1; */ public com.google.protobuf.ByteString - getComparatorBytes() { - java.lang.Object ref = comparator_; + getPathBytes() { + java.lang.Object ref = path_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - comparator_ = b; + path_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - public static final int VALUE_FIELD_NUMBER = 2; - private volatile java.lang.Object value_; + public static final int IS_DIR_FIELD_NUMBER = 2; + private boolean isDir_; /** *
-     * String value for comparison.
+     * Whether the path is a directory.
      * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - public boolean hasValue() { + public boolean hasIsDir() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * String value for comparison.
+     * Whether the path is a directory.
      * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } + public boolean getIsDir() { + return isDir_; } + + public static final int FILE_SIZE_FIELD_NUMBER = 3; + private long fileSize_; /** *
-     * String value for comparison.
+     * Size in bytes. Unset for directories.
      * 
* - * optional string value = 2; + * optional int64 file_size = 3; */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public boolean hasFileSize() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + *
+     * Size in bytes. Unset for directories.
+     * 
+ * + * optional int64 file_size = 3; + */ + public long getFileSize() { + return fileSize_; } private byte memoizedIsInitialized = -1; @@ -36809,10 +36975,13 @@ public final boolean isInitialized() { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, comparator_); + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, path_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, value_); + output.writeBool(2, isDir_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, fileSize_); } unknownFields.writeTo(output); } @@ -36824,10 +36993,15 @@ public int getSerializedSize() { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, comparator_); + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, path_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, value_); + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, isDir_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, fileSize_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -36839,21 +37013,26 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.StringClause)) { + if (!(obj instanceof org.mlflow.api.proto.Service.FileInfo)) { return super.equals(obj); } - org.mlflow.api.proto.Service.StringClause other = (org.mlflow.api.proto.Service.StringClause) obj; + org.mlflow.api.proto.Service.FileInfo other = (org.mlflow.api.proto.Service.FileInfo) obj; boolean result = true; - result = result && (hasComparator() == other.hasComparator()); - if (hasComparator()) { - result = result && getComparator() - .equals(other.getComparator()); + result = result && (hasPath() == other.hasPath()); + if (hasPath()) { + result = result && getPath() + .equals(other.getPath()); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && getValue() - .equals(other.getValue()); + result = result && (hasIsDir() == other.hasIsDir()); + if (hasIsDir()) { + result = result && (getIsDir() + == other.getIsDir()); + } + result = result && (hasFileSize() == other.hasFileSize()); + if (hasFileSize()) { + result = result && (getFileSize() + == other.getFileSize()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -36866,82 +37045,88 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasComparator()) { - hash = (37 * hash) + COMPARATOR_FIELD_NUMBER; - hash = (53 * hash) + getComparator().hashCode(); + if (hasPath()) { + hash = (37 * hash) + PATH_FIELD_NUMBER; + hash = (53 * hash) + getPath().hashCode(); } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); + if (hasIsDir()) { + hash = (37 * hash) + IS_DIR_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getIsDir()); + } + if (hasFileSize()) { + hash = (37 * hash) + FILE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getFileSize()); } hash = (29 * hash) + unknownFields.hashCode(); 
memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.StringClause parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.FileInfo parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.StringClause parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.FileInfo parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.StringClause parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.FileInfo parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.StringClause parseDelimitedFrom( + public static org.mlflow.api.proto.Service.FileInfo parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static org.mlflow.api.proto.Service.FileInfo parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.StringClause parseFrom( + public static 
org.mlflow.api.proto.Service.FileInfo parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -36954,7 +37139,7 @@ public static org.mlflow.api.proto.Service.StringClause parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.StringClause prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.FileInfo prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -36970,26 +37155,30 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.StringClause} + *
+     * Metadata of a single artifact file or directory.
+     * 
+ * + * Protobuf type {@code mlflow.FileInfo} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.StringClause) - org.mlflow.api.proto.Service.StringClauseOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.FileInfo) + org.mlflow.api.proto.Service.FileInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_StringClause_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_StringClause_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.StringClause.class, org.mlflow.api.proto.Service.StringClause.Builder.class); + org.mlflow.api.proto.Service.FileInfo.class, org.mlflow.api.proto.Service.FileInfo.Builder.class); } - // Construct using org.mlflow.api.proto.Service.StringClause.newBuilder() + // Construct using org.mlflow.api.proto.Service.FileInfo.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -37007,27 +37196,29 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - comparator_ = ""; + path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; + isDir_ = false; bitField0_ = (bitField0_ & ~0x00000002); + fileSize_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_StringClause_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.StringClause getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.StringClause.getDefaultInstance(); + public org.mlflow.api.proto.Service.FileInfo getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.FileInfo.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.StringClause build() { - org.mlflow.api.proto.Service.StringClause result = buildPartial(); + public org.mlflow.api.proto.Service.FileInfo build() { + org.mlflow.api.proto.Service.FileInfo result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -37035,18 +37226,22 @@ public org.mlflow.api.proto.Service.StringClause build() { } @java.lang.Override - public org.mlflow.api.proto.Service.StringClause buildPartial() { - org.mlflow.api.proto.Service.StringClause result = new org.mlflow.api.proto.Service.StringClause(this); + public org.mlflow.api.proto.Service.FileInfo buildPartial() { + org.mlflow.api.proto.Service.FileInfo result = new org.mlflow.api.proto.Service.FileInfo(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.comparator_ = comparator_; + result.path_ = path_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.value_ = value_; + result.isDir_ = isDir_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 
0x00000004; + } + result.fileSize_ = fileSize_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -37086,25 +37281,26 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.StringClause) { - return mergeFrom((org.mlflow.api.proto.Service.StringClause)other); + if (other instanceof org.mlflow.api.proto.Service.FileInfo) { + return mergeFrom((org.mlflow.api.proto.Service.FileInfo)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.StringClause other) { - if (other == org.mlflow.api.proto.Service.StringClause.getDefaultInstance()) return this; - if (other.hasComparator()) { + public Builder mergeFrom(org.mlflow.api.proto.Service.FileInfo other) { + if (other == org.mlflow.api.proto.Service.FileInfo.getDefaultInstance()) return this; + if (other.hasPath()) { bitField0_ |= 0x00000001; - comparator_ = other.comparator_; + path_ = other.path_; onChanged(); } - if (other.hasValue()) { - bitField0_ |= 0x00000002; - value_ = other.value_; - onChanged(); + if (other.hasIsDir()) { + setIsDir(other.getIsDir()); + } + if (other.hasFileSize()) { + setFileSize(other.getFileSize()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -37121,11 +37317,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.StringClause parsedMessage = null; + org.mlflow.api.proto.Service.FileInfo parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.StringClause) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.FileInfo) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -37136,32 +37332,32 @@ public Builder mergeFrom( } private int bitField0_; - private java.lang.Object comparator_ = ""; + private java.lang.Object path_ = ""; /** *
-       * OneOf ("==", "!=", "~")
+       * Path relative to the run's root artifact directory.
        * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public boolean hasComparator() { + public boolean hasPath() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** *
-       * OneOf ("==", "!=", "~")
+       * Path relative to the run's root artifact directory.
        * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public java.lang.String getComparator() { - java.lang.Object ref = comparator_; + public java.lang.String getPath() { + java.lang.Object ref = path_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - comparator_ = s; + path_ = s; } return s; } else { @@ -37170,19 +37366,19 @@ public java.lang.String getComparator() { } /** *
-       * OneOf ("==", "!=", "~")
+       * Path relative to the run's root artifact directory.
        * 
* - * optional string comparator = 1; + * optional string path = 1; */ public com.google.protobuf.ByteString - getComparatorBytes() { - java.lang.Object ref = comparator_; + getPathBytes() { + java.lang.Object ref = path_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - comparator_ = b; + path_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -37190,148 +37386,144 @@ public java.lang.String getComparator() { } /** *
-       * OneOf ("==", "!=", "~")
+       * Path relative to the run's root artifact directory.
        * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public Builder setComparator( + public Builder setPath( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - comparator_ = value; + path_ = value; onChanged(); return this; } /** *
-       * OneOf ("==", "!=", "~")
+       * Path relative to the run's root artifact directory.
        * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public Builder clearComparator() { + public Builder clearPath() { bitField0_ = (bitField0_ & ~0x00000001); - comparator_ = getDefaultInstance().getComparator(); + path_ = getDefaultInstance().getPath(); onChanged(); return this; } /** *
-       * OneOf ("==", "!=", "~")
+       * Path relative to the run's root artifact directory.
        * 
* - * optional string comparator = 1; + * optional string path = 1; */ - public Builder setComparatorBytes( + public Builder setPathBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - comparator_ = value; + path_ = value; onChanged(); return this; } - private java.lang.Object value_ = ""; + private boolean isDir_ ; /** *
-       * String value for comparison.
+       * Whether the path is a directory.
        * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - public boolean hasValue() { + public boolean hasIsDir() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * String value for comparison.
+       * Whether the path is a directory.
        * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } + public boolean getIsDir() { + return isDir_; } /** *
-       * String value for comparison.
+       * Whether the path is a directory.
        * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public Builder setIsDir(boolean value) { + bitField0_ |= 0x00000002; + isDir_ = value; + onChanged(); + return this; } /** *
-       * String value for comparison.
+       * Whether the path is a directory.
        * 
* - * optional string value = 2; + * optional bool is_dir = 2; */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; + public Builder clearIsDir() { + bitField0_ = (bitField0_ & ~0x00000002); + isDir_ = false; onChanged(); return this; } + + private long fileSize_ ; /** *
-       * String value for comparison.
+       * Size in bytes. Unset for directories.
        * 
* - * optional string value = 2; + * optional int64 file_size = 3; */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); + public boolean hasFileSize() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + *
+       * Size in bytes. Unset for directories.
+       * 
+ * + * optional int64 file_size = 3; + */ + public long getFileSize() { + return fileSize_; + } + /** + *
+       * Size in bytes. Unset for directories.
+       * 
+ * + * optional int64 file_size = 3; + */ + public Builder setFileSize(long value) { + bitField0_ |= 0x00000004; + fileSize_ = value; onChanged(); return this; } /** *
-       * String value for comparison.
+       * Size in bytes. Unset for directories.
        * 
* - * optional string value = 2; + * optional int64 file_size = 3; */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; + public Builder clearFileSize() { + bitField0_ = (bitField0_ & ~0x00000004); + fileSize_ = 0L; onChanged(); return this; } @@ -37348,108 +37540,147 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.StringClause) + // @@protoc_insertion_point(builder_scope:mlflow.FileInfo) } - // @@protoc_insertion_point(class_scope:mlflow.StringClause) - private static final org.mlflow.api.proto.Service.StringClause DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.FileInfo) + private static final org.mlflow.api.proto.Service.FileInfo DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.StringClause(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.FileInfo(); } - public static org.mlflow.api.proto.Service.StringClause getDefaultInstance() { + public static org.mlflow.api.proto.Service.FileInfo getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public StringClause parsePartialFrom( + public FileInfo parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new StringClause(input, extensionRegistry); + return new FileInfo(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.StringClause getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.FileInfo getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface FloatClauseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.FloatClause) + public interface GetMetricHistoryOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.GetMetricHistory) com.google.protobuf.MessageOrBuilder { /** *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
+     * ID of the run from which to fetch metric values. Must be provided.
      * 
* - * optional string comparator = 1; + * optional string run_id = 3; */ - boolean hasComparator(); + boolean hasRunId(); /** *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
+     * ID of the run from which to fetch metric values. Must be provided.
      * 
* - * optional string comparator = 1; + * optional string run_id = 3; */ - java.lang.String getComparator(); + java.lang.String getRunId(); /** *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
+     * ID of the run from which to fetch metric values. Must be provided.
      * 
* - * optional string comparator = 1; + * optional string run_id = 3; */ com.google.protobuf.ByteString - getComparatorBytes(); + getRunIdBytes(); /** *
-     * Float value for comparison.
+     * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+     * will be removed in a future MLflow version.
      * 
* - * optional float value = 2; + * optional string run_uuid = 1; */ - boolean hasValue(); + boolean hasRunUuid(); + /** + *
+     * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+     * will be removed in a future MLflow version.
+     * 
+ * + * optional string run_uuid = 1; + */ + java.lang.String getRunUuid(); /** *
-     * Float value for comparison.
+     * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+     * will be removed in a future MLflow version.
      * 
* - * optional float value = 2; + * optional string run_uuid = 1; + */ + com.google.protobuf.ByteString + getRunUuidBytes(); + + /** + *
+     * Name of the metric for which to fetch the history. Must be provided.
+     * 
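+     * Illustrative request, as a sketch only (the builder setters follow the
+     * standard generated pattern and are not shown in this excerpt; the
+     * values are placeholders):
+     *   GetMetricHistory request = GetMetricHistory.newBuilder()
+     *       .setRunId("<run-id>")
+     *       .setMetricKey("loss")
+     *       .build();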
+ * + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; + */ + boolean hasMetricKey(); + /** + *
+     * Name of the metric for which to fetch the history. Must be provided.
+     * 
+ * + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; + */ + java.lang.String getMetricKey(); + /** + *
+     * Name of the metric for which to fetch the history. Must be provided.
+     * 
+ * + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - float getValue(); + com.google.protobuf.ByteString + getMetricKeyBytes(); } /** - * Protobuf type {@code mlflow.FloatClause} + * Protobuf type {@code mlflow.GetMetricHistory} */ - public static final class FloatClause extends + public static final class GetMetricHistory extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.FloatClause) - FloatClauseOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.GetMetricHistory) + GetMetricHistoryOrBuilder { private static final long serialVersionUID = 0L; - // Use FloatClause.newBuilder() to construct. - private FloatClause(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use GetMetricHistory.newBuilder() to construct. + private GetMetricHistory(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private FloatClause() { - comparator_ = ""; - value_ = 0F; + private GetMetricHistory() { + runId_ = ""; + runUuid_ = ""; + metricKey_ = ""; } @java.lang.Override @@ -37457,7 +37688,7 @@ private FloatClause() { getUnknownFields() { return this.unknownFields; } - private FloatClause( + private GetMetricHistory( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -37478,13 +37709,20 @@ private FloatClause( break; case 10: { com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - comparator_ = bs; + bitField0_ |= 0x00000002; + runUuid_ = bs; break; } - case 21: { - bitField0_ |= 0x00000002; - value_ = input.readFloat(); + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000004; + metricKey_ = bs; + break; + } + case 26: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + runId_ = bs; break; } default: { @@ -37508,6766 +37746,446 @@ private FloatClause( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FloatClause_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FloatClause_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.FloatClause.class, org.mlflow.api.proto.Service.FloatClause.Builder.class); + org.mlflow.api.proto.Service.GetMetricHistory.class, org.mlflow.api.proto.Service.GetMetricHistory.Builder.class); } - private int bitField0_; - public static final int COMPARATOR_FIELD_NUMBER = 1; - private volatile java.lang.Object comparator_; - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     * 
- * - * optional string comparator = 1; - */ - public boolean hasComparator() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public interface ResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.GetMetricHistory.Response) + com.google.protobuf.MessageOrBuilder { + + /** + *
+       * All logged values for this metric.
+       * 
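+       * Illustrative read, as a sketch only ("response" is a placeholder for
+       * a parsed GetMetricHistory.Response):
+       *   for (Metric metric : response.getMetricsList()) {
+       *     // each Metric carries one logged value of the requested key
+       *   }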
+ * + * repeated .mlflow.Metric metrics = 1; + */ + java.util.List + getMetricsList(); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + org.mlflow.api.proto.Service.Metric getMetrics(int index); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + int getMetricsCount(); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + java.util.List + getMetricsOrBuilderList(); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( + int index); } /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     * 
- * - * optional string comparator = 1; + * Protobuf type {@code mlflow.GetMetricHistory.Response} */ - public java.lang.String getComparator() { - java.lang.Object ref = comparator_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - comparator_ = s; - } - return s; + public static final class Response extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:mlflow.GetMetricHistory.Response) + ResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use Response.newBuilder() to construct. + private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); } - } - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     * 
- * - * optional string comparator = 1; - */ - public com.google.protobuf.ByteString - getComparatorBytes() { - java.lang.Object ref = comparator_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - comparator_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + private Response() { + metrics_ = java.util.Collections.emptyList(); } - } - - public static final int VALUE_FIELD_NUMBER = 2; - private float value_; - /** - *
-     * Float value for comparison.
-     * 
- * - * optional float value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-     * Float value for comparison.
-     * 
- * - * optional float value = 2; - */ - public float getValue() { - return value_; - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, comparator_); + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeFloat(2, value_); + private Response( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + metrics_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + metrics_.add( + input.readMessage(org.mlflow.api.proto.Service.Metric.PARSER, extensionRegistry)); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + metrics_ = java.util.Collections.unmodifiableList(metrics_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_descriptor; } - unknownFields.writeTo(output); - } - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.mlflow.api.proto.Service.GetMetricHistory.Response.class, org.mlflow.api.proto.Service.GetMetricHistory.Response.Builder.class); + } - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, comparator_); + public static final int METRICS_FIELD_NUMBER = 1; + private java.util.List metrics_; + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public java.util.List getMetricsList() { + return metrics_; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeFloatSize(2, value_); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public java.util.List + getMetricsOrBuilderList() { + return metrics_; } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public int getMetricsCount() { + return metrics_.size(); } - if (!(obj instanceof org.mlflow.api.proto.Service.FloatClause)) { - return super.equals(obj); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.Metric getMetrics(int index) { + return metrics_.get(index); } - org.mlflow.api.proto.Service.FloatClause other = (org.mlflow.api.proto.Service.FloatClause) obj; - - boolean result = true; - result = result && (hasComparator() == other.hasComparator()); - if (hasComparator()) { - result = result && getComparator() - .equals(other.getComparator()); + /** + *
+       * All logged values for this metric.
+       * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( + int index) { + return metrics_.get(index); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && ( - java.lang.Float.floatToIntBits(getValue()) - == java.lang.Float.floatToIntBits( - other.getValue())); + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < metrics_.size(); i++) { + output.writeMessage(1, metrics_.get(i)); + } + unknownFields.writeTo(output); } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasComparator()) { - hash = (37 * hash) + COMPARATOR_FIELD_NUMBER; - hash = (53 * hash) + getComparator().hashCode(); + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < metrics_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, metrics_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + java.lang.Float.floatToIntBits( - getValue()); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static 
org.mlflow.api.proto.Service.FloatClause parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FloatClause parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.FloatClause parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.FloatClause parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.FloatClause prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.FloatClause} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.FloatClause) - org.mlflow.api.proto.Service.FloatClauseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FloatClause_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FloatClause_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.FloatClause.class, org.mlflow.api.proto.Service.FloatClause.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.FloatClause.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - comparator_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = 0F; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FloatClause_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.FloatClause getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.FloatClause.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.FloatClause build() { - org.mlflow.api.proto.Service.FloatClause result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.mlflow.api.proto.Service.GetMetricHistory.Response)) { + return super.equals(obj); + } + org.mlflow.api.proto.Service.GetMetricHistory.Response other = (org.mlflow.api.proto.Service.GetMetricHistory.Response) obj; + + boolean result = true; + result = result && getMetricsList() + .equals(other.getMetricsList()); + result = result && unknownFields.equals(other.unknownFields); + return result; } @java.lang.Override - public org.mlflow.api.proto.Service.FloatClause buildPartial() { - org.mlflow.api.proto.Service.FloatClause result = new org.mlflow.api.proto.Service.FloatClause(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - result.comparator_ = comparator_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getMetricsCount() > 0) { + hash = (37 * hash) + METRICS_FIELD_NUMBER; + 
hash = (53 * hash) + getMetricsList().hashCode(); } - result.value_ = value_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; } - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.FloatClause) { - return mergeFrom((org.mlflow.api.proto.Service.FloatClause)other); - } else { - super.mergeFrom(other); - return this; - } + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - - public Builder mergeFrom(org.mlflow.api.proto.Service.FloatClause other) { - if (other == org.mlflow.api.proto.Service.FloatClause.getDefaultInstance()) return this; - if (other.hasComparator()) { - bitField0_ |= 0x00000001; - comparator_ = other.comparator_; - onChanged(); - } - if (other.hasValue()) { - setValue(other.getValue()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - - @java.lang.Override - public final boolean isInitialized() { - return true; + public static 
org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.FloatClause parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.FloatClause) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object comparator_ = ""; - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public boolean hasComparator() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public java.lang.String getComparator() { - java.lang.Object ref = comparator_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - comparator_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public com.google.protobuf.ByteString - getComparatorBytes() { - java.lang.Object ref = comparator_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - comparator_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public Builder setComparator( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - comparator_ = value; - onChanged(); - return this; + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public Builder clearComparator() { - bitField0_ = (bitField0_ & ~0x00000001); - comparator_ = getDefaultInstance().getComparator(); - onChanged(); - return this; + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public Builder setComparatorBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - comparator_ = value; - onChanged(); - return this; + public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); } - private float value_ ; - /** - *
-       * Float value for comparison.
-       *
- * - * optional float value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-       * Float value for comparison.
-       *
- * - * optional float value = 2; - */ - public float getValue() { - return value_; - } - /** - *
-       * Float value for comparison.
-       *
- * - * optional float value = 2; - */ - public Builder setValue(float value) { - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); } - /** - *
-       * Float value for comparison.
-       *
- * - * optional float value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = 0F; - onChanged(); - return this; + public static Builder newBuilder(org.mlflow.api.proto.Service.GetMetricHistory.Response prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } + /** + * Protobuf type {@code mlflow.GetMetricHistory.Response} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:mlflow.GetMetricHistory.Response) + org.mlflow.api.proto.Service.GetMetricHistory.ResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_descriptor; + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.mlflow.api.proto.Service.GetMetricHistory.Response.class, org.mlflow.api.proto.Service.GetMetricHistory.Response.Builder.class); + } - // @@protoc_insertion_point(builder_scope:mlflow.FloatClause) - } - - // @@protoc_insertion_point(class_scope:mlflow.FloatClause) - private static final org.mlflow.api.proto.Service.FloatClause DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.FloatClause(); - } - - public static org.mlflow.api.proto.Service.FloatClause getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FloatClause parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FloatClause(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } + // Construct using org.mlflow.api.proto.Service.GetMetricHistory.Response.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getMetricsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (metricsBuilder_ == null) { + metrics_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + 
metricsBuilder_.clear(); + } + return this; + } - @java.lang.Override - public org.mlflow.api.proto.Service.FloatClause getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_descriptor; + } - } + @java.lang.Override + public org.mlflow.api.proto.Service.GetMetricHistory.Response getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.GetMetricHistory.Response.getDefaultInstance(); + } - public interface DoubleClauseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.DoubleClause) - com.google.protobuf.MessageOrBuilder { + @java.lang.Override + public org.mlflow.api.proto.Service.GetMetricHistory.Response build() { + org.mlflow.api.proto.Service.GetMetricHistory.Response result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     *
- * - * optional string comparator = 1; - */ - boolean hasComparator(); - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     *
- * - * optional string comparator = 1; - */ - java.lang.String getComparator(); - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     *
- * - * optional string comparator = 1; - */ - com.google.protobuf.ByteString - getComparatorBytes(); - - /** - *
-     * Float value for comparison.
-     *
- * - * optional double value = 2; - */ - boolean hasValue(); - /** - *
-     * Float value for comparison.
-     *
- * - * optional double value = 2; - */ - double getValue(); - } - /** - * Protobuf type {@code mlflow.DoubleClause} - */ - public static final class DoubleClause extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.DoubleClause) - DoubleClauseOrBuilder { - private static final long serialVersionUID = 0L; - // Use DoubleClause.newBuilder() to construct. - private DoubleClause(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private DoubleClause() { - comparator_ = ""; - value_ = 0D; - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private DoubleClause( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - comparator_ = bs; - break; - } - case 17: { - bitField0_ |= 0x00000002; - value_ = input.readDouble(); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_DoubleClause_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_DoubleClause_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.DoubleClause.class, org.mlflow.api.proto.Service.DoubleClause.Builder.class); - } - - private int bitField0_; - public static final int COMPARATOR_FIELD_NUMBER = 1; - private volatile java.lang.Object comparator_; - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     *
- * - * optional string comparator = 1; - */ - public boolean hasComparator() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     *
- * - * optional string comparator = 1; - */ - public java.lang.String getComparator() { - java.lang.Object ref = comparator_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - comparator_ = s; - } - return s; - } - } - /** - *
-     * OneOf (">", ">=", "==", "!=", "<=", "<")
-     *
- * - * optional string comparator = 1; - */ - public com.google.protobuf.ByteString - getComparatorBytes() { - java.lang.Object ref = comparator_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - comparator_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int VALUE_FIELD_NUMBER = 2; - private double value_; - /** - *
-     * Float value for comparison.
-     *
- * - * optional double value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-     * Float value for comparison.
-     *
- * - * optional double value = 2; - */ - public double getValue() { - return value_; - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, comparator_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeDouble(2, value_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, comparator_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeDoubleSize(2, value_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.DoubleClause)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.DoubleClause other = (org.mlflow.api.proto.Service.DoubleClause) obj; - - boolean result = true; - result = result && (hasComparator() == other.hasComparator()); - if (hasComparator()) { - result = result && getComparator() - .equals(other.getComparator()); - } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && ( - java.lang.Double.doubleToLongBits(getValue()) - == java.lang.Double.doubleToLongBits( - other.getValue())); - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasComparator()) { - hash = (37 * hash) + COMPARATOR_FIELD_NUMBER; - hash = (53 * hash) + getComparator().hashCode(); - } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - java.lang.Double.doubleToLongBits(getValue())); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { 
- return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.DoubleClause parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.DoubleClause parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.DoubleClause parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.DoubleClause prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.DoubleClause} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.DoubleClause) - org.mlflow.api.proto.Service.DoubleClauseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_DoubleClause_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_DoubleClause_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.DoubleClause.class, org.mlflow.api.proto.Service.DoubleClause.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.DoubleClause.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - comparator_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = 0D; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_DoubleClause_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.DoubleClause getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.DoubleClause build() { - org.mlflow.api.proto.Service.DoubleClause result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.DoubleClause buildPartial() { - org.mlflow.api.proto.Service.DoubleClause result = new org.mlflow.api.proto.Service.DoubleClause(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.comparator_ = comparator_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.value_ = value_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder 
setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.DoubleClause) { - return mergeFrom((org.mlflow.api.proto.Service.DoubleClause)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.mlflow.api.proto.Service.DoubleClause other) { - if (other == org.mlflow.api.proto.Service.DoubleClause.getDefaultInstance()) return this; - if (other.hasComparator()) { - bitField0_ |= 0x00000001; - comparator_ = other.comparator_; - onChanged(); - } - if (other.hasValue()) { - setValue(other.getValue()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.DoubleClause parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.DoubleClause) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object comparator_ = ""; - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public boolean hasComparator() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public java.lang.String getComparator() { - java.lang.Object ref = comparator_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - comparator_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public com.google.protobuf.ByteString - getComparatorBytes() { - java.lang.Object ref = comparator_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - comparator_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public Builder setComparator( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - comparator_ = value; - onChanged(); - return this; - } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public Builder clearComparator() { - bitField0_ = (bitField0_ & ~0x00000001); - comparator_ = getDefaultInstance().getComparator(); - onChanged(); - return this; - } - /** - *
-       * OneOf (">", ">=", "==", "!=", "<=", "<")
-       *
- * - * optional string comparator = 1; - */ - public Builder setComparatorBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - comparator_ = value; - onChanged(); - return this; - } - - private double value_ ; - /** - *
-       * Float value for comparison.
-       *
- * - * optional double value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-       * Float value for comparison.
-       *
- * - * optional double value = 2; - */ - public double getValue() { - return value_; - } - /** - *
-       * Float value for comparison.
-       *
- * - * optional double value = 2; - */ - public Builder setValue(double value) { - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - /** - *
-       * Float value for comparison.
-       *
- * - * optional double value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = 0D; - onChanged(); - return this; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.DoubleClause) - } - - // @@protoc_insertion_point(class_scope:mlflow.DoubleClause) - private static final org.mlflow.api.proto.Service.DoubleClause DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.DoubleClause(); - } - - public static org.mlflow.api.proto.Service.DoubleClause getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public DoubleClause parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new DoubleClause(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.DoubleClause getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface SearchRunsOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.SearchRuns) - com.google.protobuf.MessageOrBuilder { - - /** - *
-     * List of experiment IDs to search over.
-     *
- * - * repeated int64 experiment_ids = 1; - */ - java.util.List getExperimentIdsList(); - /** - *
-     * List of experiment IDs to search over.
-     *
- * - * repeated int64 experiment_ids = 1; - */ - int getExperimentIdsCount(); - /** - *
-     * List of experiment IDs to search over.
-     *
- * - * repeated int64 experiment_ids = 1; - */ - long getExperimentIds(int index); - - /** - *
-     * Expressions describing runs (AND-ed together when filtering runs).
-     *
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - java.util.List - getAndedExpressionsList(); - /** - *
-     * Expressions describing runs (AND-ed together when filtering runs).
-     *
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - org.mlflow.api.proto.Service.SearchExpression getAndedExpressions(int index); - /** - *
-     * Expressions describing runs (AND-ed together when filtering runs).
-     *
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - int getAndedExpressionsCount(); - /** - *
-     * Expressions describing runs (AND-ed together when filtering runs).
-     *
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - java.util.List - getAndedExpressionsOrBuilderList(); - /** - *
-     * Expressions describing runs (AND-ed together when filtering runs).
-     *
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - org.mlflow.api.proto.Service.SearchExpressionOrBuilder getAndedExpressionsOrBuilder( - int index); - - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - boolean hasRunViewType(); - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - org.mlflow.api.proto.Service.ViewType getRunViewType(); - } - /** - * Protobuf type {@code mlflow.SearchRuns} - */ - public static final class SearchRuns extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.SearchRuns) - SearchRunsOrBuilder { - private static final long serialVersionUID = 0L; - // Use SearchRuns.newBuilder() to construct. - private SearchRuns(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private SearchRuns() { - experimentIds_ = java.util.Collections.emptyList(); - andedExpressions_ = java.util.Collections.emptyList(); - runViewType_ = 1; - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SearchRuns( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 8: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - experimentIds_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - experimentIds_.add(input.readInt64()); - break; - } - case 10: { - int length = input.readRawVarint32(); - int limit = input.pushLimit(length); - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { - experimentIds_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - while (input.getBytesUntilLimit() > 0) { - experimentIds_.add(input.readInt64()); - } - input.popLimit(limit); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - andedExpressions_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - andedExpressions_.add( - input.readMessage(org.mlflow.api.proto.Service.SearchExpression.PARSER, extensionRegistry)); - break; - } - case 24: { - int rawValue = input.readEnum(); - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.ViewType value = org.mlflow.api.proto.Service.ViewType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(3, rawValue); - } else { - bitField0_ |= 0x00000001; - runViewType_ = rawValue; - } - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - experimentIds_ = java.util.Collections.unmodifiableList(experimentIds_); - } - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { 
- andedExpressions_ = java.util.Collections.unmodifiableList(andedExpressions_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.SearchRuns.class, org.mlflow.api.proto.Service.SearchRuns.Builder.class); - } - - public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.SearchRuns.Response) - com.google.protobuf.MessageOrBuilder { - - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - java.util.List - getRunsList(); - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - org.mlflow.api.proto.Service.Run getRuns(int index); - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - int getRunsCount(); - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - java.util.List - getRunsOrBuilderList(); - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - org.mlflow.api.proto.Service.RunOrBuilder getRunsOrBuilder( - int index); - } - /** - * Protobuf type {@code mlflow.SearchRuns.Response} - */ - public static final class Response extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.SearchRuns.Response) - ResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use Response.newBuilder() to construct. - private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private Response() { - runs_ = java.util.Collections.emptyList(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Response( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - runs_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - runs_.add( - input.readMessage(org.mlflow.api.proto.Service.Run.PARSER, extensionRegistry)); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - runs_ = java.util.Collections.unmodifiableList(runs_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.SearchRuns.Response.class, org.mlflow.api.proto.Service.SearchRuns.Response.Builder.class); - } - - public static final int RUNS_FIELD_NUMBER = 1; - private java.util.List runs_; - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - public java.util.List getRunsList() { - return runs_; - } - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - public java.util.List - getRunsOrBuilderList() { - return runs_; - } - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - public int getRunsCount() { - return runs_.size(); - } - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - public org.mlflow.api.proto.Service.Run getRuns(int index) { - return runs_.get(index); - } - /** - *
-       * Runs that match the search criteria.
-       *
- * - * repeated .mlflow.Run runs = 1; - */ - public org.mlflow.api.proto.Service.RunOrBuilder getRunsOrBuilder( - int index) { - return runs_.get(index); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - for (int i = 0; i < runs_.size(); i++) { - output.writeMessage(1, runs_.get(i)); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < runs_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, runs_.get(i)); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.SearchRuns.Response)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.SearchRuns.Response other = (org.mlflow.api.proto.Service.SearchRuns.Response) obj; - - boolean result = true; - result = result && getRunsList() - .equals(other.getRunsList()); - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (getRunsCount() > 0) { - hash = (37 * hash) + RUNS_FIELD_NUMBER; - hash = (53 * hash) + getRunsList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom(java.io.InputStream input) - throws java.io.IOException { - 
return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchRuns.Response parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.SearchRuns.Response prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.SearchRuns.Response} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.SearchRuns.Response) - org.mlflow.api.proto.Service.SearchRuns.ResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.SearchRuns.Response.class, org.mlflow.api.proto.Service.SearchRuns.Response.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.SearchRuns.Response.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getRunsFieldBuilder(); - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - if (runsBuilder_ == null) { - runs_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - runsBuilder_.clear(); - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_Response_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.SearchRuns.Response.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns.Response build() { - org.mlflow.api.proto.Service.SearchRuns.Response result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns.Response buildPartial() { - org.mlflow.api.proto.Service.SearchRuns.Response result = new org.mlflow.api.proto.Service.SearchRuns.Response(this); - int from_bitField0_ = bitField0_; - if (runsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - runs_ = java.util.Collections.unmodifiableList(runs_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.runs_ = runs_; - } else { - result.runs_ = runsBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - 
-            com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-          return (Builder) super.clearOneof(oneof);
-        }
-        @java.lang.Override
-        public Builder setRepeatedField(
-            com.google.protobuf.Descriptors.FieldDescriptor field,
-            int index, java.lang.Object value) {
-          return (Builder) super.setRepeatedField(field, index, value);
-        }
-        @java.lang.Override
-        public Builder addRepeatedField(
-            com.google.protobuf.Descriptors.FieldDescriptor field,
-            java.lang.Object value) {
-          return (Builder) super.addRepeatedField(field, value);
-        }
-        @java.lang.Override
-        public Builder mergeFrom(com.google.protobuf.Message other) {
-          if (other instanceof org.mlflow.api.proto.Service.SearchRuns.Response) {
-            return mergeFrom((org.mlflow.api.proto.Service.SearchRuns.Response)other);
-          } else {
-            super.mergeFrom(other);
-            return this;
-          }
-        }
-
-        public Builder mergeFrom(org.mlflow.api.proto.Service.SearchRuns.Response other) {
-          if (other == org.mlflow.api.proto.Service.SearchRuns.Response.getDefaultInstance()) return this;
-          if (runsBuilder_ == null) {
-            if (!other.runs_.isEmpty()) {
-              if (runs_.isEmpty()) {
-                runs_ = other.runs_;
-                bitField0_ = (bitField0_ & ~0x00000001);
-              } else {
-                ensureRunsIsMutable();
-                runs_.addAll(other.runs_);
-              }
-              onChanged();
-            }
-          } else {
-            if (!other.runs_.isEmpty()) {
-              if (runsBuilder_.isEmpty()) {
-                runsBuilder_.dispose();
-                runsBuilder_ = null;
-                runs_ = other.runs_;
-                bitField0_ = (bitField0_ & ~0x00000001);
-                runsBuilder_ =
-                  com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
-                     getRunsFieldBuilder() : null;
-              } else {
-                runsBuilder_.addAllMessages(other.runs_);
-              }
-            }
-          }
-          this.mergeUnknownFields(other.unknownFields);
-          onChanged();
-          return this;
-        }
-
-        @java.lang.Override
-        public final boolean isInitialized() {
-          return true;
-        }
-
-        @java.lang.Override
-        public Builder mergeFrom(
-            com.google.protobuf.CodedInputStream input,
-            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-            throws java.io.IOException {
-          org.mlflow.api.proto.Service.SearchRuns.Response parsedMessage = null;
-          try {
-            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-            parsedMessage = (org.mlflow.api.proto.Service.SearchRuns.Response) e.getUnfinishedMessage();
-            throw e.unwrapIOException();
-          } finally {
-            if (parsedMessage != null) {
-              mergeFrom(parsedMessage);
-            }
-          }
-          return this;
-        }
-        private int bitField0_;
-
-        private java.util.List<org.mlflow.api.proto.Service.Run> runs_ =
-          java.util.Collections.emptyList();
-        private void ensureRunsIsMutable() {
-          if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-            runs_ = new java.util.ArrayList<org.mlflow.api.proto.Service.Run>(runs_);
-            bitField0_ |= 0x00000001;
-          }
-        }
-
-        private com.google.protobuf.RepeatedFieldBuilderV3<
-            org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder> runsBuilder_;
-
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public java.util.List<org.mlflow.api.proto.Service.Run> getRunsList() {
-          if (runsBuilder_ == null) {
-            return java.util.Collections.unmodifiableList(runs_);
-          } else {
-            return runsBuilder_.getMessageList();
-          }
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public int getRunsCount() {
-          if (runsBuilder_ == null) {
-            return runs_.size();
-          } else {
-            return runsBuilder_.getCount();
-          }
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public org.mlflow.api.proto.Service.Run getRuns(int index) {
-          if (runsBuilder_ == null) {
-            return runs_.get(index);
-          } else {
-            return runsBuilder_.getMessage(index);
-          }
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder setRuns(
-            int index, org.mlflow.api.proto.Service.Run value) {
-          if (runsBuilder_ == null) {
-            if (value == null) {
-              throw new NullPointerException();
-            }
-            ensureRunsIsMutable();
-            runs_.set(index, value);
-            onChanged();
-          } else {
-            runsBuilder_.setMessage(index, value);
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder setRuns(
-            int index, org.mlflow.api.proto.Service.Run.Builder builderForValue) {
-          if (runsBuilder_ == null) {
-            ensureRunsIsMutable();
-            runs_.set(index, builderForValue.build());
-            onChanged();
-          } else {
-            runsBuilder_.setMessage(index, builderForValue.build());
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder addRuns(org.mlflow.api.proto.Service.Run value) {
-          if (runsBuilder_ == null) {
-            if (value == null) {
-              throw new NullPointerException();
-            }
-            ensureRunsIsMutable();
-            runs_.add(value);
-            onChanged();
-          } else {
-            runsBuilder_.addMessage(value);
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder addRuns(
-            int index, org.mlflow.api.proto.Service.Run value) {
-          if (runsBuilder_ == null) {
-            if (value == null) {
-              throw new NullPointerException();
-            }
-            ensureRunsIsMutable();
-            runs_.add(index, value);
-            onChanged();
-          } else {
-            runsBuilder_.addMessage(index, value);
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder addRuns(
-            org.mlflow.api.proto.Service.Run.Builder builderForValue) {
-          if (runsBuilder_ == null) {
-            ensureRunsIsMutable();
-            runs_.add(builderForValue.build());
-            onChanged();
-          } else {
-            runsBuilder_.addMessage(builderForValue.build());
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder addRuns(
-            int index, org.mlflow.api.proto.Service.Run.Builder builderForValue) {
-          if (runsBuilder_ == null) {
-            ensureRunsIsMutable();
-            runs_.add(index, builderForValue.build());
-            onChanged();
-          } else {
-            runsBuilder_.addMessage(index, builderForValue.build());
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder addAllRuns(
-            java.lang.Iterable<? extends org.mlflow.api.proto.Service.Run> values) {
-          if (runsBuilder_ == null) {
-            ensureRunsIsMutable();
-            com.google.protobuf.AbstractMessageLite.Builder.addAll(
-                values, runs_);
-            onChanged();
-          } else {
-            runsBuilder_.addAllMessages(values);
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder clearRuns() {
-          if (runsBuilder_ == null) {
-            runs_ = java.util.Collections.emptyList();
-            bitField0_ = (bitField0_ & ~0x00000001);
-            onChanged();
-          } else {
-            runsBuilder_.clear();
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public Builder removeRuns(int index) {
-          if (runsBuilder_ == null) {
-            ensureRunsIsMutable();
-            runs_.remove(index);
-            onChanged();
-          } else {
-            runsBuilder_.remove(index);
-          }
-          return this;
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public org.mlflow.api.proto.Service.Run.Builder getRunsBuilder(
-            int index) {
-          return getRunsFieldBuilder().getBuilder(index);
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public org.mlflow.api.proto.Service.RunOrBuilder getRunsOrBuilder(
-            int index) {
-          if (runsBuilder_ == null) {
-            return runs_.get(index);  } else {
-            return runsBuilder_.getMessageOrBuilder(index);
-          }
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public java.util.List<? extends org.mlflow.api.proto.Service.RunOrBuilder>
-             getRunsOrBuilderList() {
-          if (runsBuilder_ != null) {
-            return runsBuilder_.getMessageOrBuilderList();
-          } else {
-            return java.util.Collections.unmodifiableList(runs_);
-          }
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public org.mlflow.api.proto.Service.Run.Builder addRunsBuilder() {
-          return getRunsFieldBuilder().addBuilder(
-              org.mlflow.api.proto.Service.Run.getDefaultInstance());
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public org.mlflow.api.proto.Service.Run.Builder addRunsBuilder(
-            int index) {
-          return getRunsFieldBuilder().addBuilder(
-              index, org.mlflow.api.proto.Service.Run.getDefaultInstance());
-        }
-        /**
-         * <pre>
-         * Runs that match the search criteria.
-         * </pre>
-         *
-         * <code>repeated .mlflow.Run runs = 1;</code>
-         */
-        public java.util.List<org.mlflow.api.proto.Service.Run.Builder>
-             getRunsBuilderList() {
-          return getRunsFieldBuilder().getBuilderList();
-        }
-        private com.google.protobuf.RepeatedFieldBuilderV3<
-            org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder>
-            getRunsFieldBuilder() {
-          if (runsBuilder_ == null) {
-            runsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
-                org.mlflow.api.proto.Service.Run, org.mlflow.api.proto.Service.Run.Builder, org.mlflow.api.proto.Service.RunOrBuilder>(
-                    runs_,
-                    ((bitField0_ & 0x00000001) == 0x00000001),
-                    getParentForChildren(),
-                    isClean());
-            runs_ = null;
-          }
-          return runsBuilder_;
-        }
-        @java.lang.Override
-        public final Builder setUnknownFields(
-            final com.google.protobuf.UnknownFieldSet unknownFields) {
-          return super.setUnknownFields(unknownFields);
-        }
-
-        @java.lang.Override
-        public final Builder mergeUnknownFields(
-            final com.google.protobuf.UnknownFieldSet unknownFields) {
-          return super.mergeUnknownFields(unknownFields);
-        }
-
-
-        // @@protoc_insertion_point(builder_scope:mlflow.SearchRuns.Response)
-      }
-
-      // @@protoc_insertion_point(class_scope:mlflow.SearchRuns.Response)
-      private static final org.mlflow.api.proto.Service.SearchRuns.Response DEFAULT_INSTANCE;
-      static {
-        DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.SearchRuns.Response();
-      }
-
-      public static org.mlflow.api.proto.Service.SearchRuns.Response getDefaultInstance() {
-        return DEFAULT_INSTANCE;
-      }
-
-      @java.lang.Deprecated public static final com.google.protobuf.Parser<Response>
-          PARSER = new com.google.protobuf.AbstractParser<Response>() {
-        @java.lang.Override
-        public Response parsePartialFrom(
-            com.google.protobuf.CodedInputStream input,
-            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-            throws com.google.protobuf.InvalidProtocolBufferException {
-          return new Response(input, extensionRegistry);
-        }
-      };
-
-      public static com.google.protobuf.Parser<Response> parser() {
-        return PARSER;
-      }
-
-      @java.lang.Override
-      public com.google.protobuf.Parser<Response> getParserForType() {
-        return PARSER;
-      }
-
-      @java.lang.Override
-      public org.mlflow.api.proto.Service.SearchRuns.Response getDefaultInstanceForType() {
-        return DEFAULT_INSTANCE;
-      }
-
-    }
-
-    private int bitField0_;
-    public static final int EXPERIMENT_IDS_FIELD_NUMBER = 1;
-    private java.util.List<java.lang.Long> experimentIds_;
-    /**
-     * <pre>
-     * List of experiment IDs to search over.
-     * </pre>
-     *
-     * <code>repeated int64 experiment_ids = 1;</code>
-     */
-    public java.util.List<java.lang.Long>
-        getExperimentIdsList() {
-      return experimentIds_;
-    }
-    /**
-     * <pre>
-     * List of experiment IDs to search over.
-     * </pre>
-     *
-     * <code>repeated int64 experiment_ids = 1;</code>
-     */
-    public int getExperimentIdsCount() {
-      return experimentIds_.size();
-    }
-    /**
-     * <pre>
-     * List of experiment IDs to search over.
-     * </pre>
-     *
-     * <code>repeated int64 experiment_ids = 1;</code>
-     */
-    public long getExperimentIds(int index) {
-      return experimentIds_.get(index);
-    }
-
-    public static final int ANDED_EXPRESSIONS_FIELD_NUMBER = 2;
-    private java.util.List<org.mlflow.api.proto.Service.SearchExpression> andedExpressions_;
-    /**
-     * <pre>
-     * Expressions describing runs (AND-ed together when filtering runs).
-     * </pre>
-     *
-     * <code>repeated .mlflow.SearchExpression anded_expressions = 2;</code>
-     */
-    public java.util.List<org.mlflow.api.proto.Service.SearchExpression> getAndedExpressionsList() {
-      return andedExpressions_;
-    }
-    /**
-     * <pre>
-     * Expressions describing runs (AND-ed together when filtering runs).
-     * </pre>
-     *
-     * <code>repeated .mlflow.SearchExpression anded_expressions = 2;</code>
-     */
-    public java.util.List<? extends org.mlflow.api.proto.Service.SearchExpressionOrBuilder>
-        getAndedExpressionsOrBuilderList() {
-      return andedExpressions_;
-    }
-    /**
-     * <pre>
-     * Expressions describing runs (AND-ed together when filtering runs).
-     * </pre>
-     *
-     * <code>repeated .mlflow.SearchExpression anded_expressions = 2;</code>
-     */
-    public int getAndedExpressionsCount() {
-      return andedExpressions_.size();
-    }
-    /**
-     * <pre>
-     * Expressions describing runs (AND-ed together when filtering runs).
-     * </pre>
-     *
-     * <code>repeated .mlflow.SearchExpression anded_expressions = 2;</code>
-     */
-    public org.mlflow.api.proto.Service.SearchExpression getAndedExpressions(int index) {
-      return andedExpressions_.get(index);
-    }
-    /**
-     * <pre>
-     * Expressions describing runs (AND-ed together when filtering runs).
-     * </pre>
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public org.mlflow.api.proto.Service.SearchExpressionOrBuilder getAndedExpressionsOrBuilder( - int index) { - return andedExpressions_.get(index); - } - - public static final int RUN_VIEW_TYPE_FIELD_NUMBER = 3; - private int runViewType_; - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - public boolean hasRunViewType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - public org.mlflow.api.proto.Service.ViewType getRunViewType() { - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.ViewType result = org.mlflow.api.proto.Service.ViewType.valueOf(runViewType_); - return result == null ? org.mlflow.api.proto.Service.ViewType.ACTIVE_ONLY : result; - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - for (int i = 0; i < experimentIds_.size(); i++) { - output.writeInt64(1, experimentIds_.get(i)); - } - for (int i = 0; i < andedExpressions_.size(); i++) { - output.writeMessage(2, andedExpressions_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(3, runViewType_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < experimentIds_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeInt64SizeNoTag(experimentIds_.get(i)); - } - size += dataSize; - size += 1 * getExperimentIdsList().size(); - } - for (int i = 0; i < andedExpressions_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, andedExpressions_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, runViewType_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.SearchRuns)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.SearchRuns other = (org.mlflow.api.proto.Service.SearchRuns) obj; - - boolean result = true; - result = result && getExperimentIdsList() - .equals(other.getExperimentIdsList()); - result = result && getAndedExpressionsList() - .equals(other.getAndedExpressionsList()); - result = result && (hasRunViewType() == other.hasRunViewType()); - if (hasRunViewType()) { - result = result && runViewType_ == other.runViewType_; - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (getExperimentIdsCount() > 0) { - hash = (37 * hash) + EXPERIMENT_IDS_FIELD_NUMBER; - hash = (53 * hash) + getExperimentIdsList().hashCode(); - } - if (getAndedExpressionsCount() > 0) { - hash = (37 * hash) + 
ANDED_EXPRESSIONS_FIELD_NUMBER; - hash = (53 * hash) + getAndedExpressionsList().hashCode(); - } - if (hasRunViewType()) { - hash = (37 * hash) + RUN_VIEW_TYPE_FIELD_NUMBER; - hash = (53 * hash) + runViewType_; - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchRuns parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.SearchRuns parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder 
newBuilder(org.mlflow.api.proto.Service.SearchRuns prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.SearchRuns} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.SearchRuns) - org.mlflow.api.proto.Service.SearchRunsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.SearchRuns.class, org.mlflow.api.proto.Service.SearchRuns.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.SearchRuns.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getAndedExpressionsFieldBuilder(); - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - experimentIds_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - if (andedExpressionsBuilder_ == null) { - andedExpressions_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - andedExpressionsBuilder_.clear(); - } - runViewType_ = 1; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_SearchRuns_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.SearchRuns.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns build() { - org.mlflow.api.proto.Service.SearchRuns result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns buildPartial() { - org.mlflow.api.proto.Service.SearchRuns result = new org.mlflow.api.proto.Service.SearchRuns(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - experimentIds_ = java.util.Collections.unmodifiableList(experimentIds_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.experimentIds_ = experimentIds_; - if (andedExpressionsBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - andedExpressions_ = java.util.Collections.unmodifiableList(andedExpressions_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.andedExpressions_ = andedExpressions_; - } else { - 
result.andedExpressions_ = andedExpressionsBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000001; - } - result.runViewType_ = runViewType_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.SearchRuns) { - return mergeFrom((org.mlflow.api.proto.Service.SearchRuns)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.mlflow.api.proto.Service.SearchRuns other) { - if (other == org.mlflow.api.proto.Service.SearchRuns.getDefaultInstance()) return this; - if (!other.experimentIds_.isEmpty()) { - if (experimentIds_.isEmpty()) { - experimentIds_ = other.experimentIds_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureExperimentIdsIsMutable(); - experimentIds_.addAll(other.experimentIds_); - } - onChanged(); - } - if (andedExpressionsBuilder_ == null) { - if (!other.andedExpressions_.isEmpty()) { - if (andedExpressions_.isEmpty()) { - andedExpressions_ = other.andedExpressions_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureAndedExpressionsIsMutable(); - andedExpressions_.addAll(other.andedExpressions_); - } - onChanged(); - } - } else { - if (!other.andedExpressions_.isEmpty()) { - if (andedExpressionsBuilder_.isEmpty()) { - andedExpressionsBuilder_.dispose(); - andedExpressionsBuilder_ = null; - andedExpressions_ = other.andedExpressions_; - bitField0_ = (bitField0_ & ~0x00000002); - andedExpressionsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
- getAndedExpressionsFieldBuilder() : null; - } else { - andedExpressionsBuilder_.addAllMessages(other.andedExpressions_); - } - } - } - if (other.hasRunViewType()) { - setRunViewType(other.getRunViewType()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.SearchRuns parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.SearchRuns) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.util.List experimentIds_ = java.util.Collections.emptyList(); - private void ensureExperimentIdsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - experimentIds_ = new java.util.ArrayList(experimentIds_); - bitField0_ |= 0x00000001; - } - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public java.util.List - getExperimentIdsList() { - return java.util.Collections.unmodifiableList(experimentIds_); - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public int getExperimentIdsCount() { - return experimentIds_.size(); - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public long getExperimentIds(int index) { - return experimentIds_.get(index); - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public Builder setExperimentIds( - int index, long value) { - ensureExperimentIdsIsMutable(); - experimentIds_.set(index, value); - onChanged(); - return this; - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public Builder addExperimentIds(long value) { - ensureExperimentIdsIsMutable(); - experimentIds_.add(value); - onChanged(); - return this; - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public Builder addAllExperimentIds( - java.lang.Iterable values) { - ensureExperimentIdsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, experimentIds_); - onChanged(); - return this; - } - /** - *
-       * List of experiment IDs to search over.
-       * 
- * - * repeated int64 experiment_ids = 1; - */ - public Builder clearExperimentIds() { - experimentIds_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - - private java.util.List andedExpressions_ = - java.util.Collections.emptyList(); - private void ensureAndedExpressionsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - andedExpressions_ = new java.util.ArrayList(andedExpressions_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.SearchExpression, org.mlflow.api.proto.Service.SearchExpression.Builder, org.mlflow.api.proto.Service.SearchExpressionOrBuilder> andedExpressionsBuilder_; - - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public java.util.List getAndedExpressionsList() { - if (andedExpressionsBuilder_ == null) { - return java.util.Collections.unmodifiableList(andedExpressions_); - } else { - return andedExpressionsBuilder_.getMessageList(); - } - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public int getAndedExpressionsCount() { - if (andedExpressionsBuilder_ == null) { - return andedExpressions_.size(); - } else { - return andedExpressionsBuilder_.getCount(); - } - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public org.mlflow.api.proto.Service.SearchExpression getAndedExpressions(int index) { - if (andedExpressionsBuilder_ == null) { - return andedExpressions_.get(index); - } else { - return andedExpressionsBuilder_.getMessage(index); - } - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder setAndedExpressions( - int index, org.mlflow.api.proto.Service.SearchExpression value) { - if (andedExpressionsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAndedExpressionsIsMutable(); - andedExpressions_.set(index, value); - onChanged(); - } else { - andedExpressionsBuilder_.setMessage(index, value); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder setAndedExpressions( - int index, org.mlflow.api.proto.Service.SearchExpression.Builder builderForValue) { - if (andedExpressionsBuilder_ == null) { - ensureAndedExpressionsIsMutable(); - andedExpressions_.set(index, builderForValue.build()); - onChanged(); - } else { - andedExpressionsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder addAndedExpressions(org.mlflow.api.proto.Service.SearchExpression value) { - if (andedExpressionsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAndedExpressionsIsMutable(); - andedExpressions_.add(value); - onChanged(); - } else { - andedExpressionsBuilder_.addMessage(value); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder addAndedExpressions( - int index, org.mlflow.api.proto.Service.SearchExpression value) { - if (andedExpressionsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAndedExpressionsIsMutable(); - andedExpressions_.add(index, value); - onChanged(); - } else { - andedExpressionsBuilder_.addMessage(index, value); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder addAndedExpressions( - org.mlflow.api.proto.Service.SearchExpression.Builder builderForValue) { - if (andedExpressionsBuilder_ == null) { - ensureAndedExpressionsIsMutable(); - andedExpressions_.add(builderForValue.build()); - onChanged(); - } else { - andedExpressionsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder addAndedExpressions( - int index, org.mlflow.api.proto.Service.SearchExpression.Builder builderForValue) { - if (andedExpressionsBuilder_ == null) { - ensureAndedExpressionsIsMutable(); - andedExpressions_.add(index, builderForValue.build()); - onChanged(); - } else { - andedExpressionsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder addAllAndedExpressions( - java.lang.Iterable values) { - if (andedExpressionsBuilder_ == null) { - ensureAndedExpressionsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, andedExpressions_); - onChanged(); - } else { - andedExpressionsBuilder_.addAllMessages(values); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder clearAndedExpressions() { - if (andedExpressionsBuilder_ == null) { - andedExpressions_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - andedExpressionsBuilder_.clear(); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public Builder removeAndedExpressions(int index) { - if (andedExpressionsBuilder_ == null) { - ensureAndedExpressionsIsMutable(); - andedExpressions_.remove(index); - onChanged(); - } else { - andedExpressionsBuilder_.remove(index); - } - return this; - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public org.mlflow.api.proto.Service.SearchExpression.Builder getAndedExpressionsBuilder( - int index) { - return getAndedExpressionsFieldBuilder().getBuilder(index); - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public org.mlflow.api.proto.Service.SearchExpressionOrBuilder getAndedExpressionsOrBuilder( - int index) { - if (andedExpressionsBuilder_ == null) { - return andedExpressions_.get(index); } else { - return andedExpressionsBuilder_.getMessageOrBuilder(index); - } - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public java.util.List - getAndedExpressionsOrBuilderList() { - if (andedExpressionsBuilder_ != null) { - return andedExpressionsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(andedExpressions_); - } - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public org.mlflow.api.proto.Service.SearchExpression.Builder addAndedExpressionsBuilder() { - return getAndedExpressionsFieldBuilder().addBuilder( - org.mlflow.api.proto.Service.SearchExpression.getDefaultInstance()); - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public org.mlflow.api.proto.Service.SearchExpression.Builder addAndedExpressionsBuilder( - int index) { - return getAndedExpressionsFieldBuilder().addBuilder( - index, org.mlflow.api.proto.Service.SearchExpression.getDefaultInstance()); - } - /** - *
-       * Expressions describing runs (AND-ed together when filtering runs).
-       * 
- * - * repeated .mlflow.SearchExpression anded_expressions = 2; - */ - public java.util.List - getAndedExpressionsBuilderList() { - return getAndedExpressionsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.SearchExpression, org.mlflow.api.proto.Service.SearchExpression.Builder, org.mlflow.api.proto.Service.SearchExpressionOrBuilder> - getAndedExpressionsFieldBuilder() { - if (andedExpressionsBuilder_ == null) { - andedExpressionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.SearchExpression, org.mlflow.api.proto.Service.SearchExpression.Builder, org.mlflow.api.proto.Service.SearchExpressionOrBuilder>( - andedExpressions_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - andedExpressions_ = null; - } - return andedExpressionsBuilder_; - } - - private int runViewType_ = 1; - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - public boolean hasRunViewType() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - public org.mlflow.api.proto.Service.ViewType getRunViewType() { - @SuppressWarnings("deprecation") - org.mlflow.api.proto.Service.ViewType result = org.mlflow.api.proto.Service.ViewType.valueOf(runViewType_); - return result == null ? org.mlflow.api.proto.Service.ViewType.ACTIVE_ONLY : result; - } - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - public Builder setRunViewType(org.mlflow.api.proto.Service.ViewType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - runViewType_ = value.getNumber(); - onChanged(); - return this; - } - /** - * optional .mlflow.ViewType run_view_type = 3 [default = ACTIVE_ONLY]; - */ - public Builder clearRunViewType() { - bitField0_ = (bitField0_ & ~0x00000004); - runViewType_ = 1; - onChanged(); - return this; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.SearchRuns) - } - - // @@protoc_insertion_point(class_scope:mlflow.SearchRuns) - private static final org.mlflow.api.proto.Service.SearchRuns DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.SearchRuns(); - } - - public static org.mlflow.api.proto.Service.SearchRuns getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SearchRuns parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SearchRuns(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.SearchRuns getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - 
-  public interface ListArtifactsOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:mlflow.ListArtifacts)
-      com.google.protobuf.MessageOrBuilder {
-
-    /**
-     * <pre>
-     * ID of the run whose artifacts to list.
-     * </pre>
-     *
-     * <code>optional string run_uuid = 1;</code>
-     */
-    boolean hasRunUuid();
-    /**
-     * <pre>
-     * ID of the run whose artifacts to list.
-     * </pre>
-     *
-     * <code>optional string run_uuid = 1;</code>
-     */
-    java.lang.String getRunUuid();
-    /**
-     * <pre>
-     * ID of the run whose artifacts to list.
-     * </pre>
-     *
-     * <code>optional string run_uuid = 1;</code>
-     */
-    com.google.protobuf.ByteString
-        getRunUuidBytes();
-
-    /**
-     * <pre>
-     * Filter artifacts matching this path (a relative path from the root artifact directory).
-     * </pre>
-     *
-     * <code>optional string path = 2;</code>
-     */
-    boolean hasPath();
-    /**
-     * <pre>
-     * Filter artifacts matching this path (a relative path from the root artifact directory).
-     * </pre>
-     *
-     * <code>optional string path = 2;</code>
-     */
-    java.lang.String getPath();
-    /**
-     * <pre>
-     * Filter artifacts matching this path (a relative path from the root artifact directory).
-     * </pre>
- * - * optional string path = 2; - */ - com.google.protobuf.ByteString - getPathBytes(); - } - /** - * Protobuf type {@code mlflow.ListArtifacts} - */ - public static final class ListArtifacts extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.ListArtifacts) - ListArtifactsOrBuilder { - private static final long serialVersionUID = 0L; - // Use ListArtifacts.newBuilder() to construct. - private ListArtifacts(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private ListArtifacts() { - runUuid_ = ""; - path_ = ""; - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ListArtifacts( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - runUuid_ = bs; - break; - } - case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - path_ = bs; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.ListArtifacts.class, org.mlflow.api.proto.Service.ListArtifacts.Builder.class); - } - - public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.ListArtifacts.Response) - com.google.protobuf.MessageOrBuilder { - - /** - *
-       * Root artifact directory for the run.
-       * </pre>
-       *
-       * <code>optional string root_uri = 1;</code>
-       */
-      boolean hasRootUri();
-      /**
-       * <pre>
-       * Root artifact directory for the run.
-       * </pre>
-       *
-       * <code>optional string root_uri = 1;</code>
-       */
-      java.lang.String getRootUri();
-      /**
-       * <pre>
-       * Root artifact directory for the run.
-       * </pre>
-       *
-       * <code>optional string root_uri = 1;</code>
-       */
-      com.google.protobuf.ByteString
-          getRootUriBytes();
-
-      /**
-       * <pre>
-       * File location and metadata for artifacts.
-       * </pre>
-       *
-       * <code>repeated .mlflow.FileInfo files = 2;</code>
-       */
-      java.util.List<org.mlflow.api.proto.Service.FileInfo>
-          getFilesList();
-      /**
-       * <pre>
-       * File location and metadata for artifacts.
-       * </pre>
-       *
-       * <code>repeated .mlflow.FileInfo files = 2;</code>
-       */
-      org.mlflow.api.proto.Service.FileInfo getFiles(int index);
-      /**
-       * <pre>
-       * File location and metadata for artifacts.
-       * </pre>
-       *
-       * <code>repeated .mlflow.FileInfo files = 2;</code>
-       */
-      int getFilesCount();
-      /**
-       * <pre>
-       * File location and metadata for artifacts.
-       * </pre>
-       *
-       * <code>repeated .mlflow.FileInfo files = 2;</code>
-       */
-      java.util.List<? extends org.mlflow.api.proto.Service.FileInfoOrBuilder>
-          getFilesOrBuilderList();
-      /**
-       * <pre>
-       * File location and metadata for artifacts.
-       * </pre>
- * - * repeated .mlflow.FileInfo files = 2; - */ - org.mlflow.api.proto.Service.FileInfoOrBuilder getFilesOrBuilder( - int index); - } - /** - * Protobuf type {@code mlflow.ListArtifacts.Response} - */ - public static final class Response extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.ListArtifacts.Response) - ResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use Response.newBuilder() to construct. - private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private Response() { - rootUri_ = ""; - files_ = java.util.Collections.emptyList(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Response( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - rootUri_ = bs; - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - files_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - files_.add( - input.readMessage(org.mlflow.api.proto.Service.FileInfo.PARSER, extensionRegistry)); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - files_ = java.util.Collections.unmodifiableList(files_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.ListArtifacts.Response.class, org.mlflow.api.proto.Service.ListArtifacts.Response.Builder.class); - } - - private int bitField0_; - public static final int ROOT_URI_FIELD_NUMBER = 1; - private volatile java.lang.Object rootUri_; - /** - *
-       * Root artifact directory for the run.
-       * 
- * - * optional string root_uri = 1; - */ - public boolean hasRootUri() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * Root artifact directory for the run.
-       * 
- * - * optional string root_uri = 1; - */ - public java.lang.String getRootUri() { - java.lang.Object ref = rootUri_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - rootUri_ = s; - } - return s; - } - } - /** - *
-       * Root artifact directory for the run.
-       * 
- * - * optional string root_uri = 1; - */ - public com.google.protobuf.ByteString - getRootUriBytes() { - java.lang.Object ref = rootUri_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - rootUri_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int FILES_FIELD_NUMBER = 2; - private java.util.List files_; - /** - *
-       * File location and metadata for artifacts.
-       * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public java.util.List getFilesList() { - return files_; - } - /** - *
-       * File location and metadata for artifacts.
-       * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public java.util.List - getFilesOrBuilderList() { - return files_; - } - /** - *
-       * File location and metadata for artifacts.
-       * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public int getFilesCount() { - return files_.size(); - } - /** - *
-       * File location and metadata for artifacts.
-       * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfo getFiles(int index) { - return files_.get(index); - } - /** - *
-       * File location and metadata for artifacts.
-       * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfoOrBuilder getFilesOrBuilder( - int index) { - return files_.get(index); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, rootUri_); - } - for (int i = 0; i < files_.size(); i++) { - output.writeMessage(2, files_.get(i)); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, rootUri_); - } - for (int i = 0; i < files_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, files_.get(i)); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.ListArtifacts.Response)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.ListArtifacts.Response other = (org.mlflow.api.proto.Service.ListArtifacts.Response) obj; - - boolean result = true; - result = result && (hasRootUri() == other.hasRootUri()); - if (hasRootUri()) { - result = result && getRootUri() - .equals(other.getRootUri()); - } - result = result && getFilesList() - .equals(other.getFilesList()); - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRootUri()) { - hash = (37 * hash) + ROOT_URI_FIELD_NUMBER; - hash = (53 * hash) + getRootUri().hashCode(); - } - if (getFilesCount() > 0) { - hash = (37 * hash) + FILES_FIELD_NUMBER; - hash = (53 * hash) + getFilesList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static 
org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.ListArtifacts.Response parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.ListArtifacts.Response prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.ListArtifacts.Response} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.ListArtifacts.Response) - org.mlflow.api.proto.Service.ListArtifacts.ResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.ListArtifacts.Response.class, org.mlflow.api.proto.Service.ListArtifacts.Response.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.ListArtifacts.Response.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getFilesFieldBuilder(); - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - rootUri_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - if (filesBuilder_ == null) { - files_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - filesBuilder_.clear(); - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_Response_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.ListArtifacts.Response.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts.Response build() { - org.mlflow.api.proto.Service.ListArtifacts.Response result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts.Response buildPartial() { - org.mlflow.api.proto.Service.ListArtifacts.Response result = new org.mlflow.api.proto.Service.ListArtifacts.Response(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.rootUri_ = rootUri_; - if (filesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - files_ = java.util.Collections.unmodifiableList(files_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.files_ = files_; - } else { - result.files_ = filesBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - 
java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.ListArtifacts.Response) { - return mergeFrom((org.mlflow.api.proto.Service.ListArtifacts.Response)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.mlflow.api.proto.Service.ListArtifacts.Response other) { - if (other == org.mlflow.api.proto.Service.ListArtifacts.Response.getDefaultInstance()) return this; - if (other.hasRootUri()) { - bitField0_ |= 0x00000001; - rootUri_ = other.rootUri_; - onChanged(); - } - if (filesBuilder_ == null) { - if (!other.files_.isEmpty()) { - if (files_.isEmpty()) { - files_ = other.files_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureFilesIsMutable(); - files_.addAll(other.files_); - } - onChanged(); - } - } else { - if (!other.files_.isEmpty()) { - if (filesBuilder_.isEmpty()) { - filesBuilder_.dispose(); - filesBuilder_ = null; - files_ = other.files_; - bitField0_ = (bitField0_ & ~0x00000002); - filesBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getFilesFieldBuilder() : null; - } else { - filesBuilder_.addAllMessages(other.files_); - } - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.ListArtifacts.Response parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.ListArtifacts.Response) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object rootUri_ = ""; - /** - *
-         * Root artifact directory for the run.
-         * 
- * - * optional string root_uri = 1; - */ - public boolean hasRootUri() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-         * Root artifact directory for the run.
-         * 
- * - * optional string root_uri = 1; - */ - public java.lang.String getRootUri() { - java.lang.Object ref = rootUri_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - rootUri_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-         * Root artifact directory for the run.
-         * 
- * - * optional string root_uri = 1; - */ - public com.google.protobuf.ByteString - getRootUriBytes() { - java.lang.Object ref = rootUri_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - rootUri_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-         * Root artifact directory for the run.
-         * 
- * - * optional string root_uri = 1; - */ - public Builder setRootUri( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - rootUri_ = value; - onChanged(); - return this; - } - /** - *
-         * Root artifact directory for the run.
-         * 
- * - * optional string root_uri = 1; - */ - public Builder clearRootUri() { - bitField0_ = (bitField0_ & ~0x00000001); - rootUri_ = getDefaultInstance().getRootUri(); - onChanged(); - return this; - } - /** - *
-         * Root artifact directory for the run.
-         * 
- * - * optional string root_uri = 1; - */ - public Builder setRootUriBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - rootUri_ = value; - onChanged(); - return this; - } - - private java.util.List files_ = - java.util.Collections.emptyList(); - private void ensureFilesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - files_ = new java.util.ArrayList(files_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.FileInfo, org.mlflow.api.proto.Service.FileInfo.Builder, org.mlflow.api.proto.Service.FileInfoOrBuilder> filesBuilder_; - - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public java.util.List getFilesList() { - if (filesBuilder_ == null) { - return java.util.Collections.unmodifiableList(files_); - } else { - return filesBuilder_.getMessageList(); - } - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public int getFilesCount() { - if (filesBuilder_ == null) { - return files_.size(); - } else { - return filesBuilder_.getCount(); - } - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfo getFiles(int index) { - if (filesBuilder_ == null) { - return files_.get(index); - } else { - return filesBuilder_.getMessage(index); - } - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder setFiles( - int index, org.mlflow.api.proto.Service.FileInfo value) { - if (filesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFilesIsMutable(); - files_.set(index, value); - onChanged(); - } else { - filesBuilder_.setMessage(index, value); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder setFiles( - int index, org.mlflow.api.proto.Service.FileInfo.Builder builderForValue) { - if (filesBuilder_ == null) { - ensureFilesIsMutable(); - files_.set(index, builderForValue.build()); - onChanged(); - } else { - filesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder addFiles(org.mlflow.api.proto.Service.FileInfo value) { - if (filesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFilesIsMutable(); - files_.add(value); - onChanged(); - } else { - filesBuilder_.addMessage(value); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder addFiles( - int index, org.mlflow.api.proto.Service.FileInfo value) { - if (filesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFilesIsMutable(); - files_.add(index, value); - onChanged(); - } else { - filesBuilder_.addMessage(index, value); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder addFiles( - org.mlflow.api.proto.Service.FileInfo.Builder builderForValue) { - if (filesBuilder_ == null) { - ensureFilesIsMutable(); - files_.add(builderForValue.build()); - onChanged(); - } else { - filesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder addFiles( - int index, org.mlflow.api.proto.Service.FileInfo.Builder builderForValue) { - if (filesBuilder_ == null) { - ensureFilesIsMutable(); - files_.add(index, builderForValue.build()); - onChanged(); - } else { - filesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder addAllFiles( - java.lang.Iterable values) { - if (filesBuilder_ == null) { - ensureFilesIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, files_); - onChanged(); - } else { - filesBuilder_.addAllMessages(values); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder clearFiles() { - if (filesBuilder_ == null) { - files_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - filesBuilder_.clear(); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public Builder removeFiles(int index) { - if (filesBuilder_ == null) { - ensureFilesIsMutable(); - files_.remove(index); - onChanged(); - } else { - filesBuilder_.remove(index); - } - return this; - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfo.Builder getFilesBuilder( - int index) { - return getFilesFieldBuilder().getBuilder(index); - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfoOrBuilder getFilesOrBuilder( - int index) { - if (filesBuilder_ == null) { - return files_.get(index); } else { - return filesBuilder_.getMessageOrBuilder(index); - } - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public java.util.List - getFilesOrBuilderList() { - if (filesBuilder_ != null) { - return filesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(files_); - } - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfo.Builder addFilesBuilder() { - return getFilesFieldBuilder().addBuilder( - org.mlflow.api.proto.Service.FileInfo.getDefaultInstance()); - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public org.mlflow.api.proto.Service.FileInfo.Builder addFilesBuilder( - int index) { - return getFilesFieldBuilder().addBuilder( - index, org.mlflow.api.proto.Service.FileInfo.getDefaultInstance()); - } - /** - *
-         * File location and metadata for artifacts.
-         * 
- * - * repeated .mlflow.FileInfo files = 2; - */ - public java.util.List - getFilesBuilderList() { - return getFilesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.FileInfo, org.mlflow.api.proto.Service.FileInfo.Builder, org.mlflow.api.proto.Service.FileInfoOrBuilder> - getFilesFieldBuilder() { - if (filesBuilder_ == null) { - filesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.FileInfo, org.mlflow.api.proto.Service.FileInfo.Builder, org.mlflow.api.proto.Service.FileInfoOrBuilder>( - files_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - files_ = null; - } - return filesBuilder_; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.ListArtifacts.Response) - } - - // @@protoc_insertion_point(class_scope:mlflow.ListArtifacts.Response) - private static final org.mlflow.api.proto.Service.ListArtifacts.Response DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.ListArtifacts.Response(); - } - - public static org.mlflow.api.proto.Service.ListArtifacts.Response getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public Response parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Response(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts.Response getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - private int bitField0_; - public static final int RUN_UUID_FIELD_NUMBER = 1; - private volatile java.lang.Object runUuid_; - /** - *
-     * ID of the run whose artifacts to list.
-     * 
- * - * optional string run_uuid = 1; - */ - public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-     * ID of the run whose artifacts to list.
-     * 
- * - * optional string run_uuid = 1; - */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runUuid_ = s; - } - return s; - } - } - /** - *
-     * ID of the run whose artifacts to list.
-     * 
- * - * optional string run_uuid = 1; - */ - public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runUuid_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int PATH_FIELD_NUMBER = 2; - private volatile java.lang.Object path_; - /** - *
-     * Filter artifacts matching this path (a relative path from the root artifact directory).
-     * 
- * - * optional string path = 2; - */ - public boolean hasPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-     * Filter artifacts matching this path (a relative path from the root artifact directory).
-     * 
- * - * optional string path = 2; - */ - public java.lang.String getPath() { - java.lang.Object ref = path_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - path_ = s; - } - return s; - } - } - /** - *
-     * Filter artifacts matching this path (a relative path from the root artifact directory).
-     * 
- * - * optional string path = 2; - */ - public com.google.protobuf.ByteString - getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, path_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, path_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.ListArtifacts)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.ListArtifacts other = (org.mlflow.api.proto.Service.ListArtifacts) obj; - - boolean result = true; - result = result && (hasRunUuid() == other.hasRunUuid()); - if (hasRunUuid()) { - result = result && getRunUuid() - .equals(other.getRunUuid()); - } - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRunUuid()) { - hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; - hash = (53 * hash) + getRunUuid().hashCode(); - } - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - 
com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.ListArtifacts parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.ListArtifacts prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code mlflow.ListArtifacts} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.ListArtifacts) - org.mlflow.api.proto.Service.ListArtifactsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.ListArtifacts.class, org.mlflow.api.proto.Service.ListArtifacts.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.ListArtifacts.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - runUuid_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - path_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_ListArtifacts_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.ListArtifacts.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts build() { - org.mlflow.api.proto.Service.ListArtifacts result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts buildPartial() { - org.mlflow.api.proto.Service.ListArtifacts result = new org.mlflow.api.proto.Service.ListArtifacts(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.runUuid_ = runUuid_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.path_ = path_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder 
setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.ListArtifacts) { - return mergeFrom((org.mlflow.api.proto.Service.ListArtifacts)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.mlflow.api.proto.Service.ListArtifacts other) { - if (other == org.mlflow.api.proto.Service.ListArtifacts.getDefaultInstance()) return this; - if (other.hasRunUuid()) { - bitField0_ |= 0x00000001; - runUuid_ = other.runUuid_; - onChanged(); - } - if (other.hasPath()) { - bitField0_ |= 0x00000002; - path_ = other.path_; - onChanged(); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.ListArtifacts parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.ListArtifacts) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object runUuid_ = ""; - /** - *
-       * ID of the run whose artifacts to list.
-       * 
- * - * optional string run_uuid = 1; - */ - public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * ID of the run whose artifacts to list.
-       * 
- * - * optional string run_uuid = 1; - */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runUuid_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * ID of the run whose artifacts to list.
-       * 
- * - * optional string run_uuid = 1; - */ - public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runUuid_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * ID of the run whose artifacts to list.
-       * 
- * - * optional string run_uuid = 1; - */ - public Builder setRunUuid( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - runUuid_ = value; - onChanged(); - return this; - } - /** - *
-       * ID of the run whose artifacts to list.
-       * 
- * - * optional string run_uuid = 1; - */ - public Builder clearRunUuid() { - bitField0_ = (bitField0_ & ~0x00000001); - runUuid_ = getDefaultInstance().getRunUuid(); - onChanged(); - return this; - } - /** - *
-       * ID of the run whose artifacts to list.
-       * 
- * - * optional string run_uuid = 1; - */ - public Builder setRunUuidBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - runUuid_ = value; - onChanged(); - return this; - } - - private java.lang.Object path_ = ""; - /** - *
-       * Filter artifacts matching this path (a relative path from the root artifact directory).
-       * 
- * - * optional string path = 2; - */ - public boolean hasPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-       * Filter artifacts matching this path (a relative path from the root artifact directory).
-       * 
- * - * optional string path = 2; - */ - public java.lang.String getPath() { - java.lang.Object ref = path_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - path_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * Filter artifacts matching this path (a relative path from the root artifact directory).
-       * 
- * - * optional string path = 2; - */ - public com.google.protobuf.ByteString - getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * Filter artifacts matching this path (a relative path from the root artifact directory).
-       * 
- * - * optional string path = 2; - */ - public Builder setPath( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - path_ = value; - onChanged(); - return this; - } - /** - *
-       * Filter artifacts matching this path (a relative path from the root artifact directory).
-       * 
- * - * optional string path = 2; - */ - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000002); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - /** - *
-       * Filter artifacts matching this path (a relative path from the root artifact directory).
-       * 
- * - * optional string path = 2; - */ - public Builder setPathBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - path_ = value; - onChanged(); - return this; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.ListArtifacts) - } - - // @@protoc_insertion_point(class_scope:mlflow.ListArtifacts) - private static final org.mlflow.api.proto.Service.ListArtifacts DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.ListArtifacts(); - } - - public static org.mlflow.api.proto.Service.ListArtifacts getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public ListArtifacts parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ListArtifacts(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.ListArtifacts getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface FileInfoOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.FileInfo) - com.google.protobuf.MessageOrBuilder { - - /** - *
-     * Path relative to the run's root artifact directory.
-     * 
- * - * optional string path = 1; - */ - boolean hasPath(); - /** - *
-     * Path relative to the run's root artifact directory.
-     * 
- * - * optional string path = 1; - */ - java.lang.String getPath(); - /** - *
-     * Path relative to the run's root artifact directory.
-     * 
- * - * optional string path = 1; - */ - com.google.protobuf.ByteString - getPathBytes(); - - /** - *
-     * Whether the path is a directory.
-     * 
- * - * optional bool is_dir = 2; - */ - boolean hasIsDir(); - /** - *
-     * Whether the path is a directory.
-     * 
- * - * optional bool is_dir = 2; - */ - boolean getIsDir(); - - /** - *
-     * Size in bytes. Unset for directories.
-     * 
- * - * optional int64 file_size = 3; - */ - boolean hasFileSize(); - /** - *
-     * Size in bytes. Unset for directories.
-     * 
- * - * optional int64 file_size = 3; - */ - long getFileSize(); - } - /** - *
-   * Metadata of a single artifact file or directory.
-   * 
- * - * Protobuf type {@code mlflow.FileInfo} - */ - public static final class FileInfo extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.FileInfo) - FileInfoOrBuilder { - private static final long serialVersionUID = 0L; - // Use FileInfo.newBuilder() to construct. - private FileInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private FileInfo() { - path_ = ""; - isDir_ = false; - fileSize_ = 0L; - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private FileInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - path_ = bs; - break; - } - case 16: { - bitField0_ |= 0x00000002; - isDir_ = input.readBool(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - fileSize_ = input.readInt64(); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.FileInfo.class, org.mlflow.api.proto.Service.FileInfo.Builder.class); - } - - private int bitField0_; - public static final int PATH_FIELD_NUMBER = 1; - private volatile java.lang.Object path_; - /** - *
-     * Path relative to the run's root artifact directory.
-     * 
- * - * optional string path = 1; - */ - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-     * Path relative to the run's root artifact directory.
-     * 
- * - * optional string path = 1; - */ - public java.lang.String getPath() { - java.lang.Object ref = path_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - path_ = s; - } - return s; - } - } - /** - *
-     * Path relative to the run's root artifact directory.
-     * 
- * - * optional string path = 1; - */ - public com.google.protobuf.ByteString - getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int IS_DIR_FIELD_NUMBER = 2; - private boolean isDir_; - /** - *
-     * Whether the path is a directory.
-     * 
- * - * optional bool is_dir = 2; - */ - public boolean hasIsDir() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-     * Whether the path is a directory.
-     * 
- * - * optional bool is_dir = 2; - */ - public boolean getIsDir() { - return isDir_; - } - - public static final int FILE_SIZE_FIELD_NUMBER = 3; - private long fileSize_; - /** - *
-     * Size in bytes. Unset for directories.
-     * 
- * - * optional int64 file_size = 3; - */ - public boolean hasFileSize() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - *
-     * Size in bytes. Unset for directories.
-     * 
- * - * optional int64 file_size = 3; - */ - public long getFileSize() { - return fileSize_; - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, path_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, isDir_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, fileSize_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, path_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, isDir_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, fileSize_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.FileInfo)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.FileInfo other = (org.mlflow.api.proto.Service.FileInfo) obj; - - boolean result = true; - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); - } - result = result && (hasIsDir() == other.hasIsDir()); - if (hasIsDir()) { - result = result && (getIsDir() - == other.getIsDir()); - } - result = result && (hasFileSize() == other.hasFileSize()); - if (hasFileSize()) { - result = result && (getFileSize() - == other.getFileSize()); - } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasPath()) { - hash = (37 * hash) + PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); - } - if (hasIsDir()) { - hash = (37 * hash) + IS_DIR_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getIsDir()); - } - if (hasFileSize()) { - hash = (37 * hash) + FILE_SIZE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getFileSize()); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - 
com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FileInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.FileInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.FileInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.FileInfo prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - *
-     * Metadata of a single artifact file or directory.
-     * 
- * - * Protobuf type {@code mlflow.FileInfo} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.FileInfo) - org.mlflow.api.proto.Service.FileInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.FileInfo.class, org.mlflow.api.proto.Service.FileInfo.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.FileInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - path_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - isDir_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - fileSize_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_FileInfo_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.FileInfo getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.FileInfo.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.FileInfo build() { - org.mlflow.api.proto.Service.FileInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.FileInfo buildPartial() { - org.mlflow.api.proto.Service.FileInfo result = new org.mlflow.api.proto.Service.FileInfo(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.path_ = path_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.isDir_ = isDir_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.fileSize_ = fileSize_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) 
super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.FileInfo) { - return mergeFrom((org.mlflow.api.proto.Service.FileInfo)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.mlflow.api.proto.Service.FileInfo other) { - if (other == org.mlflow.api.proto.Service.FileInfo.getDefaultInstance()) return this; - if (other.hasPath()) { - bitField0_ |= 0x00000001; - path_ = other.path_; - onChanged(); - } - if (other.hasIsDir()) { - setIsDir(other.getIsDir()); - } - if (other.hasFileSize()) { - setFileSize(other.getFileSize()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.FileInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.FileInfo) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object path_ = ""; - /** - *
-       * Path relative to the run's root artifact directory.
-       * 
- * - * optional string path = 1; - */ - public boolean hasPath() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-       * Path relative to the run's root artifact directory.
-       * 
- * - * optional string path = 1; - */ - public java.lang.String getPath() { - java.lang.Object ref = path_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - path_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - *
-       * Path relative to the run's root artifact directory.
-       * 
- * - * optional string path = 1; - */ - public com.google.protobuf.ByteString - getPathBytes() { - java.lang.Object ref = path_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - path_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - *
-       * Path relative to the run's root artifact directory.
-       * 
- * - * optional string path = 1; - */ - public Builder setPath( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - return this; - } - /** - *
-       * Path relative to the run's root artifact directory.
-       * 
- * - * optional string path = 1; - */ - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000001); - path_ = getDefaultInstance().getPath(); - onChanged(); - return this; - } - /** - *
-       * Path relative to the run's root artifact directory.
-       * 
- * - * optional string path = 1; - */ - public Builder setPathBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - path_ = value; - onChanged(); - return this; - } - - private boolean isDir_ ; - /** - *
-       * Whether the path is a directory.
-       * 
- * - * optional bool is_dir = 2; - */ - public boolean hasIsDir() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-       * Whether the path is a directory.
-       * 
- * - * optional bool is_dir = 2; - */ - public boolean getIsDir() { - return isDir_; - } - /** - *
-       * Whether the path is a directory.
-       * 
- * - * optional bool is_dir = 2; - */ - public Builder setIsDir(boolean value) { - bitField0_ |= 0x00000002; - isDir_ = value; - onChanged(); - return this; - } - /** - *
-       * Whether the path is a directory.
-       * 
- * - * optional bool is_dir = 2; - */ - public Builder clearIsDir() { - bitField0_ = (bitField0_ & ~0x00000002); - isDir_ = false; - onChanged(); - return this; - } - - private long fileSize_ ; - /** - *
-       * Size in bytes. Unset for directories.
-       * 
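The three FileInfo fields documented above (path, is_dir, file_size) are set through the generated builder. A minimal usage sketch, assuming the generated Service classes are on the classpath; the class name and values below are illustrative only, not part of this diff:

import org.mlflow.api.proto.Service.FileInfo;

public class FileInfoSketch {
  public static void main(String[] args) {
    FileInfo file = FileInfo.newBuilder()
        .setPath("model/MLmodel")  // relative to the run's root artifact directory
        .setIsDir(false)
        .setFileSize(1024L)        // size in bytes; left unset for directories
        .build();
    System.out.println(file.getPath() + ": " + file.getFileSize() + " bytes");
  }
}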
- * - * optional int64 file_size = 3; - */ - public boolean hasFileSize() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - *
-       * Size in bytes. Unset for directories.
-       * 
- * - * optional int64 file_size = 3; - */ - public long getFileSize() { - return fileSize_; - } - /** - *
-       * Size in bytes. Unset for directories.
-       * 
- * - * optional int64 file_size = 3; - */ - public Builder setFileSize(long value) { - bitField0_ |= 0x00000004; - fileSize_ = value; - onChanged(); - return this; - } - /** - *
-       * Size in bytes. Unset for directories.
-       * 
- * - * optional int64 file_size = 3; - */ - public Builder clearFileSize() { - bitField0_ = (bitField0_ & ~0x00000004); - fileSize_ = 0L; - onChanged(); - return this; - } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:mlflow.FileInfo) - } - - // @@protoc_insertion_point(class_scope:mlflow.FileInfo) - private static final org.mlflow.api.proto.Service.FileInfo DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.FileInfo(); - } - - public static org.mlflow.api.proto.Service.FileInfo getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FileInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FileInfo(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.FileInfo getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface GetArtifactOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetArtifact) - com.google.protobuf.MessageOrBuilder { - - /** - *
-     * ID of the run from which to fetch the artifact.
-     * 
- * - * optional string run_uuid = 1; - */ - boolean hasRunUuid(); - /** - *
-     * ID of the run from which to fetch the artifact.
-     * 
- * - * optional string run_uuid = 1; - */ - java.lang.String getRunUuid(); - /** - *
-     * ID of the run from which to fetch the artifact.
-     * 
- * - * optional string run_uuid = 1; - */ - com.google.protobuf.ByteString - getRunUuidBytes(); - - /** - *
-     * Path of the artifact to fetch (relative to the root artifact directory for the run).
-     * 
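For context on what is being deleted here: a GetArtifact request paired run_uuid with this path field. A hedged reconstruction of how callers built the request before its removal (identifier values are placeholders):

import org.mlflow.api.proto.Service.GetArtifact;

public class GetArtifactSketch {
  public static void main(String[] args) {
    GetArtifact request = GetArtifact.newBuilder()
        .setRunUuid("0123456789abcdef")  // hypothetical run ID
        .setPath("model/MLmodel")        // relative to the run's root artifact directory
        .build();
    System.out.println(request.getPath());
  }
}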
- * - * optional string path = 2; - */ - boolean hasPath(); - /** - *
-     * Path of the artifact to fetch (relative to the root artifact directory for the run).
-     * 
- * - * optional string path = 2; - */ - java.lang.String getPath(); - /** - *
-     * Path of the artifact to fetch (relative to the root artifact directory for the run).
-     * 
- * - * optional string path = 2; - */ - com.google.protobuf.ByteString - getPathBytes(); - } - /** - * Protobuf type {@code mlflow.GetArtifact} - */ - public static final class GetArtifact extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetArtifact) - GetArtifactOrBuilder { - private static final long serialVersionUID = 0L; - // Use GetArtifact.newBuilder() to construct. - private GetArtifact(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private GetArtifact() { - runUuid_ = ""; - path_ = ""; - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private GetArtifact( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - runUuid_ = bs; - break; - } - case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - path_ = bs; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetArtifact.class, org.mlflow.api.proto.Service.GetArtifact.Builder.class); - } - - public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetArtifact.Response) - com.google.protobuf.MessageOrBuilder { - } - /** - *
-     * Empty because the data of the file will be streamed back in the HTTP response.
-     * The response will have an HTTP status code of 404 if the artifact path is not found.
-     * 
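Because the message declares no fields, its wire form is empty; the artifact bytes travel in the HTTP body, and a missing path surfaces as an HTTP 404 rather than as proto content. A small round-trip sketch (assumed usage, not part of this diff) makes the emptiness concrete:

import org.mlflow.api.proto.Service.GetArtifact;

public class EmptyResponseSketch {
  public static void main(String[] args) throws Exception {
    byte[] wire = GetArtifact.Response.getDefaultInstance().toByteArray();
    GetArtifact.Response parsed = GetArtifact.Response.parseFrom(wire);
    // Serializes to zero bytes: the payload is carried out-of-band over HTTP.
    System.out.println(wire.length + " bytes on the wire, initialized=" + parsed.isInitialized());
  }
}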
- * - * Protobuf type {@code mlflow.GetArtifact.Response} - */ - public static final class Response extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetArtifact.Response) - ResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use Response.newBuilder() to construct. - private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private Response() { - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Response( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetArtifact.Response.class, org.mlflow.api.proto.Service.GetArtifact.Response.Builder.class); - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.GetArtifact.Response)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.GetArtifact.Response other = (org.mlflow.api.proto.Service.GetArtifact.Response) obj; - - boolean result = true; - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = 
hash; - return hash; - } - - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.GetArtifact.Response parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetArtifact.Response prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() 
{ - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - *
-       * Empty because the data of the file will be streamed back in the HTTP response.
-       * The response will have an HTTP status code of 404 if the artifact path is not found.
-       * 
- * - * Protobuf type {@code mlflow.GetArtifact.Response} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetArtifact.Response) - org.mlflow.api.proto.Service.GetArtifact.ResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetArtifact.Response.class, org.mlflow.api.proto.Service.GetArtifact.Response.Builder.class); - } - - // Construct using org.mlflow.api.proto.Service.GetArtifact.Response.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - } - } - @java.lang.Override - public Builder clear() { - super.clear(); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_Response_descriptor; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetArtifact.Response.getDefaultInstance(); - } - - @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact.Response build() { - org.mlflow.api.proto.Service.GetArtifact.Response result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact.Response buildPartial() { - org.mlflow.api.proto.Service.GetArtifact.Response result = new org.mlflow.api.proto.Service.GetArtifact.Response(this); - onBuilt(); - return result; - } + @java.lang.Override + public org.mlflow.api.proto.Service.GetMetricHistory.Response buildPartial() { + org.mlflow.api.proto.Service.GetMetricHistory.Response result = new org.mlflow.api.proto.Service.GetMetricHistory.Response(this); + int from_bitField0_ = bitField0_; + if (metricsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + metrics_ = java.util.Collections.unmodifiableList(metrics_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.metrics_ = metrics_; + } else { + result.metrics_ = metricsBuilder_.build(); + } + onBuilt(); + return result; + } @java.lang.Override public Builder clone() { @@ -44303,16 +38221,42 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetArtifact.Response) { - return mergeFrom((org.mlflow.api.proto.Service.GetArtifact.Response)other); + if (other instanceof org.mlflow.api.proto.Service.GetMetricHistory.Response) { + return mergeFrom((org.mlflow.api.proto.Service.GetMetricHistory.Response)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetArtifact.Response 
other) { - if (other == org.mlflow.api.proto.Service.GetArtifact.Response.getDefaultInstance()) return this; + public Builder mergeFrom(org.mlflow.api.proto.Service.GetMetricHistory.Response other) { + if (other == org.mlflow.api.proto.Service.GetMetricHistory.Response.getDefaultInstance()) return this; + if (metricsBuilder_ == null) { + if (!other.metrics_.isEmpty()) { + if (metrics_.isEmpty()) { + metrics_ = other.metrics_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMetricsIsMutable(); + metrics_.addAll(other.metrics_); + } + onChanged(); + } + } else { + if (!other.metrics_.isEmpty()) { + if (metricsBuilder_.isEmpty()) { + metricsBuilder_.dispose(); + metricsBuilder_ = null; + metrics_ = other.metrics_; + bitField0_ = (bitField0_ & ~0x00000001); + metricsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getMetricsFieldBuilder() : null; + } else { + metricsBuilder_.addAllMessages(other.metrics_); + } + } + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -44328,11 +38272,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetArtifact.Response parsedMessage = null; + org.mlflow.api.proto.Service.GetMetricHistory.Response parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetArtifact.Response) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.GetMetricHistory.Response) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -44341,6 +38285,319 @@ public Builder mergeFrom( } return this; } + private int bitField0_; + + private java.util.List metrics_ = + java.util.Collections.emptyList(); + private void ensureMetricsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + metrics_ = new java.util.ArrayList(metrics_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> metricsBuilder_; + + /** + *
+         * All logged values for this metric.
+         * 
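The repeated metrics field added here gets the standard protobuf list accessors (addMetrics, getMetricsList, and so on, generated below). A usage sketch with invented metric values:

import org.mlflow.api.proto.Service.GetMetricHistory;
import org.mlflow.api.proto.Service.Metric;

public class MetricHistoryResponseSketch {
  public static void main(String[] args) {
    GetMetricHistory.Response history = GetMetricHistory.Response.newBuilder()
        .addMetrics(Metric.newBuilder().setKey("loss").setValue(0.9).setTimestamp(1000L))
        .addMetrics(Metric.newBuilder().setKey("loss").setValue(0.4).setTimestamp(2000L))
        .build();
    for (Metric m : history.getMetricsList()) {  // all logged values for the metric
      System.out.println(m.getTimestamp() + " -> " + m.getValue());
    }
  }
}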
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public java.util.List getMetricsList() { + if (metricsBuilder_ == null) { + return java.util.Collections.unmodifiableList(metrics_); + } else { + return metricsBuilder_.getMessageList(); + } + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public int getMetricsCount() { + if (metricsBuilder_ == null) { + return metrics_.size(); + } else { + return metricsBuilder_.getCount(); + } + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.Metric getMetrics(int index) { + if (metricsBuilder_ == null) { + return metrics_.get(index); + } else { + return metricsBuilder_.getMessage(index); + } + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder setMetrics( + int index, org.mlflow.api.proto.Service.Metric value) { + if (metricsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetricsIsMutable(); + metrics_.set(index, value); + onChanged(); + } else { + metricsBuilder_.setMessage(index, value); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder setMetrics( + int index, org.mlflow.api.proto.Service.Metric.Builder builderForValue) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.set(index, builderForValue.build()); + onChanged(); + } else { + metricsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder addMetrics(org.mlflow.api.proto.Service.Metric value) { + if (metricsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetricsIsMutable(); + metrics_.add(value); + onChanged(); + } else { + metricsBuilder_.addMessage(value); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder addMetrics( + int index, org.mlflow.api.proto.Service.Metric value) { + if (metricsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetricsIsMutable(); + metrics_.add(index, value); + onChanged(); + } else { + metricsBuilder_.addMessage(index, value); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder addMetrics( + org.mlflow.api.proto.Service.Metric.Builder builderForValue) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.add(builderForValue.build()); + onChanged(); + } else { + metricsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder addMetrics( + int index, org.mlflow.api.proto.Service.Metric.Builder builderForValue) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.add(index, builderForValue.build()); + onChanged(); + } else { + metricsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder addAllMetrics( + java.lang.Iterable values) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, metrics_); + onChanged(); + } else { + metricsBuilder_.addAllMessages(values); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder clearMetrics() { + if (metricsBuilder_ == null) { + metrics_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + metricsBuilder_.clear(); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public Builder removeMetrics(int index) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.remove(index); + onChanged(); + } else { + metricsBuilder_.remove(index); + } + return this; + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.Metric.Builder getMetricsBuilder( + int index) { + return getMetricsFieldBuilder().getBuilder(index); + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( + int index) { + if (metricsBuilder_ == null) { + return metrics_.get(index); } else { + return metricsBuilder_.getMessageOrBuilder(index); + } + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public java.util.List + getMetricsOrBuilderList() { + if (metricsBuilder_ != null) { + return metricsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(metrics_); + } + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.Metric.Builder addMetricsBuilder() { + return getMetricsFieldBuilder().addBuilder( + org.mlflow.api.proto.Service.Metric.getDefaultInstance()); + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public org.mlflow.api.proto.Service.Metric.Builder addMetricsBuilder( + int index) { + return getMetricsFieldBuilder().addBuilder( + index, org.mlflow.api.proto.Service.Metric.getDefaultInstance()); + } + /** + *
+         * All logged values for this metric.
+         * 
+ * + * repeated .mlflow.Metric metrics = 1; + */ + public java.util.List + getMetricsBuilderList() { + return getMetricsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> + getMetricsFieldBuilder() { + if (metricsBuilder_ == null) { + metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder>( + metrics_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + metrics_ = null; + } + return metricsBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -44354,16 +38611,16 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetArtifact.Response) + // @@protoc_insertion_point(builder_scope:mlflow.GetMetricHistory.Response) } - // @@protoc_insertion_point(class_scope:mlflow.GetArtifact.Response) - private static final org.mlflow.api.proto.Service.GetArtifact.Response DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory.Response) + private static final org.mlflow.api.proto.Service.GetMetricHistory.Response DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetArtifact.Response(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetMetricHistory.Response(); } - public static org.mlflow.api.proto.Service.GetArtifact.Response getDefaultInstance() { + public static org.mlflow.api.proto.Service.GetMetricHistory.Response getDefaultInstance() { return DEFAULT_INSTANCE; } @@ -44388,28 +38645,84 @@ public com.google.protobuf.Parser getParserForType() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact.Response getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.GetMetricHistory.Response getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 3; + private volatile java.lang.Object runId_; + /** + *
+     * ID of the run from which to fetch metric values. Must be provided.
+     * 
+ * + * optional string run_id = 3; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+     * ID of the run from which to fetch metric values. Must be provided.
+     * 
+ * + * optional string run_id = 3; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } + } + /** + *
+     * ID of the run from which to fetch metric values. Must be provided.
+     * 
+ * + * optional string run_id = 3; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + public static final int RUN_UUID_FIELD_NUMBER = 1; private volatile java.lang.Object runUuid_; /** *
-     * ID of the run from which to fetch the artifact.
+     * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+     * will be removed in a future MLflow version.
      * 
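Per the deprecation note, readers should prefer run_id and keep run_uuid only for backward compatibility. One way a consumer of this message could implement that fallback; this helper is an assumption, not code from the diff:

import org.mlflow.api.proto.Service.GetMetricHistory;

public class RunIdFallbackSketch {
  // Prefer the new run_id field; tolerate older clients that only set run_uuid.
  static String resolveRunId(GetMetricHistory request) {
    if (request.hasRunId()) {
      return request.getRunId();
    }
    return request.hasRunUuid() ? request.getRunUuid() : null;
  }

  public static void main(String[] args) {
    GetMetricHistory legacy = GetMetricHistory.newBuilder().setRunUuid("abc123").build();
    System.out.println(resolveRunId(legacy));  // falls back to the deprecated field
  }
}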
* * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-     * ID of the run from which to fetch the artifact.
+     * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+     * will be removed in a future MLflow version.
      * 
* * optional string run_uuid = 1; @@ -44430,7 +38743,8 @@ public java.lang.String getRunUuid() { } /** *
-     * ID of the run from which to fetch the artifact.
+     * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+     * will be removed in a future MLflow version.
      * 
* * optional string run_uuid = 1; @@ -44449,27 +38763,27 @@ public java.lang.String getRunUuid() { } } - public static final int PATH_FIELD_NUMBER = 2; - private volatile java.lang.Object path_; + public static final int METRIC_KEY_FIELD_NUMBER = 2; + private volatile java.lang.Object metricKey_; /** *
-     * Path of the artifact to fetch (relative to the root artifact directory for the run).
+     * Name of the metric.
      * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public boolean hasPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public boolean hasMetricKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
-     * Path of the artifact to fetch (relative to the root artifact directory for the run).
+     * Name of the metric.
      * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public java.lang.String getPath() { - java.lang.Object ref = path_; + public java.lang.String getMetricKey() { + java.lang.Object ref = metricKey_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -44477,26 +38791,26 @@ public java.lang.String getPath() { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - path_ = s; + metricKey_ = s; } return s; } } /** *
-     * Path of the artifact to fetch (relative to the root artifact directory for the run).
+     * Name of the metric.
      * 
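Putting the renamed fields together: a GetMetricHistory request now carries run_id plus the required metric_key. A hedged construction sketch using the setters generated in this hunk (values are placeholders):

import org.mlflow.api.proto.Service.GetMetricHistory;

public class MetricHistoryRequestSketch {
  public static void main(String[] args) {
    GetMetricHistory request = GetMetricHistory.newBuilder()
        .setRunId("0123456789abcdef")  // must be provided
        .setMetricKey("loss")          // marked validate_required in the proto
        .build();
    System.out.println(request.getRunId() + "/" + request.getMetricKey());
  }
}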
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString - getPathBytes() { - java.lang.Object ref = path_; + getMetricKeyBytes() { + java.lang.Object ref = metricKey_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - path_ = b; + metricKey_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -44517,11 +38831,14 @@ public final boolean isInitialized() { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, path_); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, metricKey_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, runId_); } unknownFields.writeTo(output); } @@ -44532,11 +38849,14 @@ public int getSerializedSize() { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, path_); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, metricKey_); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, runId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -44548,21 +38868,26 @@ public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetArtifact)) { + if (!(obj instanceof org.mlflow.api.proto.Service.GetMetricHistory)) { return super.equals(obj); } - org.mlflow.api.proto.Service.GetArtifact other = (org.mlflow.api.proto.Service.GetArtifact) obj; + org.mlflow.api.proto.Service.GetMetricHistory other = (org.mlflow.api.proto.Service.GetMetricHistory) obj; boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); + } result = result && (hasRunUuid() == other.hasRunUuid()); if (hasRunUuid()) { result = result && getRunUuid() .equals(other.getRunUuid()); } - result = result && (hasPath() == other.hasPath()); - if (hasPath()) { - result = result && getPath() - .equals(other.getPath()); + result = result && (hasMetricKey() == other.hasMetricKey()); + if (hasMetricKey()) { + result = result && getMetricKey() + .equals(other.getMetricKey()); } result = result && unknownFields.equals(other.unknownFields); return result; @@ -44575,82 +38900,86 @@ public int hashCode() { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); + } if (hasRunUuid()) { hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; hash = (53 * hash) + getRunUuid().hashCode(); } - if (hasPath()) { - hash = (37 * hash) + 
PATH_FIELD_NUMBER; - hash = (53 * hash) + getPath().hashCode(); + if (hasMetricKey()) { + hash = (37 * hash) + METRIC_KEY_FIELD_NUMBER; + hash = (53 * hash) + getMetricKey().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom(byte[] data) + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetArtifact parseDelimitedFrom(java.io.InputStream input) + public static org.mlflow.api.proto.Service.GetMetricHistory parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetArtifact parseDelimitedFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public 
static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static org.mlflow.api.proto.Service.GetArtifact parseFrom( + public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -44663,7 +38992,7 @@ public static org.mlflow.api.proto.Service.GetArtifact parseFrom( public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetArtifact prototype) { + public static Builder newBuilder(org.mlflow.api.proto.Service.GetMetricHistory prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -44679,26 +39008,26 @@ protected Builder newBuilderForType( return builder; } /** - * Protobuf type {@code mlflow.GetArtifact} + * Protobuf type {@code mlflow.GetMetricHistory} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetArtifact) - org.mlflow.api.proto.Service.GetArtifactOrBuilder { + // @@protoc_insertion_point(builder_implements:mlflow.GetMetricHistory) + org.mlflow.api.proto.Service.GetMetricHistoryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetArtifact.class, org.mlflow.api.proto.Service.GetArtifact.Builder.class); + org.mlflow.api.proto.Service.GetMetricHistory.class, org.mlflow.api.proto.Service.GetMetricHistory.Builder.class); } - // Construct using org.mlflow.api.proto.Service.GetArtifact.newBuilder() + // Construct using org.mlflow.api.proto.Service.GetMetricHistory.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -44716,27 +39045,29 @@ private void maybeForceBuilderInitialization() { @java.lang.Override public Builder clear() { super.clear(); - runUuid_ = ""; + runId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - path_ = ""; + runUuid_ = ""; bitField0_ = (bitField0_ & ~0x00000002); + metricKey_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetArtifact_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_descriptor; } @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetArtifact.getDefaultInstance(); + public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.GetMetricHistory.getDefaultInstance(); } @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact build() { - 
org.mlflow.api.proto.Service.GetArtifact result = buildPartial(); + public org.mlflow.api.proto.Service.GetMetricHistory build() { + org.mlflow.api.proto.Service.GetMetricHistory result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -44744,18 +39075,22 @@ public org.mlflow.api.proto.Service.GetArtifact build() { } @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact buildPartial() { - org.mlflow.api.proto.Service.GetArtifact result = new org.mlflow.api.proto.Service.GetArtifact(this); + public org.mlflow.api.proto.Service.GetMetricHistory buildPartial() { + org.mlflow.api.proto.Service.GetMetricHistory result = new org.mlflow.api.proto.Service.GetMetricHistory(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.runUuid_ = runUuid_; + result.runId_ = runId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.path_ = path_; + result.runUuid_ = runUuid_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.metricKey_ = metricKey_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -44795,24 +39130,29 @@ public Builder addRepeatedField( } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetArtifact) { - return mergeFrom((org.mlflow.api.proto.Service.GetArtifact)other); + if (other instanceof org.mlflow.api.proto.Service.GetMetricHistory) { + return mergeFrom((org.mlflow.api.proto.Service.GetMetricHistory)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetArtifact other) { - if (other == org.mlflow.api.proto.Service.GetArtifact.getDefaultInstance()) return this; - if (other.hasRunUuid()) { + public Builder mergeFrom(org.mlflow.api.proto.Service.GetMetricHistory other) { + if (other == org.mlflow.api.proto.Service.GetMetricHistory.getDefaultInstance()) return this; + if (other.hasRunId()) { bitField0_ |= 0x00000001; - runUuid_ = other.runUuid_; + runId_ = other.runId_; onChanged(); } - if (other.hasPath()) { + if (other.hasRunUuid()) { bitField0_ |= 0x00000002; - path_ = other.path_; + runUuid_ = other.runUuid_; + onChanged(); + } + if (other.hasMetricKey()) { + bitField0_ |= 0x00000004; + metricKey_ = other.metricKey_; onChanged(); } this.mergeUnknownFields(other.unknownFields); @@ -44830,11 +39170,11 @@ public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.mlflow.api.proto.Service.GetArtifact parsedMessage = null; + org.mlflow.api.proto.Service.GetMetricHistory parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetArtifact) e.getUnfinishedMessage(); + parsedMessage = (org.mlflow.api.proto.Service.GetMetricHistory) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -44845,20 +39185,122 @@ public Builder mergeFrom( } private int bitField0_; + private java.lang.Object runId_ = ""; + /** + *
+       * ID of the run from which to fetch metric values. Must be provided.
+       * 
+ * + * optional string run_id = 3; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+       * ID of the run from which to fetch metric values. Must be provided.
+       * 
+ * + * optional string run_id = 3; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
+       * ID of the run from which to fetch metric values. Must be provided.
+       * 
+ * + * optional string run_id = 3; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
+       * ID of the run from which to fetch metric values. Must be provided.
+       * 
+ * + * optional string run_id = 3; + */ + public Builder setRunId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + /** + *
+       * ID of the run from which to fetch metric values. Must be provided.
+       * 
+ * + * optional string run_id = 3; + */ + public Builder clearRunId() { + bitField0_ = (bitField0_ & ~0x00000001); + runId_ = getDefaultInstance().getRunId(); + onChanged(); + return this; + } + /** + *
+       * ID of the run from which to fetch metric values. Must be provided.
+       * 
+ * + * optional string run_id = 3; + */ + public Builder setRunIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + private java.lang.Object runUuid_ = ""; /** *
-       * ID of the run from which to fetch the artifact.
+       * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+       * will be removed in a future MLflow version.
        * 
* * optional string run_uuid = 1; */ public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** *
-       * ID of the run from which to fetch the artifact.
+       * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+       * will be removed in a future MLflow version.
        * 
* * optional string run_uuid = 1; @@ -44879,7 +39321,8 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run from which to fetch the artifact.
+       * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+       * will be removed in a future MLflow version.
        * 
* * optional string run_uuid = 1; @@ -44899,7 +39342,8 @@ public java.lang.String getRunUuid() { } /** *
-       * ID of the run from which to fetch the artifact.
+       * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+       * will be removed in a future MLflow version.
        * 
* * optional string run_uuid = 1; @@ -44909,27 +39353,29 @@ public Builder setRunUuid( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; } /** *
-       * ID of the run from which to fetch the artifact.
+       * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+       * will be removed in a future MLflow version.
        * 
* * optional string run_uuid = 1; */ public Builder clearRunUuid() { - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); runUuid_ = getDefaultInstance().getRunUuid(); onChanged(); return this; } /** *
-       * ID of the run from which to fetch the artifact.
+       * [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field
+       * will be removed in a future MLflow version.
        * 
* * optional string run_uuid = 1; @@ -44939,38 +39385,38 @@ public Builder setRunUuidBytes( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; runUuid_ = value; onChanged(); return this; } - private java.lang.Object path_ = ""; + private java.lang.Object metricKey_ = ""; /** *
-       * Path of the artifact to fetch (relative to the root artifact directory for the run).
+       * Name of the metric.
        * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public boolean hasPath() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public boolean hasMetricKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** *
-       * Path of the artifact to fetch (relative to the root artifact directory for the run).
+       * Name of the metric.
        * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public java.lang.String getPath() { - java.lang.Object ref = path_; + public java.lang.String getMetricKey() { + java.lang.Object ref = metricKey_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - path_ = s; + metricKey_ = s; } return s; } else { @@ -44979,19 +39425,19 @@ public java.lang.String getPath() { } /** *
-       * Path of the artifact to fetch (relative to the root artifact directory for the run).
+       * Name of the metric.
        * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ public com.google.protobuf.ByteString - getPathBytes() { - java.lang.Object ref = path_; + getMetricKeyBytes() { + java.lang.Object ref = metricKey_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - path_ = b; + metricKey_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -44999,48 +39445,48 @@ public java.lang.String getPath() { } /** *
-       * Path of the artifact to fetch (relative to the root artifact directory for the run).
+       * Name of the metric.
        * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public Builder setPath( + public Builder setMetricKey( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; - path_ = value; + bitField0_ |= 0x00000004; + metricKey_ = value; onChanged(); return this; } /** *
-       * Path of the artifact to fetch (relative to the root artifact directory for the run).
+       * Name of the metric.
        * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public Builder clearPath() { - bitField0_ = (bitField0_ & ~0x00000002); - path_ = getDefaultInstance().getPath(); + public Builder clearMetricKey() { + bitField0_ = (bitField0_ & ~0x00000004); + metricKey_ = getDefaultInstance().getMetricKey(); onChanged(); return this; } /** *
-       * Path of the artifact to fetch (relative to the root artifact directory for the run).
+       * Name of the metric.
        * 
* - * optional string path = 2; + * optional string metric_key = 2 [(.mlflow.validate_required) = true]; */ - public Builder setPathBytes( + public Builder setMetricKeyBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; - path_ = value; + bitField0_ |= 0x00000004; + metricKey_ = value; onChanged(); return this; } @@ -45057,117 +39503,240 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetArtifact) + // @@protoc_insertion_point(builder_scope:mlflow.GetMetricHistory) } - // @@protoc_insertion_point(class_scope:mlflow.GetArtifact) - private static final org.mlflow.api.proto.Service.GetArtifact DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory) + private static final org.mlflow.api.proto.Service.GetMetricHistory DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetArtifact(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetMetricHistory(); } - public static org.mlflow.api.proto.Service.GetArtifact getDefaultInstance() { + public static org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public GetArtifact parsePartialFrom( + public GetMetricHistory parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetArtifact(input, extensionRegistry); + return new GetMetricHistory(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.GetArtifact getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } - public interface GetMetricHistoryOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetMetricHistory) + public interface LogBatchOrBuilder extends + // @@protoc_insertion_point(interface_extends:mlflow.LogBatch) com.google.protobuf.MessageOrBuilder { /** *
-     * ID of the run from which to fetch metric values.
+     * ID of the run to log under.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1; */ - boolean hasRunUuid(); + boolean hasRunId(); /** *
-     * ID of the run from which to fetch metric values.
+     * ID of the run to log under.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1; */ - java.lang.String getRunUuid(); + java.lang.String getRunId(); /** *
-     * ID of the run from which to fetch metric values.
+     * ID of the run to log under.
      * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * optional string run_id = 1; */ com.google.protobuf.ByteString - getRunUuidBytes(); + getRunIdBytes(); /** *
-     * Name of the metric.
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.Metric metrics = 2; */ - boolean hasMetricKey(); + java.util.List + getMetricsList(); /** *
-     * Name of the metric.
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.Metric metrics = 2; */ - java.lang.String getMetricKey(); + org.mlflow.api.proto.Service.Metric getMetrics(int index); /** *
-     * Name of the metric.
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
      * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.Metric metrics = 2; */ - com.google.protobuf.ByteString - getMetricKeyBytes(); + int getMetricsCount(); + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + java.util.List + getMetricsOrBuilderList(); + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( + int index); + + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + java.util.List + getParamsList(); + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + org.mlflow.api.proto.Service.Param getParams(int index); + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + int getParamsCount(); + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + java.util.List + getParamsOrBuilderList(); + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + org.mlflow.api.proto.Service.ParamOrBuilder getParamsOrBuilder( + int index); + + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + java.util.List + getTagsList(); + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + org.mlflow.api.proto.Service.RunTag getTags(int index); + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + int getTagsCount(); + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + java.util.List + getTagsOrBuilderList(); + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + org.mlflow.api.proto.Service.RunTagOrBuilder getTagsOrBuilder( + int index); } /** - * Protobuf type {@code mlflow.GetMetricHistory} + * Protobuf type {@code mlflow.LogBatch} */ - public static final class GetMetricHistory extends + public static final class LogBatch extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetMetricHistory) - GetMetricHistoryOrBuilder { + // @@protoc_insertion_point(message_implements:mlflow.LogBatch) + LogBatchOrBuilder { private static final long serialVersionUID = 0L; - // Use GetMetricHistory.newBuilder() to construct. - private GetMetricHistory(com.google.protobuf.GeneratedMessageV3.Builder builder) { + // Use LogBatch.newBuilder() to construct. + private LogBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private GetMetricHistory() { - runUuid_ = ""; - metricKey_ = ""; + private LogBatch() { + runId_ = ""; + metrics_ = java.util.Collections.emptyList(); + params_ = java.util.Collections.emptyList(); + tags_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -45175,7 +39744,7 @@ private GetMetricHistory() { getUnknownFields() { return this.unknownFields; } - private GetMetricHistory( + private LogBatch( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -45197,13 +39766,34 @@ private GetMetricHistory( case 10: { com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - runUuid_ = bs; + runId_ = bs; break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - metricKey_ = bs; + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + metrics_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + metrics_.add( + input.readMessage(org.mlflow.api.proto.Service.Metric.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + params_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + params_.add( + input.readMessage(org.mlflow.api.proto.Service.Param.PARSER, extensionRegistry)); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tags_.add( + input.readMessage(org.mlflow.api.proto.Service.RunTag.PARSER, extensionRegistry)); break; } default: { @@ -45221,77 +39811,42 @@ private GetMetricHistory( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + metrics_ = java.util.Collections.unmodifiableList(metrics_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + params_ = java.util.Collections.unmodifiableList(params_); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = java.util.Collections.unmodifiableList(tags_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetricHistory.class, org.mlflow.api.proto.Service.GetMetricHistory.Builder.class); + org.mlflow.api.proto.Service.LogBatch.class, org.mlflow.api.proto.Service.LogBatch.Builder.class); } public interface ResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:mlflow.GetMetricHistory.Response) + // @@protoc_insertion_point(interface_extends:mlflow.LogBatch.Response) com.google.protobuf.MessageOrBuilder { - - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - java.util.List - getMetricsList(); - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - org.mlflow.api.proto.Service.Metric getMetrics(int index); - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - int getMetricsCount(); - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - java.util.List - getMetricsOrBuilderList(); - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( - int index); } /** - * Protobuf type {@code mlflow.GetMetricHistory.Response} + * Protobuf type {@code mlflow.LogBatch.Response} */ public static final class Response extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:mlflow.GetMetricHistory.Response) + // @@protoc_insertion_point(message_implements:mlflow.LogBatch.Response) ResponseOrBuilder { private static final long serialVersionUID = 0L; // Use Response.newBuilder() to construct. @@ -45299,7 +39854,6 @@ private Response(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private Response() { - metrics_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -45315,7 +39869,6 @@ private Response( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -45326,15 +39879,6 @@ private Response( case 0: done = true; break; - case 10: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - metrics_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - metrics_.add( - input.readMessage(org.mlflow.api.proto.Service.Metric.PARSER, extensionRegistry)); - break; - } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -45350,1437 +39894,2165 @@ private Response( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - metrics_ = java.util.Collections.unmodifiableList(metrics_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_descriptor; + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_Response_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_Response_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetricHistory.Response.class, org.mlflow.api.proto.Service.GetMetricHistory.Response.Builder.class); + org.mlflow.api.proto.Service.LogBatch.Response.class, org.mlflow.api.proto.Service.LogBatch.Response.Builder.class); } - public static final int METRICS_FIELD_NUMBER = 1; - private java.util.List metrics_; - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public java.util.List getMetricsList() { - return metrics_; + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; } - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public java.util.List - getMetricsOrBuilderList() { - return metrics_; + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.mlflow.api.proto.Service.LogBatch.Response)) { + return super.equals(obj); + } + org.mlflow.api.proto.Service.LogBatch.Response other = (org.mlflow.api.proto.Service.LogBatch.Response) obj; + + boolean result = true; + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseDelimitedFrom( + java.io.InputStream input, 
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.mlflow.api.proto.Service.LogBatch.Response parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.mlflow.api.proto.Service.LogBatch.Response prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; } /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; + * Protobuf type {@code mlflow.LogBatch.Response} */ - public int getMetricsCount() { - return metrics_.size(); + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:mlflow.LogBatch.Response) + org.mlflow.api.proto.Service.LogBatch.ResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_Response_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_Response_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.mlflow.api.proto.Service.LogBatch.Response.class, org.mlflow.api.proto.Service.LogBatch.Response.Builder.class); + } + + // Construct using org.mlflow.api.proto.Service.LogBatch.Response.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_Response_descriptor; + } + + @java.lang.Override + public org.mlflow.api.proto.Service.LogBatch.Response getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.LogBatch.Response.getDefaultInstance(); + } + + @java.lang.Override + public org.mlflow.api.proto.Service.LogBatch.Response build() { + org.mlflow.api.proto.Service.LogBatch.Response result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.mlflow.api.proto.Service.LogBatch.Response buildPartial() { + org.mlflow.api.proto.Service.LogBatch.Response result = new org.mlflow.api.proto.Service.LogBatch.Response(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.mlflow.api.proto.Service.LogBatch.Response) 
{ + return mergeFrom((org.mlflow.api.proto.Service.LogBatch.Response)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.mlflow.api.proto.Service.LogBatch.Response other) { + if (other == org.mlflow.api.proto.Service.LogBatch.Response.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.mlflow.api.proto.Service.LogBatch.Response parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.mlflow.api.proto.Service.LogBatch.Response) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:mlflow.LogBatch.Response) } - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.Metric getMetrics(int index) { - return metrics_.get(index); + + // @@protoc_insertion_point(class_scope:mlflow.LogBatch.Response) + private static final org.mlflow.api.proto.Service.LogBatch.Response DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.LogBatch.Response(); } - /** - *
-       * All logged values for this metric.
-       * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( - int index) { - return metrics_.get(index); + + public static org.mlflow.api.proto.Service.LogBatch.Response getDefaultInstance() { + return DEFAULT_INSTANCE; } - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Response parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Response(input, extensionRegistry); + } + }; - memoizedIsInitialized = 1; - return true; + public static com.google.protobuf.Parser parser() { + return PARSER; } @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - for (int i = 0; i < metrics_.size(); i++) { - output.writeMessage(1, metrics_.get(i)); - } - unknownFields.writeTo(output); + public com.google.protobuf.Parser getParserForType() { + return PARSER; } @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; + public org.mlflow.api.proto.Service.LogBatch.Response getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } - size = 0; - for (int i = 0; i < metrics_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, metrics_.get(i)); + } + + private int bitField0_; + public static final int RUN_ID_FIELD_NUMBER = 1; + private volatile java.lang.Object runId_; + /** + *
+     * ID of the run to log under.
+     * 
+ * + * optional string run_id = 1; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+     * ID of the run to log under.
+     * 
+ * + * optional string run_id = 1; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; + return s; + } + } + /** + *
+     * ID of the run to log under.
+     * 
+ * + * optional string run_id = 1; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.mlflow.api.proto.Service.GetMetricHistory.Response)) { - return super.equals(obj); - } - org.mlflow.api.proto.Service.GetMetricHistory.Response other = (org.mlflow.api.proto.Service.GetMetricHistory.Response) obj; + public static final int METRICS_FIELD_NUMBER = 2; + private java.util.List metrics_; + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public java.util.List getMetricsList() { + return metrics_; + } + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public java.util.List + getMetricsOrBuilderList() { + return metrics_; + } + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public int getMetricsCount() { + return metrics_.size(); + } + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.Metric getMetrics(int index) { + return metrics_.get(index); + } + /** + *
+     * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( + int index) { + return metrics_.get(index); + } - boolean result = true; - result = result && getMetricsList() - .equals(other.getMetricsList()); - result = result && unknownFields.equals(other.unknownFields); - return result; - } + public static final int PARAMS_FIELD_NUMBER = 3; + private java.util.List params_; + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + public java.util.List getParamsList() { + return params_; + } + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + public java.util.List + getParamsOrBuilderList() { + return params_; + } + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + public int getParamsCount() { + return params_.size(); + } + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.Param getParams(int index) { + return params_.get(index); + } + /** + *
+     * Params to log. A single request can contain up to 100 params, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.ParamOrBuilder getParamsOrBuilder( + int index) { + return params_.get(index); + } - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (getMetricsCount() > 0) { - hash = (37 * hash) + METRICS_FIELD_NUMBER; - hash = (53 * hash) + getMetricsList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } + public static final int TAGS_FIELD_NUMBER = 4; + private java.util.List tags_; + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public java.util.List getTagsList() { + return tags_; + } + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public java.util.List + getTagsOrBuilderList() { + return tags_; + } + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public int getTagsCount() { + return tags_.size(); + } + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public org.mlflow.api.proto.Service.RunTag getTags(int index) { + return tags_.get(index); + } + /** + *
+     * Tags to log. A single request can contain up to 100 tags, and up to 1000
+     * metrics, params, and tags in total.
+     * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public org.mlflow.api.proto.Service.RunTagOrBuilder getTagsOrBuilder( + int index) { + return tags_.get(index); + } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runId_); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); + for (int i = 0; i < metrics_.size(); i++) { + output.writeMessage(2, metrics_.get(i)); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); + for (int i = 0; i < params_.size(); i++) { + output.writeMessage(3, params_.get(i)); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); + for (int i = 0; i < tags_.size(); i++) { + output.writeMessage(4, tags_.get(i)); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runId_); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + for (int i = 0; i < metrics_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, metrics_.get(i)); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + for (int i = 0; i < params_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, params_.get(i)); } - 
public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + for (int i = 0; i < tags_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tags_.get(i)); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + if (!(obj instanceof org.mlflow.api.proto.Service.LogBatch)) { + return super.equals(obj); } - public static org.mlflow.api.proto.Service.GetMetricHistory.Response parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + org.mlflow.api.proto.Service.LogBatch other = (org.mlflow.api.proto.Service.LogBatch) obj; + + boolean result = true; + result = result && (hasRunId() == other.hasRunId()); + if (hasRunId()) { + result = result && getRunId() + .equals(other.getRunId()); } + result = result && getMetricsList() + .equals(other.getMetricsList()); + result = result && getParamsList() + .equals(other.getParamsList()); + result = result && getTagsList() + .equals(other.getTagsList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - public static Builder newBuilder(org.mlflow.api.proto.Service.GetMetricHistory.Response prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRunId()) { + hash = (37 * hash) + RUN_ID_FIELD_NUMBER; + hash = (53 * hash) + getRunId().hashCode(); } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + if (getMetricsCount() > 0) { + hash = (37 * hash) + METRICS_FIELD_NUMBER; + hash = (53 * hash) + getMetricsList().hashCode(); + } + if (getParamsCount() > 0) { + hash = (37 * hash) + PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getParamsList().hashCode(); + } + if (getTagsCount() > 0) { + hash = (37 * hash) + TAGS_FIELD_NUMBER; + hash = (53 * hash) + getTagsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.mlflow.api.proto.Service.LogBatch parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.mlflow.api.proto.Service.LogBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override 
+ public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.mlflow.api.proto.Service.LogBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code mlflow.LogBatch} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:mlflow.LogBatch) + org.mlflow.api.proto.Service.LogBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_descriptor; } @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.mlflow.api.proto.Service.LogBatch.class, org.mlflow.api.proto.Service.LogBatch.Builder.class); } - /** - * Protobuf type {@code mlflow.GetMetricHistory.Response} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:mlflow.GetMetricHistory.Response) - org.mlflow.api.proto.Service.GetMetricHistory.ResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetricHistory.Response.class, org.mlflow.api.proto.Service.GetMetricHistory.Response.Builder.class); - } - // Construct using org.mlflow.api.proto.Service.GetMetricHistory.Response.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } + // Construct using org.mlflow.api.proto.Service.LogBatch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getMetricsFieldBuilder(); + getParamsFieldBuilder(); + getTagsFieldBuilder(); } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { - getMetricsFieldBuilder(); - } + } + @java.lang.Override + public Builder clear() { + super.clear(); + runId_ = ""; + bitField0_ = (bitField0_ 
& ~0x00000001); + if (metricsBuilder_ == null) { + metrics_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + metricsBuilder_.clear(); } - @java.lang.Override - public Builder clear() { - super.clear(); - if (metricsBuilder_ == null) { - metrics_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - metricsBuilder_.clear(); - } - return this; + if (paramsBuilder_ == null) { + params_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + paramsBuilder_.clear(); } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_Response_descriptor; + if (tagsBuilder_ == null) { + tags_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tagsBuilder_.clear(); } + return this; + } - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory.Response getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetMetricHistory.Response.getDefaultInstance(); - } + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.mlflow.api.proto.Service.internal_static_mlflow_LogBatch_descriptor; + } - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory.Response build() { - org.mlflow.api.proto.Service.GetMetricHistory.Response result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } + @java.lang.Override + public org.mlflow.api.proto.Service.LogBatch getDefaultInstanceForType() { + return org.mlflow.api.proto.Service.LogBatch.getDefaultInstance(); + } - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory.Response buildPartial() { - org.mlflow.api.proto.Service.GetMetricHistory.Response result = new org.mlflow.api.proto.Service.GetMetricHistory.Response(this); - int from_bitField0_ = bitField0_; - if (metricsBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - metrics_ = java.util.Collections.unmodifiableList(metrics_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.metrics_ = metrics_; - } else { - result.metrics_ = metricsBuilder_.build(); - } - onBuilt(); - return result; + @java.lang.Override + public org.mlflow.api.proto.Service.LogBatch build() { + org.mlflow.api.proto.Service.LogBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } + return result; + } - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); + @java.lang.Override + public org.mlflow.api.proto.Service.LogBatch buildPartial() { + org.mlflow.api.proto.Service.LogBatch result = new org.mlflow.api.proto.Service.LogBatch(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ 
|= 0x00000001; } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); + result.runId_ = runId_; + if (metricsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + metrics_ = java.util.Collections.unmodifiableList(metrics_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.metrics_ = metrics_; + } else { + result.metrics_ = metricsBuilder_.build(); } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); + if (paramsBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + params_ = java.util.Collections.unmodifiableList(params_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.params_ = params_; + } else { + result.params_ = paramsBuilder_.build(); } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetMetricHistory.Response) { - return mergeFrom((org.mlflow.api.proto.Service.GetMetricHistory.Response)other); - } else { - super.mergeFrom(other); - return this; + if (tagsBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = java.util.Collections.unmodifiableList(tags_); + bitField0_ = (bitField0_ & ~0x00000008); } + result.tags_ = tags_; + } else { + result.tags_ = tagsBuilder_.build(); } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } - public Builder mergeFrom(org.mlflow.api.proto.Service.GetMetricHistory.Response other) { - if (other == org.mlflow.api.proto.Service.GetMetricHistory.Response.getDefaultInstance()) return this; - if (metricsBuilder_ == null) { - if (!other.metrics_.isEmpty()) { - if (metrics_.isEmpty()) { - metrics_ = other.metrics_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureMetricsIsMutable(); - metrics_.addAll(other.metrics_); - } - onChanged(); - } - } else { - if (!other.metrics_.isEmpty()) { - if (metricsBuilder_.isEmpty()) { - metricsBuilder_.dispose(); - metricsBuilder_ = null; - metrics_ = other.metrics_; - bitField0_ = (bitField0_ & ~0x00000001); - metricsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
- getMetricsFieldBuilder() : null; - } else { - metricsBuilder_.addAllMessages(other.metrics_); - } - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + @java.lang.Override + public Builder clone() { + return (Builder) super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return (Builder) super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.mlflow.api.proto.Service.LogBatch) { + return mergeFrom((org.mlflow.api.proto.Service.LogBatch)other); + } else { + super.mergeFrom(other); return this; } + } - @java.lang.Override - public final boolean isInitialized() { - return true; + public Builder mergeFrom(org.mlflow.api.proto.Service.LogBatch other) { + if (other == org.mlflow.api.proto.Service.LogBatch.getDefaultInstance()) return this; + if (other.hasRunId()) { + bitField0_ |= 0x00000001; + runId_ = other.runId_; + onChanged(); } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.GetMetricHistory.Response parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetMetricHistory.Response) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + if (metricsBuilder_ == null) { + if (!other.metrics_.isEmpty()) { + if (metrics_.isEmpty()) { + metrics_ = other.metrics_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureMetricsIsMutable(); + metrics_.addAll(other.metrics_); } + onChanged(); } - return this; - } - private int bitField0_; - - private java.util.List metrics_ = - java.util.Collections.emptyList(); - private void ensureMetricsIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - metrics_ = new java.util.ArrayList(metrics_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> metricsBuilder_; - - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public java.util.List getMetricsList() { - if (metricsBuilder_ == null) { - return java.util.Collections.unmodifiableList(metrics_); - } else { - return metricsBuilder_.getMessageList(); + } else { + if (!other.metrics_.isEmpty()) { + if (metricsBuilder_.isEmpty()) { + metricsBuilder_.dispose(); + metricsBuilder_ = null; + metrics_ = other.metrics_; + bitField0_ = (bitField0_ & ~0x00000002); + metricsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getMetricsFieldBuilder() : null; + } else { + metricsBuilder_.addAllMessages(other.metrics_); + } } } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public int getMetricsCount() { - if (metricsBuilder_ == null) { - return metrics_.size(); - } else { - return metricsBuilder_.getCount(); + if (paramsBuilder_ == null) { + if (!other.params_.isEmpty()) { + if (params_.isEmpty()) { + params_ = other.params_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureParamsIsMutable(); + params_.addAll(other.params_); + } + onChanged(); } - } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.Metric getMetrics(int index) { - if (metricsBuilder_ == null) { - return metrics_.get(index); - } else { - return metricsBuilder_.getMessage(index); + } else { + if (!other.params_.isEmpty()) { + if (paramsBuilder_.isEmpty()) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + params_ = other.params_; + bitField0_ = (bitField0_ & ~0x00000004); + paramsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getParamsFieldBuilder() : null; + } else { + paramsBuilder_.addAllMessages(other.params_); + } } } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder setMetrics( - int index, org.mlflow.api.proto.Service.Metric value) { - if (metricsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + if (tagsBuilder_ == null) { + if (!other.tags_.isEmpty()) { + if (tags_.isEmpty()) { + tags_ = other.tags_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTagsIsMutable(); + tags_.addAll(other.tags_); } - ensureMetricsIsMutable(); - metrics_.set(index, value); onChanged(); - } else { - metricsBuilder_.setMessage(index, value); } - return this; - } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder setMetrics( - int index, org.mlflow.api.proto.Service.Metric.Builder builderForValue) { - if (metricsBuilder_ == null) { - ensureMetricsIsMutable(); - metrics_.set(index, builderForValue.build()); - onChanged(); - } else { - metricsBuilder_.setMessage(index, builderForValue.build()); + } else { + if (!other.tags_.isEmpty()) { + if (tagsBuilder_.isEmpty()) { + tagsBuilder_.dispose(); + tagsBuilder_ = null; + tags_ = other.tags_; + bitField0_ = (bitField0_ & ~0x00000008); + tagsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getTagsFieldBuilder() : null; + } else { + tagsBuilder_.addAllMessages(other.tags_); + } } - return this; } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder addMetrics(org.mlflow.api.proto.Service.Metric value) { - if (metricsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureMetricsIsMutable(); - metrics_.add(value); - onChanged(); - } else { - metricsBuilder_.addMessage(value); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.mlflow.api.proto.Service.LogBatch parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.mlflow.api.proto.Service.LogBatch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } - return this; } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder addMetrics( - int index, org.mlflow.api.proto.Service.Metric value) { - if (metricsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureMetricsIsMutable(); - metrics_.add(index, value); - onChanged(); - } else { - metricsBuilder_.addMessage(index, value); + return this; + } + private int bitField0_; + + private java.lang.Object runId_ = ""; + /** + *
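The hunk above completes the generated LogBatch.Builder mergeFrom overloads: the optional run_id scalar is overwritten when the incoming message has it set, the repeated metrics/params/tags fields are concatenated, and a CodedInputStream overload delegates to the LogBatch parser. A minimal sketch of those proto2 merge semantics; the run IDs, keys, and values below are placeholders, not taken from this file:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.mlflow.api.proto.Service.LogBatch;
    import org.mlflow.api.proto.Service.Param;

    public class LogBatchMergeSketch {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        LogBatch a = LogBatch.newBuilder()
            .setRunId("run-a")
            .addParams(Param.newBuilder().setKey("alpha").setValue("0.1"))
            .build();
        LogBatch b = LogBatch.newBuilder()
            .setRunId("run-b")
            .addParams(Param.newBuilder().setKey("beta").setValue("0.2"))
            .build();

        LogBatch merged = a.toBuilder().mergeFrom(b).build();
        System.out.println(merged.getRunId());       // "run-b": optional scalars take the incoming value
        System.out.println(merged.getParamsCount()); // 2: repeated fields are concatenated

        // Round-trip through the wire format (standard generated parseFrom statics assumed).
        LogBatch reparsed = LogBatch.parseFrom(merged.toByteArray());
        System.out.println(reparsed.equals(merged)); // true
      }
    }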
+       * ID of the run to log under
+       * 
+ * + * optional string run_id = 1; + */ + public boolean hasRunId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + *
+       * ID of the run to log under
+       * 
+ * + * optional string run_id = 1; + */ + public java.lang.String getRunId() { + java.lang.Object ref = runId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + runId_ = s; } - return this; + return s; + } else { + return (java.lang.String) ref; } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder addMetrics( - org.mlflow.api.proto.Service.Metric.Builder builderForValue) { - if (metricsBuilder_ == null) { - ensureMetricsIsMutable(); - metrics_.add(builderForValue.build()); - onChanged(); - } else { - metricsBuilder_.addMessage(builderForValue.build()); - } - return this; + } + /** + *
+       * ID of the run to log under
+       * 
+ * + * optional string run_id = 1; + */ + public com.google.protobuf.ByteString + getRunIdBytes() { + java.lang.Object ref = runId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + runId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder addMetrics( - int index, org.mlflow.api.proto.Service.Metric.Builder builderForValue) { - if (metricsBuilder_ == null) { - ensureMetricsIsMutable(); - metrics_.add(index, builderForValue.build()); - onChanged(); - } else { - metricsBuilder_.addMessage(index, builderForValue.build()); - } - return this; + } + /** + *
+       * ID of the run to log under
+       * 
+ * + * optional string run_id = 1; + */ + public Builder setRunId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + /** + *
+       * ID of the run to log under
+       * 
+ * + * optional string run_id = 1; + */ + public Builder clearRunId() { + bitField0_ = (bitField0_ & ~0x00000001); + runId_ = getDefaultInstance().getRunId(); + onChanged(); + return this; + } + /** + *
+       * ID of the run to log under
+       * 
+ * + * optional string run_id = 1; + */ + public Builder setRunIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + runId_ = value; + onChanged(); + return this; + } + + private java.util.List metrics_ = + java.util.Collections.emptyList(); + private void ensureMetricsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + metrics_ = new java.util.ArrayList(metrics_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> metricsBuilder_; + + /** + *
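Taken together with the repeated-field adders that follow, the run_id accessors above form the usual fluent protobuf builder. A hedged sketch of composing a full LogBatch request; the run ID, keys, values, and timestamp are illustrative placeholders:

    import org.mlflow.api.proto.Service.LogBatch;
    import org.mlflow.api.proto.Service.Metric;
    import org.mlflow.api.proto.Service.Param;
    import org.mlflow.api.proto.Service.RunTag;

    static LogBatch buildBatch() {
      return LogBatch.newBuilder()
          .setRunId("0123456789abcdef")                 // optional string run_id = 1
          .addMetrics(Metric.newBuilder()               // key/value/timestamp per the Metric descriptor
              .setKey("loss").setValue(0.42).setTimestamp(1546300800000L))
          .addParams(Param.newBuilder().setKey("lr").setValue("0.01"))
          .addTags(RunTag.newBuilder().setKey("stage").setValue("dev"))
          .build();
    }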
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public java.util.List getMetricsList() { + if (metricsBuilder_ == null) { + return java.util.Collections.unmodifiableList(metrics_); + } else { + return metricsBuilder_.getMessageList(); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder addAllMetrics( - java.lang.Iterable values) { - if (metricsBuilder_ == null) { - ensureMetricsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, metrics_); - onChanged(); - } else { - metricsBuilder_.addAllMessages(values); - } - return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public int getMetricsCount() { + if (metricsBuilder_ == null) { + return metrics_.size(); + } else { + return metricsBuilder_.getCount(); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder clearMetrics() { - if (metricsBuilder_ == null) { - metrics_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - metricsBuilder_.clear(); - } - return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.Metric getMetrics(int index) { + if (metricsBuilder_ == null) { + return metrics_.get(index); + } else { + return metricsBuilder_.getMessage(index); } - /** - *
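The read-side accessors (getMetricsList, getMetricsCount, getMetrics(index)) mirror the adders. A small sketch of iterating the repeated field, assuming the standard generated getters on Metric (getKey, getValue, getTimestamp):

    static void printMetrics(org.mlflow.api.proto.Service.LogBatch batch) {
      // Iterate the repeated "metrics" field of a received LogBatch.
      for (org.mlflow.api.proto.Service.Metric m : batch.getMetricsList()) {
        System.out.printf("%s=%f @ %d%n", m.getKey(), m.getValue(), m.getTimestamp());
      }
    }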
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public Builder removeMetrics(int index) { - if (metricsBuilder_ == null) { - ensureMetricsIsMutable(); - metrics_.remove(index); - onChanged(); - } else { - metricsBuilder_.remove(index); + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder setMetrics( + int index, org.mlflow.api.proto.Service.Metric value) { + if (metricsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } - return this; + ensureMetricsIsMutable(); + metrics_.set(index, value); + onChanged(); + } else { + metricsBuilder_.setMessage(index, value); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.Metric.Builder getMetricsBuilder( - int index) { - return getMetricsFieldBuilder().getBuilder(index); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder setMetrics( + int index, org.mlflow.api.proto.Service.Metric.Builder builderForValue) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.set(index, builderForValue.build()); + onChanged(); + } else { + metricsBuilder_.setMessage(index, builderForValue.build()); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( - int index) { - if (metricsBuilder_ == null) { - return metrics_.get(index); } else { - return metricsBuilder_.getMessageOrBuilder(index); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder addMetrics(org.mlflow.api.proto.Service.Metric value) { + if (metricsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureMetricsIsMutable(); + metrics_.add(value); + onChanged(); + } else { + metricsBuilder_.addMessage(value); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public java.util.List - getMetricsOrBuilderList() { - if (metricsBuilder_ != null) { - return metricsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(metrics_); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder addMetrics( + int index, org.mlflow.api.proto.Service.Metric value) { + if (metricsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureMetricsIsMutable(); + metrics_.add(index, value); + onChanged(); + } else { + metricsBuilder_.addMessage(index, value); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.Metric.Builder addMetricsBuilder() { - return getMetricsFieldBuilder().addBuilder( - org.mlflow.api.proto.Service.Metric.getDefaultInstance()); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder addMetrics( + org.mlflow.api.proto.Service.Metric.Builder builderForValue) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.add(builderForValue.build()); + onChanged(); + } else { + metricsBuilder_.addMessage(builderForValue.build()); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public org.mlflow.api.proto.Service.Metric.Builder addMetricsBuilder( - int index) { - return getMetricsFieldBuilder().addBuilder( - index, org.mlflow.api.proto.Service.Metric.getDefaultInstance()); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder addMetrics( + int index, org.mlflow.api.proto.Service.Metric.Builder builderForValue) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.add(index, builderForValue.build()); + onChanged(); + } else { + metricsBuilder_.addMessage(index, builderForValue.build()); } - /** - *
-         * All logged values for this metric.
-         * 
- * - * repeated .mlflow.Metric metrics = 1; - */ - public java.util.List - getMetricsBuilderList() { - return getMetricsFieldBuilder().getBuilderList(); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder addAllMetrics( + java.lang.Iterable values) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, metrics_); + onChanged(); + } else { + metricsBuilder_.addAllMessages(values); } - private com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> - getMetricsFieldBuilder() { - if (metricsBuilder_ == null) { - metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder>( - metrics_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - metrics_ = null; - } - return metricsBuilder_; + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder clearMetrics() { + if (metricsBuilder_ == null) { + metrics_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + metricsBuilder_.clear(); } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public Builder removeMetrics(int index) { + if (metricsBuilder_ == null) { + ensureMetricsIsMutable(); + metrics_.remove(index); + onChanged(); + } else { + metricsBuilder_.remove(index); } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); + return this; + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.Metric.Builder getMetricsBuilder( + int index) { + return getMetricsFieldBuilder().getBuilder(index); + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.MetricOrBuilder getMetricsOrBuilder( + int index) { + if (metricsBuilder_ == null) { + return metrics_.get(index); } else { + return metricsBuilder_.getMessageOrBuilder(index); } - - - // @@protoc_insertion_point(builder_scope:mlflow.GetMetricHistory.Response) } - - // @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory.Response) - private static final org.mlflow.api.proto.Service.GetMetricHistory.Response DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetMetricHistory.Response(); + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public java.util.List + getMetricsOrBuilderList() { + if (metricsBuilder_ != null) { + return metricsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(metrics_); + } } - - public static org.mlflow.api.proto.Service.GetMetricHistory.Response getDefaultInstance() { - return DEFAULT_INSTANCE; + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.Metric.Builder addMetricsBuilder() { + return getMetricsFieldBuilder().addBuilder( + org.mlflow.api.proto.Service.Metric.getDefaultInstance()); } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public Response parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Response(input, extensionRegistry); + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public org.mlflow.api.proto.Service.Metric.Builder addMetricsBuilder( + int index) { + return getMetricsFieldBuilder().addBuilder( + index, org.mlflow.api.proto.Service.Metric.getDefaultInstance()); + } + /** + *
+       * Metrics to log. A single request can contain up to 1000 metrics, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Metric metrics = 2; + */ + public java.util.List + getMetricsBuilderList() { + return getMetricsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder> + getMetricsFieldBuilder() { + if (metricsBuilder_ == null) { + metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Metric, org.mlflow.api.proto.Service.Metric.Builder, org.mlflow.api.proto.Service.MetricOrBuilder>( + metrics_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + metrics_ = null; } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + return metricsBuilder_; } - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory.Response getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + private java.util.List params_ = + java.util.Collections.emptyList(); + private void ensureParamsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + params_ = new java.util.ArrayList(params_); + bitField0_ |= 0x00000004; + } } - } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Param, org.mlflow.api.proto.Service.Param.Builder, org.mlflow.api.proto.Service.ParamOrBuilder> paramsBuilder_; - private int bitField0_; - public static final int RUN_UUID_FIELD_NUMBER = 1; - private volatile java.lang.Object runUuid_; - /** - *
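The metrics comments above, together with the params and tags comments that follow, document the per-request limits: at most 1000 metrics, 100 params, 100 tags, and 1000 entities in total. A client-side pre-check follows directly from the generated count accessors; this helper is illustrative, not part of the generated file:

    static void checkLimits(org.mlflow.api.proto.Service.LogBatch batch) {
      // Limits taken from the field comments in this hunk.
      int total = batch.getMetricsCount() + batch.getParamsCount() + batch.getTagsCount();
      if (batch.getMetricsCount() > 1000 || batch.getParamsCount() > 100
          || batch.getTagsCount() > 100 || total > 1000) {
        throw new IllegalArgumentException("LogBatch exceeds documented request limits");
      }
    }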
-     * ID of the run from which to fetch metric values.
-     * 
- * - * optional string run_uuid = 1 [(.validate_required) = true]; - */ - public boolean hasRunUuid() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - *
-     * ID of the run from which to fetch metric values.
-     * 
- * - * optional string run_uuid = 1 [(.validate_required) = true]; - */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runUuid_ = s; + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public java.util.List getParamsList() { + if (paramsBuilder_ == null) { + return java.util.Collections.unmodifiableList(params_); + } else { + return paramsBuilder_.getMessageList(); } - return s; - } - } - /** - *
-     * ID of the run from which to fetch metric values.
-     * 
- * - * optional string run_uuid = 1 [(.validate_required) = true]; - */ - public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runUuid_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; } - } - - public static final int METRIC_KEY_FIELD_NUMBER = 2; - private volatile java.lang.Object metricKey_; - /** - *
-     * Name of the metric.
-     * 
- * - * optional string metric_key = 2 [(.validate_required) = true]; - */ - public boolean hasMetricKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - *
-     * Name of the metric.
-     * 
- * - * optional string metric_key = 2 [(.validate_required) = true]; - */ - public java.lang.String getMetricKey() { - java.lang.Object ref = metricKey_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - metricKey_ = s; + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public int getParamsCount() { + if (paramsBuilder_ == null) { + return params_.size(); + } else { + return paramsBuilder_.getCount(); } - return s; } - } - /** - *
-     * Name of the metric.
-     * 
- * - * optional string metric_key = 2 [(.validate_required) = true]; - */ - public com.google.protobuf.ByteString - getMetricKeyBytes() { - java.lang.Object ref = metricKey_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - metricKey_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.Param getParams(int index) { + if (paramsBuilder_ == null) { + return params_.get(index); + } else { + return paramsBuilder_.getMessage(index); + } } - } - - private byte memoizedIsInitialized = -1; - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, runUuid_); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder setParams( + int index, org.mlflow.api.proto.Service.Param value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParamsIsMutable(); + params_.set(index, value); + onChanged(); + } else { + paramsBuilder_.setMessage(index, value); + } + return this; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, metricKey_); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder setParams( + int index, org.mlflow.api.proto.Service.Param.Builder builderForValue) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.set(index, builderForValue.build()); + onChanged(); + } else { + paramsBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, runUuid_); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder addParams(org.mlflow.api.proto.Service.Param value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParamsIsMutable(); + params_.add(value); + onChanged(); + } else { + paramsBuilder_.addMessage(value); + } + return this; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, metricKey_); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder addParams( + int index, org.mlflow.api.proto.Service.Param value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParamsIsMutable(); + params_.add(index, value); + onChanged(); + } else { + paramsBuilder_.addMessage(index, value); + } + return this; } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder addParams( + org.mlflow.api.proto.Service.Param.Builder builderForValue) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.add(builderForValue.build()); + onChanged(); + } else { + paramsBuilder_.addMessage(builderForValue.build()); + } + return this; } - if (!(obj instanceof org.mlflow.api.proto.Service.GetMetricHistory)) { - return super.equals(obj); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder addParams( + int index, org.mlflow.api.proto.Service.Param.Builder builderForValue) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.add(index, builderForValue.build()); + onChanged(); + } else { + paramsBuilder_.addMessage(index, builderForValue.build()); + } + return this; } - org.mlflow.api.proto.Service.GetMetricHistory other = (org.mlflow.api.proto.Service.GetMetricHistory) obj; - - boolean result = true; - result = result && (hasRunUuid() == other.hasRunUuid()); - if (hasRunUuid()) { - result = result && getRunUuid() - .equals(other.getRunUuid()); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder addAllParams( + java.lang.Iterable values) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, params_); + onChanged(); + } else { + paramsBuilder_.addAllMessages(values); + } + return this; } - result = result && (hasMetricKey() == other.hasMetricKey()); - if (hasMetricKey()) { - result = result && getMetricKey() - .equals(other.getMetricKey()); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder clearParams() { + if (paramsBuilder_ == null) { + params_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + paramsBuilder_.clear(); + } + return this; } - result = result && unknownFields.equals(other.unknownFields); - return result; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public Builder removeParams(int index) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.remove(index); + onChanged(); + } else { + paramsBuilder_.remove(index); + } + return this; } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasRunUuid()) { - hash = (37 * hash) + RUN_UUID_FIELD_NUMBER; - hash = (53 * hash) + getRunUuid().hashCode(); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.Param.Builder getParamsBuilder( + int index) { + return getParamsFieldBuilder().getBuilder(index); } - if (hasMetricKey()) { - hash = (37 * hash) + METRIC_KEY_FIELD_NUMBER; - hash = (53 * hash) + getMetricKey().hashCode(); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.ParamOrBuilder getParamsOrBuilder( + int index) { + if (paramsBuilder_ == null) { + return params_.get(index); } else { + return paramsBuilder_.getMessageOrBuilder(index); + } } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); - } - public static org.mlflow.api.proto.Service.GetMetricHistory parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { return newBuilder(); } - public 
static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-    public static Builder newBuilder(org.mlflow.api.proto.Service.GetMetricHistory prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-    @java.lang.Override
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE
-          ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code mlflow.GetMetricHistory}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:mlflow.GetMetricHistory)
-        org.mlflow.api.proto.Service.GetMetricHistoryOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_descriptor;
+      /**
+       * <pre>
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public java.util.List + getParamsOrBuilderList() { + if (paramsBuilder_ != null) { + return paramsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(params_); + } } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.mlflow.api.proto.Service.GetMetricHistory.class, org.mlflow.api.proto.Service.GetMetricHistory.Builder.class); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.Param.Builder addParamsBuilder() { + return getParamsFieldBuilder().addBuilder( + org.mlflow.api.proto.Service.Param.getDefaultInstance()); } - - // Construct using org.mlflow.api.proto.Service.GetMetricHistory.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public org.mlflow.api.proto.Service.Param.Builder addParamsBuilder( + int index) { + return getParamsFieldBuilder().addBuilder( + index, org.mlflow.api.proto.Service.Param.getDefaultInstance()); } - - private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + /** + *
+       * Params to log. A single request can contain up to 100 params, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.Param params = 3; + */ + public java.util.List + getParamsBuilderList() { + return getParamsFieldBuilder().getBuilderList(); } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Param, org.mlflow.api.proto.Service.Param.Builder, org.mlflow.api.proto.Service.ParamOrBuilder> + getParamsFieldBuilder() { + if (paramsBuilder_ == null) { + paramsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.Param, org.mlflow.api.proto.Service.Param.Builder, org.mlflow.api.proto.Service.ParamOrBuilder>( + params_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + params_ = null; } - } - @java.lang.Override - public Builder clear() { - super.clear(); - runUuid_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - metricKey_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; + return paramsBuilder_; } - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.mlflow.api.proto.Service.internal_static_mlflow_GetMetricHistory_descriptor; + private java.util.List tags_ = + java.util.Collections.emptyList(); + private void ensureTagsIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = new java.util.ArrayList(tags_); + bitField0_ |= 0x00000008; + } } - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() { - return org.mlflow.api.proto.Service.GetMetricHistory.getDefaultInstance(); - } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.RunTag, org.mlflow.api.proto.Service.RunTag.Builder, org.mlflow.api.proto.Service.RunTagOrBuilder> tagsBuilder_; - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory build() { - org.mlflow.api.proto.Service.GetMetricHistory result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + *
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public java.util.List getTagsList() { + if (tagsBuilder_ == null) { + return java.util.Collections.unmodifiableList(tags_); + } else { + return tagsBuilder_.getMessageList(); } - return result; } - - @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory buildPartial() { - org.mlflow.api.proto.Service.GetMetricHistory result = new org.mlflow.api.proto.Service.GetMetricHistory(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.runUuid_ = runUuid_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + /** + *
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public int getTagsCount() { + if (tagsBuilder_ == null) { + return tags_.size(); + } else { + return tagsBuilder_.getCount(); } - result.metricKey_ = metricKey_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return (Builder) super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); - } - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.mlflow.api.proto.Service.GetMetricHistory) { - return mergeFrom((org.mlflow.api.proto.Service.GetMetricHistory)other); + /** + *
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public org.mlflow.api.proto.Service.RunTag getTags(int index) { + if (tagsBuilder_ == null) { + return tags_.get(index); } else { - super.mergeFrom(other); - return this; + return tagsBuilder_.getMessage(index); } } - - public Builder mergeFrom(org.mlflow.api.proto.Service.GetMetricHistory other) { - if (other == org.mlflow.api.proto.Service.GetMetricHistory.getDefaultInstance()) return this; - if (other.hasRunUuid()) { - bitField0_ |= 0x00000001; - runUuid_ = other.runUuid_; + /** + *
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public Builder setTags( + int index, org.mlflow.api.proto.Service.RunTag value) { + if (tagsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTagsIsMutable(); + tags_.set(index, value); onChanged(); + } else { + tagsBuilder_.setMessage(index, value); } - if (other.hasMetricKey()) { - bitField0_ |= 0x00000002; - metricKey_ = other.metricKey_; + return this; + } + /** + *
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public Builder setTags( + int index, org.mlflow.api.proto.Service.RunTag.Builder builderForValue) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.set(index, builderForValue.build()); onChanged(); + } else { + tagsBuilder_.setMessage(index, builderForValue.build()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); return this; } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.mlflow.api.proto.Service.GetMetricHistory parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.mlflow.api.proto.Service.GetMetricHistory) e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + /** + *
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
+       * 
+ * + * repeated .mlflow.RunTag tags = 4; + */ + public Builder addTags(org.mlflow.api.proto.Service.RunTag value) { + if (tagsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureTagsIsMutable(); + tags_.add(value); + onChanged(); + } else { + tagsBuilder_.addMessage(value); } return this; } - private int bitField0_; - - private java.lang.Object runUuid_ = ""; /** *
-       * ID of the run from which to fetch metric values.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * </pre>
        *
-       * <code>optional string run_uuid = 1 [(.validate_required) = true];</code>
+       * <code>repeated .mlflow.RunTag tags = 4;</code>
        */
-      public boolean hasRunUuid() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
+      public Builder addTags(
+          int index, org.mlflow.api.proto.Service.RunTag value) {
+        if (tagsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureTagsIsMutable();
+          tags_.add(index, value);
+          onChanged();
+        } else {
+          tagsBuilder_.addMessage(index, value);
+        }
+        return this;
      }
      /**
       * <pre>
-       * ID of the run from which to fetch metric values.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public java.lang.String getRunUuid() { - java.lang.Object ref = runUuid_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - runUuid_ = s; - } - return s; + public Builder addTags( + org.mlflow.api.proto.Service.RunTag.Builder builderForValue) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.add(builderForValue.build()); + onChanged(); } else { - return (java.lang.String) ref; + tagsBuilder_.addMessage(builderForValue.build()); } + return this; } /** *
-       * ID of the run from which to fetch metric values.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public com.google.protobuf.ByteString - getRunUuidBytes() { - java.lang.Object ref = runUuid_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - runUuid_ = b; - return b; + public Builder addTags( + int index, org.mlflow.api.proto.Service.RunTag.Builder builderForValue) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.add(index, builderForValue.build()); + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + tagsBuilder_.addMessage(index, builderForValue.build()); } + return this; } /** *
-       * ID of the run from which to fetch metric values.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public Builder setRunUuid( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - runUuid_ = value; - onChanged(); + public Builder addAllTags( + java.lang.Iterable values) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, tags_); + onChanged(); + } else { + tagsBuilder_.addAllMessages(values); + } return this; } /** *
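The removed lines in this stretch delete the old GetMetricHistory builder from this position; the message itself is retained elsewhere in the regenerated file (its descriptor still appears in the static section below). Assuming the relocated message keeps the run_uuid and metric_key setters shown here, a minimal request sketch with placeholder values:

    import org.mlflow.api.proto.Service.GetMetricHistory;

    GetMetricHistory query = GetMetricHistory.newBuilder()
        .setRunUuid("0123456789abcdef")   // optional string run_uuid = 1, validate_required
        .setMetricKey("loss")             // optional string metric_key = 2
        .build();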
-       * ID of the run from which to fetch metric values.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * </pre>
        *
-       * <code>optional string run_uuid = 1 [(.validate_required) = true];</code>
+       * <code>repeated .mlflow.RunTag tags = 4;</code>
        */
-      public Builder clearRunUuid() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        runUuid_ = getDefaultInstance().getRunUuid();
-        onChanged();
+      public Builder clearTags() {
+        if (tagsBuilder_ == null) {
+          tags_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000008);
+          onChanged();
+        } else {
+          tagsBuilder_.clear();
+        }
        return this;
      }
      /**
       * <pre>
-       * ID of the run from which to fetch metric values.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string run_uuid = 1 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public Builder setRunUuidBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - runUuid_ = value; - onChanged(); + public Builder removeTags(int index) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.remove(index); + onChanged(); + } else { + tagsBuilder_.remove(index); + } return this; } - - private java.lang.Object metricKey_ = ""; /** *
-       * Name of the metric.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public boolean hasMetricKey() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public org.mlflow.api.proto.Service.RunTag.Builder getTagsBuilder( + int index) { + return getTagsFieldBuilder().getBuilder(index); } /** *
-       * Name of the metric.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public java.lang.String getMetricKey() { - java.lang.Object ref = metricKey_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - metricKey_ = s; - } - return s; - } else { - return (java.lang.String) ref; + public org.mlflow.api.proto.Service.RunTagOrBuilder getTagsOrBuilder( + int index) { + if (tagsBuilder_ == null) { + return tags_.get(index); } else { + return tagsBuilder_.getMessageOrBuilder(index); } } /** *
-       * Name of the metric.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public com.google.protobuf.ByteString - getMetricKeyBytes() { - java.lang.Object ref = metricKey_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - metricKey_ = b; - return b; + public java.util.List + getTagsOrBuilderList() { + if (tagsBuilder_ != null) { + return tagsBuilder_.getMessageOrBuilderList(); } else { - return (com.google.protobuf.ByteString) ref; + return java.util.Collections.unmodifiableList(tags_); } } /** *
-       * Name of the metric.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public Builder setMetricKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - metricKey_ = value; - onChanged(); - return this; + public org.mlflow.api.proto.Service.RunTag.Builder addTagsBuilder() { + return getTagsFieldBuilder().addBuilder( + org.mlflow.api.proto.Service.RunTag.getDefaultInstance()); } /** *
-       * Name of the metric.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public Builder clearMetricKey() { - bitField0_ = (bitField0_ & ~0x00000002); - metricKey_ = getDefaultInstance().getMetricKey(); - onChanged(); - return this; + public org.mlflow.api.proto.Service.RunTag.Builder addTagsBuilder( + int index) { + return getTagsFieldBuilder().addBuilder( + index, org.mlflow.api.proto.Service.RunTag.getDefaultInstance()); } /** *
-       * Name of the metric.
+       * Tags to log. A single request can contain up to 100 tags, and up to 1000
+       * metrics, params, and tags in total.
        * 
* - * optional string metric_key = 2 [(.validate_required) = true]; + * repeated .mlflow.RunTag tags = 4; */ - public Builder setMetricKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - metricKey_ = value; - onChanged(); - return this; + public java.util.List + getTagsBuilderList() { + return getTagsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.RunTag, org.mlflow.api.proto.Service.RunTag.Builder, org.mlflow.api.proto.Service.RunTagOrBuilder> + getTagsFieldBuilder() { + if (tagsBuilder_ == null) { + tagsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.mlflow.api.proto.Service.RunTag, org.mlflow.api.proto.Service.RunTag.Builder, org.mlflow.api.proto.Service.RunTagOrBuilder>( + tags_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tags_ = null; + } + return tagsBuilder_; } @java.lang.Override public final Builder setUnknownFields( @@ -46795,41 +42067,41 @@ public final Builder mergeUnknownFields( } - // @@protoc_insertion_point(builder_scope:mlflow.GetMetricHistory) + // @@protoc_insertion_point(builder_scope:mlflow.LogBatch) } - // @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory) - private static final org.mlflow.api.proto.Service.GetMetricHistory DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:mlflow.LogBatch) + private static final org.mlflow.api.proto.Service.LogBatch DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.GetMetricHistory(); + DEFAULT_INSTANCE = new org.mlflow.api.proto.Service.LogBatch(); } - public static org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstance() { + public static org.mlflow.api.proto.Service.LogBatch getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Deprecated public static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override - public GetMetricHistory parsePartialFrom( + public LogBatch parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new GetMetricHistory(input, extensionRegistry); + return new LogBatch(input, extensionRegistry); } }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override - public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() { + public org.mlflow.api.proto.Service.LogBatch getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -47001,65 +42273,25 @@ public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_mlflow_SetTag_Response_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetRun_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetRun_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - 
internal_static_mlflow_GetRun_Response_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetRun_Response_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetMetric_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetMetric_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetMetric_Response_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetMetric_Response_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetParam_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetParam_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetParam_Response_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetParam_Response_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_SearchExpression_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_SearchExpression_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_MetricSearchExpression_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_MetricSearchExpression_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_ParameterSearchExpression_descriptor; + internal_static_mlflow_DeleteTag_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_ParameterSearchExpression_fieldAccessorTable; + internal_static_mlflow_DeleteTag_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_StringClause_descriptor; + internal_static_mlflow_DeleteTag_Response_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_StringClause_fieldAccessorTable; + internal_static_mlflow_DeleteTag_Response_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_FloatClause_descriptor; + internal_static_mlflow_GetRun_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_FloatClause_fieldAccessorTable; + internal_static_mlflow_GetRun_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_DoubleClause_descriptor; + internal_static_mlflow_GetRun_Response_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_DoubleClause_fieldAccessorTable; + internal_static_mlflow_GetRun_Response_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_mlflow_SearchRuns_descriptor; private static final @@ -47086,25 +42318,25 @@ public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() com.google.protobuf.GeneratedMessageV3.FieldAccessorTable 
internal_static_mlflow_FileInfo_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetArtifact_descriptor; + internal_static_mlflow_GetMetricHistory_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetArtifact_fieldAccessorTable; + internal_static_mlflow_GetMetricHistory_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetArtifact_Response_descriptor; + internal_static_mlflow_GetMetricHistory_Response_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetArtifact_Response_fieldAccessorTable; + internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetMetricHistory_descriptor; + internal_static_mlflow_LogBatch_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetMetricHistory_fieldAccessorTable; + internal_static_mlflow_LogBatch_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor - internal_static_mlflow_GetMetricHistory_Response_descriptor; + internal_static_mlflow_LogBatch_Response_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable; + internal_static_mlflow_LogBatch_Response_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -47115,188 +42347,189 @@ public org.mlflow.api.proto.Service.GetMetricHistory getDefaultInstanceForType() static { java.lang.String[] descriptorData = { "\n\rservice.proto\022\006mlflow\032\025scalapb/scalapb" + - ".proto\032\020databricks.proto\"7\n\006Metric\022\013\n\003ke" + + ".proto\032\020databricks.proto\"H\n\006Metric\022\013\n\003ke" + "y\030\001 \001(\t\022\r\n\005value\030\002 \001(\001\022\021\n\ttimestamp\030\003 \001(" + - "\003\"#\n\005Param\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"C" + - "\n\003Run\022\035\n\004info\030\001 \001(\0132\017.mlflow.RunInfo\022\035\n\004" + - "data\030\002 \001(\0132\017.mlflow.RunData\"g\n\007RunData\022\037" + - "\n\007metrics\030\001 \003(\0132\016.mlflow.Metric\022\035\n\006param" + - "s\030\002 \003(\0132\r.mlflow.Param\022\034\n\004tags\030\003 \003(\0132\016.m" + - "lflow.RunTag\"$\n\006RunTag\022\013\n\003key\030\001 \001(\t\022\r\n\005v" + - "alue\030\002 \001(\t\"\271\002\n\007RunInfo\022\020\n\010run_uuid\030\001 \001(\t" + - "\022\025\n\rexperiment_id\030\002 \001(\003\022\014\n\004name\030\003 \001(\t\022\'\n" + - "\013source_type\030\004 \001(\0162\022.mlflow.SourceType\022\023" + - "\n\013source_name\030\005 \001(\t\022\017\n\007user_id\030\006 \001(\t\022!\n\006" + - "status\030\007 \001(\0162\021.mlflow.RunStatus\022\022\n\nstart" + - "_time\030\010 \001(\003\022\020\n\010end_time\030\t \001(\003\022\026\n\016source_" + - "version\030\n \001(\t\022\030\n\020entry_point_name\030\013 \001(\t\022" + - "\024\n\014artifact_uri\030\r \001(\t\022\027\n\017lifecycle_stage" + - "\030\016 \001(\t\"\226\001\n\nExperiment\022\025\n\rexperiment_id\030\001" + - " \001(\003\022\014\n\004name\030\002 \001(\t\022\031\n\021artifact_location\030" + - "\003 \001(\t\022\027\n\017lifecycle_stage\030\004 \001(\t\022\030\n\020last_u" + - "pdate_time\030\005 \001(\003\022\025\n\rcreation_time\030\006 
\001(\003\"" + - "\221\001\n\020CreateExperiment\022\022\n\004name\030\001 \001(\tB\004\210\265\030\001" + - "\022\031\n\021artifact_location\030\002 \001(\t\032!\n\010Response\022" + - "\025\n\rexperiment_id\030\001 \001(\003:+\342?(\n&com.databri" + - "cks.rpc.RPC[$this.Response]\"\230\001\n\017ListExpe" + - "riments\022#\n\tview_type\030\001 \001(\0162\020.mlflow.View" + - "Type\0323\n\010Response\022\'\n\013experiments\030\001 \003(\0132\022." + - "mlflow.Experiment:+\342?(\n&com.databricks.r" + - "pc.RPC[$this.Response]\"\254\001\n\rGetExperiment" + - "\022\033\n\rexperiment_id\030\001 \001(\003B\004\210\265\030\001\032Q\n\010Respons" + - "e\022&\n\nexperiment\030\001 \001(\0132\022.mlflow.Experimen" + - "t\022\035\n\004runs\030\002 \003(\0132\017.mlflow.RunInfo:+\342?(\n&c" + - "om.databricks.rpc.RPC[$this.Response]\"h\n" + - "\020DeleteExperiment\022\033\n\rexperiment_id\030\001 \001(\003" + - "B\004\210\265\030\001\032\n\n\010Response:+\342?(\n&com.databricks." + - "rpc.RPC[$this.Response]\"i\n\021RestoreExperi" + - "ment\022\033\n\rexperiment_id\030\001 \001(\003B\004\210\265\030\001\032\n\n\010Res" + + "\003\022\017\n\004step\030\004 \001(\003:\0010\"#\n\005Param\022\013\n\003key\030\001 \001(\t" + + "\022\r\n\005value\030\002 \001(\t\"C\n\003Run\022\035\n\004info\030\001 \001(\0132\017.m" + + "lflow.RunInfo\022\035\n\004data\030\002 \001(\0132\017.mlflow.Run" + + "Data\"g\n\007RunData\022\037\n\007metrics\030\001 \003(\0132\016.mlflo" + + "w.Metric\022\035\n\006params\030\002 \003(\0132\r.mlflow.Param\022" + + "\034\n\004tags\030\003 \003(\0132\016.mlflow.RunTag\"$\n\006RunTag\022" + + "\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"\313\001\n\007RunInfo\022" + + "\016\n\006run_id\030\017 \001(\t\022\020\n\010run_uuid\030\001 \001(\t\022\025\n\rexp" + + "eriment_id\030\002 \001(\t\022\017\n\007user_id\030\006 \001(\t\022!\n\006sta" + + "tus\030\007 \001(\0162\021.mlflow.RunStatus\022\022\n\nstart_ti" + + "me\030\010 \001(\003\022\020\n\010end_time\030\t \001(\003\022\024\n\014artifact_u" + + "ri\030\r \001(\t\022\027\n\017lifecycle_stage\030\016 \001(\t\"\226\001\n\nEx" + + "periment\022\025\n\rexperiment_id\030\001 \001(\t\022\014\n\004name\030" + + "\002 \001(\t\022\031\n\021artifact_location\030\003 \001(\t\022\027\n\017life" + + "cycle_stage\030\004 \001(\t\022\030\n\020last_update_time\030\005 " + + "\001(\003\022\025\n\rcreation_time\030\006 \001(\003\"\221\001\n\020CreateExp" + + "eriment\022\022\n\004name\030\001 \001(\tB\004\370\206\031\001\022\031\n\021artifact_" + + "location\030\002 \001(\t\032!\n\010Response\022\025\n\rexperiment" + + "_id\030\001 \001(\t:+\342?(\n&com.databricks.rpc.RPC[$" + + "this.Response]\"\230\001\n\017ListExperiments\022#\n\tvi" + + "ew_type\030\001 \001(\0162\020.mlflow.ViewType\0323\n\010Respo" + + "nse\022\'\n\013experiments\030\001 \003(\0132\022.mlflow.Experi" + + "ment:+\342?(\n&com.databricks.rpc.RPC[$this." 
+ + "Response]\"\254\001\n\rGetExperiment\022\033\n\rexperimen" + + "t_id\030\001 \001(\tB\004\370\206\031\001\032Q\n\010Response\022&\n\nexperime" + + "nt\030\001 \001(\0132\022.mlflow.Experiment\022\035\n\004runs\030\002 \003" + + "(\0132\017.mlflow.RunInfo:+\342?(\n&com.databricks" + + ".rpc.RPC[$this.Response]\"h\n\020DeleteExperi" + + "ment\022\033\n\rexperiment_id\030\001 \001(\tB\004\370\206\031\001\032\n\n\010Res" + "ponse:+\342?(\n&com.databricks.rpc.RPC[$this" + - ".Response]\"z\n\020UpdateExperiment\022\033\n\rexperi" + - "ment_id\030\001 \001(\003B\004\210\265\030\001\022\020\n\010new_name\030\002 \001(\t\032\n\n" + - "\010Response:+\342?(\n&com.databricks.rpc.RPC[$" + - "this.Response]\"\321\002\n\tCreateRun\022\025\n\rexperime" + - "nt_id\030\001 \001(\003\022\017\n\007user_id\030\002 \001(\t\022\020\n\010run_name" + - "\030\003 \001(\t\022\'\n\013source_type\030\004 \001(\0162\022.mlflow.Sou" + - "rceType\022\023\n\013source_name\030\005 \001(\t\022\030\n\020entry_po" + - "int_name\030\006 \001(\t\022\022\n\nstart_time\030\007 \001(\003\022\026\n\016so" + - "urce_version\030\010 \001(\t\022\034\n\004tags\030\t \003(\0132\016.mlflo" + - "w.RunTag\022\025\n\rparent_run_id\030\n \001(\t\032$\n\010Respo" + - "nse\022\030\n\003run\030\001 \001(\0132\013.mlflow.Run:+\342?(\n&com." + - "databricks.rpc.RPC[$this.Response]\"\264\001\n\tU" + - "pdateRun\022\026\n\010run_uuid\030\001 \001(\tB\004\210\265\030\001\022!\n\006stat" + - "us\030\002 \001(\0162\021.mlflow.RunStatus\022\020\n\010end_time\030" + - "\003 \001(\003\032-\n\010Response\022!\n\010run_info\030\001 \001(\0132\017.ml" + - "flow.RunInfo:+\342?(\n&com.databricks.rpc.RP" + - "C[$this.Response]\"Z\n\tDeleteRun\022\024\n\006run_id" + - "\030\001 \001(\tB\004\210\265\030\001\032\n\n\010Response:+\342?(\n&com.datab" + - "ricks.rpc.RPC[$this.Response]\"[\n\nRestore" + - "Run\022\024\n\006run_id\030\001 \001(\tB\004\210\265\030\001\032\n\n\010Response:+\342" + - "?(\n&com.databricks.rpc.RPC[$this.Respons" + - "e]\"\235\001\n\tLogMetric\022\026\n\010run_uuid\030\001 \001(\tB\004\210\265\030\001" + - "\022\021\n\003key\030\002 \001(\tB\004\210\265\030\001\022\023\n\005value\030\003 \001(\001B\004\210\265\030\001" + - "\022\027\n\ttimestamp\030\004 \001(\003B\004\210\265\030\001\032\n\n\010Response:+\342" + - "?(\n&com.databricks.rpc.RPC[$this.Respons" + - "e]\"\203\001\n\010LogParam\022\026\n\010run_uuid\030\001 \001(\tB\004\210\265\030\001\022" + - "\021\n\003key\030\002 \001(\tB\004\210\265\030\001\022\023\n\005value\030\003 \001(\tB\004\210\265\030\001\032" + - "\n\n\010Response:+\342?(\n&com.databricks.rpc.RPC" + - "[$this.Response]\"\201\001\n\006SetTag\022\026\n\010run_uuid\030" + - "\001 \001(\tB\004\210\265\030\001\022\021\n\003key\030\002 \001(\tB\004\210\265\030\001\022\023\n\005value\030" + - "\003 \001(\tB\004\210\265\030\001\032\n\n\010Response:+\342?(\n&com.databr" + - "icks.rpc.RPC[$this.Response]\"s\n\006GetRun\022\026" + - "\n\010run_uuid\030\001 \001(\tB\004\210\265\030\001\032$\n\010Response\022\030\n\003ru" + - "n\030\001 \001(\0132\013.mlflow.Run:+\342?(\n&com.databrick" + - "s.rpc.RPC[$this.Response]\"\226\001\n\tGetMetric\022" + - "\026\n\010run_uuid\030\001 \001(\tB\004\210\265\030\001\022\030\n\nmetric_key\030\002 " + - "\001(\tB\004\210\265\030\001\032*\n\010Response\022\036\n\006metric\030\001 \001(\0132\016." 
+ - "mlflow.Metric:+\342?(\n&com.databricks.rpc.R" + - "PC[$this.Response]\"\227\001\n\010GetParam\022\026\n\010run_u" + - "uid\030\001 \001(\tB\004\210\265\030\001\022\030\n\nparam_name\030\002 \001(\tB\004\210\265\030" + - "\001\032,\n\010Response\022 \n\tparameter\030\001 \001(\0132\r.mlflo" + - "w.Param:+\342?(\n&com.databricks.rpc.RPC[$th" + - "is.Response]\"\212\001\n\020SearchExpression\0220\n\006met" + - "ric\030\001 \001(\0132\036.mlflow.MetricSearchExpressio" + - "nH\000\0226\n\tparameter\030\002 \001(\0132!.mlflow.Paramete" + - "rSearchExpressionH\000B\014\n\nexpression\"}\n\026Met" + - "ricSearchExpression\022\013\n\003key\030\001 \001(\t\022$\n\005floa" + - "t\030\002 \001(\0132\023.mlflow.FloatClauseH\000\022&\n\006double" + - "\030\003 \001(\0132\024.mlflow.DoubleClauseH\000B\010\n\006clause" + - "\"Z\n\031ParameterSearchExpression\022\013\n\003key\030\001 \001" + - "(\t\022&\n\006string\030\002 \001(\0132\024.mlflow.StringClause" + - "H\000B\010\n\006clause\"1\n\014StringClause\022\022\n\ncomparat" + - "or\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"0\n\013FloatClause\022\022" + - "\n\ncomparator\030\001 \001(\t\022\r\n\005value\030\002 \001(\002\"1\n\014Dou" + - "bleClause\022\022\n\ncomparator\030\001 \001(\t\022\r\n\005value\030\002" + - " \001(\001\"\343\001\n\nSearchRuns\022\026\n\016experiment_ids\030\001 " + - "\003(\003\0223\n\021anded_expressions\030\002 \003(\0132\030.mlflow." + - "SearchExpression\0224\n\rrun_view_type\030\003 \001(\0162" + - "\020.mlflow.ViewType:\013ACTIVE_ONLY\032%\n\010Respon" + - "se\022\031\n\004runs\030\001 \003(\0132\013.mlflow.Run:+\342?(\n&com." + - "databricks.rpc.RPC[$this.Response]\"\233\001\n\rL" + - "istArtifacts\022\020\n\010run_uuid\030\001 \001(\t\022\014\n\004path\030\002" + - " \001(\t\032=\n\010Response\022\020\n\010root_uri\030\001 \001(\t\022\037\n\005fi" + - "les\030\002 \003(\0132\020.mlflow.FileInfo:+\342?(\n&com.da" + - "tabricks.rpc.RPC[$this.Response]\";\n\010File" + - "Info\022\014\n\004path\030\001 \001(\t\022\016\n\006is_dir\030\002 \001(\010\022\021\n\tfi" + - "le_size\030\003 \001(\003\"f\n\013GetArtifact\022\020\n\010run_uuid" + - "\030\001 \001(\t\022\014\n\004path\030\002 \001(\t\032\n\n\010Response:+\342?(\n&c" + - "om.databricks.rpc.RPC[$this.Response]\"\236\001" + - "\n\020GetMetricHistory\022\026\n\010run_uuid\030\001 \001(\tB\004\210\265" + - "\030\001\022\030\n\nmetric_key\030\002 \001(\tB\004\210\265\030\001\032+\n\010Response" + - "\022\037\n\007metrics\030\001 \003(\0132\016.mlflow.Metric:+\342?(\n&" + - "com.databricks.rpc.RPC[$this.Response]*6" + - "\n\010ViewType\022\017\n\013ACTIVE_ONLY\020\001\022\020\n\014DELETED_O" + - "NLY\020\002\022\007\n\003ALL\020\003*I\n\nSourceType\022\014\n\010NOTEBOOK" + - "\020\001\022\007\n\003JOB\020\002\022\013\n\007PROJECT\020\003\022\t\n\005LOCAL\020\004\022\014\n\007U" + - "NKNOWN\020\350\007*M\n\tRunStatus\022\013\n\007RUNNING\020\001\022\r\n\tS" + - "CHEDULED\020\002\022\014\n\010FINISHED\020\003\022\n\n\006FAILED\020\004\022\n\n\006" + - "KILLED\020\0052\256\024\n\rMlflowService\022\234\001\n\020createExp" + - "eriment\022\030.mlflow.CreateExperiment\032!.mlfl" + - "ow.CreateExperiment.Response\"K\202\265\030G\n0\n\004PO" + - "ST\022\"/preview/mlflow/experiments/create\032\004" + - "\010\002\020\000\020\001*\021Create Experiment\022\225\001\n\017listExperi" + - "ments\022\027.mlflow.ListExperiments\032 .mlflow." 
+ - "ListExperiments.Response\"G\202\265\030C\n-\n\003GET\022 /" + - "preview/mlflow/experiments/list\032\004\010\002\020\000\020\001*" + - "\020List Experiments\022\214\001\n\rgetExperiment\022\025.ml" + - "flow.GetExperiment\032\036.mlflow.GetExperimen" + - "t.Response\"D\202\265\030@\n,\n\003GET\022\037/preview/mlflow" + - "/experiments/get\032\004\010\002\020\000\020\001*\016Get Experiment" + - "\022\234\001\n\020deleteExperiment\022\030.mlflow.DeleteExp" + - "eriment\032!.mlflow.DeleteExperiment.Respon" + - "se\"K\202\265\030G\n0\n\004POST\022\"/preview/mlflow/experi" + - "ments/delete\032\004\010\002\020\000\020\001*\021Delete Experiment\022" + - "\241\001\n\021restoreExperiment\022\031.mlflow.RestoreEx" + - "periment\032\".mlflow.RestoreExperiment.Resp" + - "onse\"M\202\265\030I\n1\n\004POST\022#/preview/mlflow/expe" + - "riments/restore\032\004\010\002\020\000\020\001*\022Restore Experim" + - "ent\022\234\001\n\020updateExperiment\022\030.mlflow.Update" + - "Experiment\032!.mlflow.UpdateExperiment.Res" + - "ponse\"K\202\265\030G\n0\n\004POST\022\"/preview/mlflow/exp" + - "eriments/update\032\004\010\002\020\000\020\001*\021Update Experime" + - "nt\022y\n\tcreateRun\022\021.mlflow.CreateRun\032\032.mlf" + - "low.CreateRun.Response\"=\202\265\0309\n)\n\004POST\022\033/p" + - "review/mlflow/runs/create\032\004\010\002\020\000\020\001*\nCreat" + - "e Run\022y\n\tupdateRun\022\021.mlflow.UpdateRun\032\032." + - "mlflow.UpdateRun.Response\"=\202\265\0309\n)\n\004POST\022" + - "\033/preview/mlflow/runs/update\032\004\010\002\020\000\020\001*\nUp" + - "date Run\022m\n\tdeleteRun\022\021.mlflow.DeleteRun" + - "\032\032.mlflow.DeleteRun.Response\"1\202\265\030-\n)\n\004PO" + - "ST\022\033/preview/mlflow/runs/delete\032\004\010\002\020\000\020\001\022" + - "q\n\nrestoreRun\022\022.mlflow.RestoreRun\032\033.mlfl" + - "ow.RestoreRun.Response\"2\202\265\030.\n*\n\004POST\022\034/p" + - "review/mlflow/runs/restore\032\004\010\002\020\000\020\001\022}\n\tlo" + - "gMetric\022\021.mlflow.LogMetric\032\032.mlflow.LogM" + - "etric.Response\"A\202\265\030=\n-\n\004POST\022\037/preview/m" + - "lflow/runs/log-metric\032\004\010\002\020\000\020\001*\nLog Metri" + - "c\022|\n\010logParam\022\020.mlflow.LogParam\032\031.mlflow" + - ".LogParam.Response\"C\202\265\030?\n0\n\004POST\022\"/previ" + - "ew/mlflow/runs/log-parameter\032\004\010\002\020\000\020\001*\tLo" + - "g Param\022n\n\006setTag\022\016.mlflow.SetTag\032\027.mlfl" + - "ow.SetTag.Response\";\202\265\0307\n*\n\004POST\022\034/previ" + - "ew/mlflow/runs/set-tag\032\004\010\002\020\000\020\001*\007Set Tag\022" + - "i\n\006getRun\022\016.mlflow.GetRun\032\027.mlflow.GetRu" + - "n.Response\"6\202\265\0302\n%\n\003GET\022\030/preview/mlflow" + - "/runs/get\032\004\010\002\020\000\020\001*\007Get Run\022x\n\tgetMetric\022" + - "\021.mlflow.GetMetric\032\032.mlflow.GetMetric.Re" + - "sponse\"<\202\265\0308\n(\n\003GET\022\033/preview/mlflow/met" + - "rics/get\032\004\010\002\020\000\020\001*\nGet Metric\022s\n\010getParam" + - "\022\020.mlflow.GetParam\032\031.mlflow.GetParam.Res" + - "ponse\":\202\265\0306\n\'\n\003GET\022\032/preview/mlflow/para" + - "ms/get\032\004\010\002\020\000\020\001*\tGet Param\022\247\001\n\nsearchRuns" + - "\022\022.mlflow.SearchRuns\032\033.mlflow.SearchRuns" + - ".Response\"h\202\265\030d\n)\n\004POST\022\033/preview/mlflow" + - "/runs/search\032\004\010\002\020\000\n(\n\003GET\022\033/preview/mlfl" + - "ow/runs/search\032\004\010\002\020\000\020\001*\013Search Runs\022\213\001\n\r" + - 
"listArtifacts\022\025.mlflow.ListArtifacts\032\036.m" + - "lflow.ListArtifacts.Response\"C\202\265\030?\n+\n\003GE" + - "T\022\036/preview/mlflow/artifacts/list\032\004\010\002\020\000\020" + - "\001*\016List Artifacts\022\235\001\n\020getMetricHistory\022\030" + - ".mlflow.GetMetricHistory\032!.mlflow.GetMet" + - "ricHistory.Response\"L\202\265\030H\n0\n\003GET\022#/previ" + + ".Response]\"i\n\021RestoreExperiment\022\033\n\rexper" + + "iment_id\030\001 \001(\tB\004\370\206\031\001\032\n\n\010Response:+\342?(\n&c" + + "om.databricks.rpc.RPC[$this.Response]\"z\n" + + "\020UpdateExperiment\022\033\n\rexperiment_id\030\001 \001(\t" + + "B\004\370\206\031\001\022\020\n\010new_name\030\002 \001(\t\032\n\n\010Response:+\342?" + + "(\n&com.databricks.rpc.RPC[$this.Response" + + "]\"\270\001\n\tCreateRun\022\025\n\rexperiment_id\030\001 \001(\t\022\017" + + "\n\007user_id\030\002 \001(\t\022\022\n\nstart_time\030\007 \001(\003\022\034\n\004t" + + "ags\030\t \003(\0132\016.mlflow.RunTag\032$\n\010Response\022\030\n" + + "\003run\030\001 \001(\0132\013.mlflow.Run:+\342?(\n&com.databr" + + "icks.rpc.RPC[$this.Response]\"\276\001\n\tUpdateR" + + "un\022\016\n\006run_id\030\004 \001(\t\022\020\n\010run_uuid\030\001 \001(\t\022!\n\006" + + "status\030\002 \001(\0162\021.mlflow.RunStatus\022\020\n\010end_t" + + "ime\030\003 \001(\003\032-\n\010Response\022!\n\010run_info\030\001 \001(\0132" + + "\017.mlflow.RunInfo:+\342?(\n&com.databricks.rp" + + "c.RPC[$this.Response]\"Z\n\tDeleteRun\022\024\n\006ru" + + "n_id\030\001 \001(\tB\004\370\206\031\001\032\n\n\010Response:+\342?(\n&com.d" + + "atabricks.rpc.RPC[$this.Response]\"[\n\nRes" + + "toreRun\022\024\n\006run_id\030\001 \001(\tB\004\370\206\031\001\032\n\n\010Respons" + + "e:+\342?(\n&com.databricks.rpc.RPC[$this.Res" + + "ponse]\"\270\001\n\tLogMetric\022\016\n\006run_id\030\006 \001(\t\022\020\n\010" + + "run_uuid\030\001 \001(\t\022\021\n\003key\030\002 \001(\tB\004\370\206\031\001\022\023\n\005val" + + "ue\030\003 \001(\001B\004\370\206\031\001\022\027\n\ttimestamp\030\004 \001(\003B\004\370\206\031\001\022" + + "\017\n\004step\030\005 \001(\003:\0010\032\n\n\010Response:+\342?(\n&com.d" + + "atabricks.rpc.RPC[$this.Response]\"\215\001\n\010Lo" + + "gParam\022\016\n\006run_id\030\004 \001(\t\022\020\n\010run_uuid\030\001 \001(\t" + + "\022\021\n\003key\030\002 \001(\tB\004\370\206\031\001\022\023\n\005value\030\003 \001(\tB\004\370\206\031\001" + + "\032\n\n\010Response:+\342?(\n&com.databricks.rpc.RP" + + "C[$this.Response]\"\213\001\n\006SetTag\022\016\n\006run_id\030\004" + + " \001(\t\022\020\n\010run_uuid\030\001 \001(\t\022\021\n\003key\030\002 \001(\tB\004\370\206\031" + + "\001\022\023\n\005value\030\003 \001(\tB\004\370\206\031\001\032\n\n\010Response:+\342?(\n" + + "&com.databricks.rpc.RPC[$this.Response]\"" + + "m\n\tDeleteTag\022\024\n\006run_id\030\001 \001(\tB\004\370\206\031\001\022\021\n\003ke" + + "y\030\002 \001(\tB\004\370\206\031\001\032\n\n\010Response:+\342?(\n&com.data" + + "bricks.rpc.RPC[$this.Response]\"}\n\006GetRun" + + "\022\016\n\006run_id\030\002 \001(\t\022\020\n\010run_uuid\030\001 \001(\t\032$\n\010Re" + + "sponse\022\030\n\003run\030\001 \001(\0132\013.mlflow.Run:+\342?(\n&c" + + "om.databricks.rpc.RPC[$this.Response]\"\230\002" + + "\n\nSearchRuns\022\026\n\016experiment_ids\030\001 \003(\t\022\016\n\006" + + "filter\030\004 \001(\t\0224\n\rrun_view_type\030\003 \001(\0162\020.ml" + + "flow.ViewType:\013ACTIVE_ONLY\022\031\n\013max_result" + + "s\030\005 \001(\005:\0041000\022\020\n\010order_by\030\006 
\003(\t\022\022\n\npage_" + + "token\030\007 \001(\t\032>\n\010Response\022\031\n\004runs\030\001 \003(\0132\013." + + "mlflow.Run\022\027\n\017next_page_token\030\002 \001(\t:+\342?(" + + "\n&com.databricks.rpc.RPC[$this.Response]" + + "\"\253\001\n\rListArtifacts\022\016\n\006run_id\030\003 \001(\t\022\020\n\010ru" + + "n_uuid\030\001 \001(\t\022\014\n\004path\030\002 \001(\t\032=\n\010Response\022\020" + + "\n\010root_uri\030\001 \001(\t\022\037\n\005files\030\002 \003(\0132\020.mlflow" + + ".FileInfo:+\342?(\n&com.databricks.rpc.RPC[$" + + "this.Response]\";\n\010FileInfo\022\014\n\004path\030\001 \001(\t" + + "\022\016\n\006is_dir\030\002 \001(\010\022\021\n\tfile_size\030\003 \001(\003\"\250\001\n\020" + + "GetMetricHistory\022\016\n\006run_id\030\003 \001(\t\022\020\n\010run_" + + "uuid\030\001 \001(\t\022\030\n\nmetric_key\030\002 \001(\tB\004\370\206\031\001\032+\n\010" + + "Response\022\037\n\007metrics\030\001 \003(\0132\016.mlflow.Metri" + + "c:+\342?(\n&com.databricks.rpc.RPC[$this.Res" + + "ponse]\"\261\001\n\010LogBatch\022\016\n\006run_id\030\001 \001(\t\022\037\n\007m" + + "etrics\030\002 \003(\0132\016.mlflow.Metric\022\035\n\006params\030\003" + + " \003(\0132\r.mlflow.Param\022\034\n\004tags\030\004 \003(\0132\016.mlfl" + + "ow.RunTag\032\n\n\010Response:+\342?(\n&com.databric" + + "ks.rpc.RPC[$this.Response]*6\n\010ViewType\022\017" + + "\n\013ACTIVE_ONLY\020\001\022\020\n\014DELETED_ONLY\020\002\022\007\n\003ALL" + + "\020\003*I\n\nSourceType\022\014\n\010NOTEBOOK\020\001\022\007\n\003JOB\020\002\022" + + "\013\n\007PROJECT\020\003\022\t\n\005LOCAL\020\004\022\014\n\007UNKNOWN\020\350\007*M\n" + + "\tRunStatus\022\013\n\007RUNNING\020\001\022\r\n\tSCHEDULED\020\002\022\014" + + "\n\010FINISHED\020\003\022\n\n\006FAILED\020\004\022\n\n\006KILLED\020\0052\263\032\n" + + "\rMlflowService\022\306\001\n\020createExperiment\022\030.ml" + + "flow.CreateExperiment\032!.mlflow.CreateExp" + + "eriment.Response\"u\362\206\031q\n(\n\004POST\022\032/mlflow/" + + "experiments/create\032\004\010\002\020\000\n0\n\004POST\022\"/previ" + + "ew/mlflow/experiments/create\032\004\010\002\020\000\020\001*\021Cr" + + "eate Experiment\022\274\001\n\017listExperiments\022\027.ml" + + "flow.ListExperiments\032 .mlflow.ListExperi" + + "ments.Response\"n\362\206\031j\n%\n\003GET\022\030/mlflow/exp" + + "eriments/list\032\004\010\002\020\000\n-\n\003GET\022 /preview/mlf" + + "low/experiments/list\032\004\010\002\020\000\020\001*\020List Exper" + + "iments\022\262\001\n\rgetExperiment\022\025.mlflow.GetExp" + + "eriment\032\036.mlflow.GetExperiment.Response\"" + + "j\362\206\031f\n$\n\003GET\022\027/mlflow/experiments/get\032\004\010" + + "\002\020\000\n,\n\003GET\022\037/preview/mlflow/experiments/" + + "get\032\004\010\002\020\000\020\001*\016Get Experiment\022\306\001\n\020deleteEx" + + "periment\022\030.mlflow.DeleteExperiment\032!.mlf" + + "low.DeleteExperiment.Response\"u\362\206\031q\n(\n\004P" + + "OST\022\032/mlflow/experiments/delete\032\004\010\002\020\000\n0\n" + + "\004POST\022\"/preview/mlflow/experiments/delet" + + "e\032\004\010\002\020\000\020\001*\021Delete Experiment\022\314\001\n\021restore" + + "Experiment\022\031.mlflow.RestoreExperiment\032\"." 
+ + "mlflow.RestoreExperiment.Response\"x\362\206\031t\n" + + ")\n\004POST\022\033/mlflow/experiments/restore\032\004\010\002" + + "\020\000\n1\n\004POST\022#/preview/mlflow/experiments/" + + "restore\032\004\010\002\020\000\020\001*\022Restore Experiment\022\306\001\n\020" + + "updateExperiment\022\030.mlflow.UpdateExperime" + + "nt\032!.mlflow.UpdateExperiment.Response\"u\362" + + "\206\031q\n(\n\004POST\022\032/mlflow/experiments/update\032" + + "\004\010\002\020\000\n0\n\004POST\022\"/preview/mlflow/experimen" + + "ts/update\032\004\010\002\020\000\020\001*\021Update Experiment\022\234\001\n" + + "\tcreateRun\022\021.mlflow.CreateRun\032\032.mlflow.C" + + "reateRun.Response\"`\362\206\031\\\n!\n\004POST\022\023/mlflow" + + "/runs/create\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlf" + + "low/runs/create\032\004\010\002\020\000\020\001*\nCreate Run\022\234\001\n\t" + + "updateRun\022\021.mlflow.UpdateRun\032\032.mlflow.Up" + + "dateRun.Response\"`\362\206\031\\\n!\n\004POST\022\023/mlflow/" + + "runs/update\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlfl" + + "ow/runs/update\032\004\010\002\020\000\020\001*\nUpdate Run\022\234\001\n\td" + + "eleteRun\022\021.mlflow.DeleteRun\032\032.mlflow.Del" + + "eteRun.Response\"`\362\206\031\\\n!\n\004POST\022\023/mlflow/r" + + "uns/delete\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlflo" + + "w/runs/delete\032\004\010\002\020\000\020\001*\nDelete Run\022\242\001\n\nre" + + "storeRun\022\022.mlflow.RestoreRun\032\033.mlflow.Re" + + "storeRun.Response\"c\362\206\031_\n\"\n\004POST\022\024/mlflow" + + "/runs/restore\032\004\010\002\020\000\n*\n\004POST\022\034/preview/ml" + + "flow/runs/restore\032\004\010\002\020\000\020\001*\013Restore Run\022\244" + + "\001\n\tlogMetric\022\021.mlflow.LogMetric\032\032.mlflow" + + ".LogMetric.Response\"h\362\206\031d\n%\n\004POST\022\027/mlfl" + + "ow/runs/log-metric\032\004\010\002\020\000\n-\n\004POST\022\037/previ" + + "ew/mlflow/runs/log-metric\032\004\010\002\020\000\020\001*\nLog M" + + "etric\022\246\001\n\010logParam\022\020.mlflow.LogParam\032\031.m" + + "lflow.LogParam.Response\"m\362\206\031i\n(\n\004POST\022\032/" + + "mlflow/runs/log-parameter\032\004\010\002\020\000\n0\n\004POST\022" + + "\"/preview/mlflow/runs/log-parameter\032\004\010\002\020" + + "\000\020\001*\tLog Param\022\222\001\n\006setTag\022\016.mlflow.SetTa" + + "g\032\027.mlflow.SetTag.Response\"_\362\206\031[\n\"\n\004POST" + + "\022\024/mlflow/runs/set-tag\032\004\010\002\020\000\n*\n\004POST\022\034/p" + + "review/mlflow/runs/set-tag\032\004\010\002\020\000\020\001*\007Set " + + "Tag\022\244\001\n\tdeleteTag\022\021.mlflow.DeleteTag\032\032.m" + + "lflow.DeleteTag.Response\"h\362\206\031d\n%\n\004POST\022\027" + + "/mlflow/runs/delete-tag\032\004\010\002\020\000\n-\n\004POST\022\037/" + + "preview/mlflow/runs/delete-tag\032\004\010\002\020\000\020\001*\n" + + "Delete Tag\022\210\001\n\006getRun\022\016.mlflow.GetRun\032\027." 
+ + "mlflow.GetRun.Response\"U\362\206\031Q\n\035\n\003GET\022\020/ml" + + "flow/runs/get\032\004\010\002\020\000\n%\n\003GET\022\030/preview/mlf" + + "low/runs/get\032\004\010\002\020\000\020\001*\007Get Run\022\314\001\n\nsearch" + + "Runs\022\022.mlflow.SearchRuns\032\033.mlflow.Search" + + "Runs.Response\"\214\001\362\206\031\207\001\n!\n\004POST\022\023/mlflow/r" + + "uns/search\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlflo" + + "w/runs/search\032\004\010\002\020\000\n(\n\003GET\022\033/preview/mlf" + + "low/runs/search\032\004\010\002\020\000\020\001*\013Search Runs\022\260\001\n" + + "\rlistArtifacts\022\025.mlflow.ListArtifacts\032\036." + + "mlflow.ListArtifacts.Response\"h\362\206\031d\n#\n\003G" + + "ET\022\026/mlflow/artifacts/list\032\004\010\002\020\000\n+\n\003GET\022" + + "\036/preview/mlflow/artifacts/list\032\004\010\002\020\000\020\001*" + + "\016List Artifacts\022\307\001\n\020getMetricHistory\022\030.m" + + "lflow.GetMetricHistory\032!.mlflow.GetMetri" + + "cHistory.Response\"v\362\206\031r\n(\n\003GET\022\033/mlflow/" + + "metrics/get-history\032\004\010\002\020\000\n0\n\003GET\022#/previ" + "ew/mlflow/metrics/get-history\032\004\010\002\020\000\020\001*\022G" + - "et Metric HistoryB\036\n\024org.mlflow.api.prot" + - "o\220\001\001\342?\002\020\001" + "et Metric History\022\236\001\n\010logBatch\022\020.mlflow." + + "LogBatch\032\031.mlflow.LogBatch.Response\"e\362\206\031" + + "a\n$\n\004POST\022\026/mlflow/runs/log-batch\032\004\010\002\020\000\n" + + ",\n\004POST\022\036/preview/mlflow/runs/log-batch\032" + + "\004\010\002\020\000\020\001*\tLog BatchB\036\n\024org.mlflow.api.pro" + + "to\220\001\001\342?\002\020\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -47317,7 +42550,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_Metric_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_Metric_descriptor, - new java.lang.String[] { "Key", "Value", "Timestamp", }); + new java.lang.String[] { "Key", "Value", "Timestamp", "Step", }); internal_static_mlflow_Param_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_mlflow_Param_fieldAccessorTable = new @@ -47347,7 +42580,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_RunInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_RunInfo_descriptor, - new java.lang.String[] { "RunUuid", "ExperimentId", "Name", "SourceType", "SourceName", "UserId", "Status", "StartTime", "EndTime", "SourceVersion", "EntryPointName", "ArtifactUri", "LifecycleStage", }); + new java.lang.String[] { "RunId", "RunUuid", "ExperimentId", "UserId", "Status", "StartTime", "EndTime", "ArtifactUri", "LifecycleStage", }); internal_static_mlflow_Experiment_descriptor = getDescriptor().getMessageTypes().get(6); internal_static_mlflow_Experiment_fieldAccessorTable = new @@ -47431,7 +42664,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_CreateRun_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_CreateRun_descriptor, - new java.lang.String[] { "ExperimentId", "UserId", "RunName", "SourceType", "SourceName", "EntryPointName", "StartTime", "SourceVersion", "Tags", "ParentRunId", }); + new java.lang.String[] { "ExperimentId", "UserId", "StartTime", "Tags", }); internal_static_mlflow_CreateRun_Response_descriptor = internal_static_mlflow_CreateRun_descriptor.getNestedTypes().get(0); internal_static_mlflow_CreateRun_Response_fieldAccessorTable = new @@ -47443,7 +42676,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_UpdateRun_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_UpdateRun_descriptor, - new java.lang.String[] { "RunUuid", "Status", "EndTime", }); + new java.lang.String[] { "RunId", "RunUuid", "Status", "EndTime", }); internal_static_mlflow_UpdateRun_Response_descriptor = internal_static_mlflow_UpdateRun_descriptor.getNestedTypes().get(0); internal_static_mlflow_UpdateRun_Response_fieldAccessorTable = new @@ -47479,7 +42712,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_LogMetric_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_LogMetric_descriptor, - new java.lang.String[] { "RunUuid", "Key", "Value", "Timestamp", }); + new java.lang.String[] { "RunId", "RunUuid", "Key", "Value", "Timestamp", "Step", }); internal_static_mlflow_LogMetric_Response_descriptor = internal_static_mlflow_LogMetric_descriptor.getNestedTypes().get(0); internal_static_mlflow_LogMetric_Response_fieldAccessorTable = new @@ -47491,7 +42724,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_LogParam_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_LogParam_descriptor, - new java.lang.String[] { "RunUuid", "Key", "Value", }); + new java.lang.String[] { "RunId", "RunUuid", "Key", "Value", }); 
internal_static_mlflow_LogParam_Response_descriptor = internal_static_mlflow_LogParam_descriptor.getNestedTypes().get(0); internal_static_mlflow_LogParam_Response_fieldAccessorTable = new @@ -47503,103 +42736,55 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_SetTag_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_SetTag_descriptor, - new java.lang.String[] { "RunUuid", "Key", "Value", }); + new java.lang.String[] { "RunId", "RunUuid", "Key", "Value", }); internal_static_mlflow_SetTag_Response_descriptor = internal_static_mlflow_SetTag_descriptor.getNestedTypes().get(0); internal_static_mlflow_SetTag_Response_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_SetTag_Response_descriptor, new java.lang.String[] { }); - internal_static_mlflow_GetRun_descriptor = + internal_static_mlflow_DeleteTag_descriptor = getDescriptor().getMessageTypes().get(20); + internal_static_mlflow_DeleteTag_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_mlflow_DeleteTag_descriptor, + new java.lang.String[] { "RunId", "Key", }); + internal_static_mlflow_DeleteTag_Response_descriptor = + internal_static_mlflow_DeleteTag_descriptor.getNestedTypes().get(0); + internal_static_mlflow_DeleteTag_Response_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_mlflow_DeleteTag_Response_descriptor, + new java.lang.String[] { }); + internal_static_mlflow_GetRun_descriptor = + getDescriptor().getMessageTypes().get(21); internal_static_mlflow_GetRun_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_GetRun_descriptor, - new java.lang.String[] { "RunUuid", }); + new java.lang.String[] { "RunId", "RunUuid", }); internal_static_mlflow_GetRun_Response_descriptor = internal_static_mlflow_GetRun_descriptor.getNestedTypes().get(0); internal_static_mlflow_GetRun_Response_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_GetRun_Response_descriptor, new java.lang.String[] { "Run", }); - internal_static_mlflow_GetMetric_descriptor = - getDescriptor().getMessageTypes().get(21); - internal_static_mlflow_GetMetric_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_GetMetric_descriptor, - new java.lang.String[] { "RunUuid", "MetricKey", }); - internal_static_mlflow_GetMetric_Response_descriptor = - internal_static_mlflow_GetMetric_descriptor.getNestedTypes().get(0); - internal_static_mlflow_GetMetric_Response_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_GetMetric_Response_descriptor, - new java.lang.String[] { "Metric", }); - internal_static_mlflow_GetParam_descriptor = - getDescriptor().getMessageTypes().get(22); - internal_static_mlflow_GetParam_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_GetParam_descriptor, - new java.lang.String[] { "RunUuid", "ParamName", }); - internal_static_mlflow_GetParam_Response_descriptor = - internal_static_mlflow_GetParam_descriptor.getNestedTypes().get(0); - internal_static_mlflow_GetParam_Response_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_GetParam_Response_descriptor, - new 
java.lang.String[] { "Parameter", }); - internal_static_mlflow_SearchExpression_descriptor = - getDescriptor().getMessageTypes().get(23); - internal_static_mlflow_SearchExpression_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_SearchExpression_descriptor, - new java.lang.String[] { "Metric", "Parameter", "Expression", }); - internal_static_mlflow_MetricSearchExpression_descriptor = - getDescriptor().getMessageTypes().get(24); - internal_static_mlflow_MetricSearchExpression_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_MetricSearchExpression_descriptor, - new java.lang.String[] { "Key", "Float", "Double", "Clause", }); - internal_static_mlflow_ParameterSearchExpression_descriptor = - getDescriptor().getMessageTypes().get(25); - internal_static_mlflow_ParameterSearchExpression_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_ParameterSearchExpression_descriptor, - new java.lang.String[] { "Key", "String", "Clause", }); - internal_static_mlflow_StringClause_descriptor = - getDescriptor().getMessageTypes().get(26); - internal_static_mlflow_StringClause_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_StringClause_descriptor, - new java.lang.String[] { "Comparator", "Value", }); - internal_static_mlflow_FloatClause_descriptor = - getDescriptor().getMessageTypes().get(27); - internal_static_mlflow_FloatClause_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_FloatClause_descriptor, - new java.lang.String[] { "Comparator", "Value", }); - internal_static_mlflow_DoubleClause_descriptor = - getDescriptor().getMessageTypes().get(28); - internal_static_mlflow_DoubleClause_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_DoubleClause_descriptor, - new java.lang.String[] { "Comparator", "Value", }); internal_static_mlflow_SearchRuns_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(22); internal_static_mlflow_SearchRuns_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_SearchRuns_descriptor, - new java.lang.String[] { "ExperimentIds", "AndedExpressions", "RunViewType", }); + new java.lang.String[] { "ExperimentIds", "Filter", "RunViewType", "MaxResults", "OrderBy", "PageToken", }); internal_static_mlflow_SearchRuns_Response_descriptor = internal_static_mlflow_SearchRuns_descriptor.getNestedTypes().get(0); internal_static_mlflow_SearchRuns_Response_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_SearchRuns_Response_descriptor, - new java.lang.String[] { "Runs", }); + new java.lang.String[] { "Runs", "NextPageToken", }); internal_static_mlflow_ListArtifacts_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(23); internal_static_mlflow_ListArtifacts_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_ListArtifacts_descriptor, - new java.lang.String[] { "RunUuid", "Path", }); + new java.lang.String[] { "RunId", "RunUuid", "Path", }); internal_static_mlflow_ListArtifacts_Response_descriptor = internal_static_mlflow_ListArtifacts_descriptor.getNestedTypes().get(0); 
internal_static_mlflow_ListArtifacts_Response_fieldAccessorTable = new @@ -47607,41 +42792,41 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_mlflow_ListArtifacts_Response_descriptor, new java.lang.String[] { "RootUri", "Files", }); internal_static_mlflow_FileInfo_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(24); internal_static_mlflow_FileInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_FileInfo_descriptor, new java.lang.String[] { "Path", "IsDir", "FileSize", }); - internal_static_mlflow_GetArtifact_descriptor = - getDescriptor().getMessageTypes().get(32); - internal_static_mlflow_GetArtifact_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_GetArtifact_descriptor, - new java.lang.String[] { "RunUuid", "Path", }); - internal_static_mlflow_GetArtifact_Response_descriptor = - internal_static_mlflow_GetArtifact_descriptor.getNestedTypes().get(0); - internal_static_mlflow_GetArtifact_Response_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_mlflow_GetArtifact_Response_descriptor, - new java.lang.String[] { }); internal_static_mlflow_GetMetricHistory_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(25); internal_static_mlflow_GetMetricHistory_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_GetMetricHistory_descriptor, - new java.lang.String[] { "RunUuid", "MetricKey", }); + new java.lang.String[] { "RunId", "RunUuid", "MetricKey", }); internal_static_mlflow_GetMetricHistory_Response_descriptor = internal_static_mlflow_GetMetricHistory_descriptor.getNestedTypes().get(0); internal_static_mlflow_GetMetricHistory_Response_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_mlflow_GetMetricHistory_Response_descriptor, new java.lang.String[] { "Metrics", }); + internal_static_mlflow_LogBatch_descriptor = + getDescriptor().getMessageTypes().get(26); + internal_static_mlflow_LogBatch_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_mlflow_LogBatch_descriptor, + new java.lang.String[] { "RunId", "Metrics", "Params", "Tags", }); + internal_static_mlflow_LogBatch_Response_descriptor = + internal_static_mlflow_LogBatch_descriptor.getNestedTypes().get(0); + internal_static_mlflow_LogBatch_Response_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_mlflow_LogBatch_Response_descriptor, + new java.lang.String[] { }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.databricks.api.proto.databricks.Databricks.rpc); + registry.add(com.databricks.api.proto.databricks.Databricks.validateRequired); registry.add(org.mlflow.scalapb_interface.Scalapb.message); registry.add(org.mlflow.scalapb_interface.Scalapb.options); - registry.add(com.databricks.api.proto.databricks.Databricks.validateRequired); com.google.protobuf.Descriptors.FileDescriptor .internalUpdateFileDescriptor(descriptor, registry); org.mlflow.scalapb_interface.Scalapb.getDescriptor(); diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/ActiveRun.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/ActiveRun.java new file mode 100644 index 
0000000000000..dfc296ab71e1a --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/ActiveRun.java @@ -0,0 +1,202 @@ +package org.mlflow.tracking; + +import org.mlflow.api.proto.Service.*; + +import java.nio.file.Path; +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +/** + * Represents an active MLflow run and contains APIs to log data to the run. + */ +public class ActiveRun { + private MlflowClient client; + private RunInfo runInfo; + + ActiveRun(RunInfo runInfo, MlflowClient client) { + this.runInfo = runInfo; + this.client = client; + } + + /** + * Gets the run id of this run. + * @return The run id of this run. + */ + public String getId() { + return runInfo.getRunId(); + } + + /** + * Log a parameter under this run. + * + * @param key The name of the parameter. + * @param value The value of the parameter. + */ + public void logParam(String key, String value) { + client.logParam(getId(), key, value); + } + + /** + * Sets a tag under this run. + * + * @param key The name of the tag. + * @param value The value of the tag. + */ + public void setTag(String key, String value) { + client.setTag(getId(), key, value); + } + + /** + * Like {@link #logMetric(String, double, int)} with a default step of 0. + */ + public void logMetric(String key, double value) { + logMetric(key, value, 0); + } + + /** + * Logs a metric under this run. + * + * @param key The name of the metric. + * @param value The value of the metric. + * @param step The metric step. + */ + public void logMetric(String key, double value, int step) { + client.logMetric(getId(), key, value, System.currentTimeMillis(), step); + } + + /** + * Like {@link #logMetrics(Map, int)} with a default step of 0. + */ + public void logMetrics(Map<String, Double> metrics) { + logMetrics(metrics, 0); + } + + /** + * Log multiple metrics for this run. + * + * @param metrics A map of metric name to value. + * @param step The metric step. + */ + public void logMetrics(Map<String, Double> metrics, int step) { + List<Metric> protoMetrics = metrics.entrySet().stream() + .map((metric) -> + Metric.newBuilder() + .setKey(metric.getKey()) + .setValue(metric.getValue()) + .setTimestamp(System.currentTimeMillis()) + .setStep(step) + .build() + ).collect(Collectors.toList()); + client.logBatch(getId(), protoMetrics, Collections.emptyList(), Collections.emptyList()); + } + + /** + * Log multiple params for this run. + * + * @param params A map of param name to value. + */ + public void logParams(Map<String, String> params) { + List<Param> protoParams = params.entrySet().stream().map((param) -> + Param.newBuilder() + .setKey(param.getKey()) + .setValue(param.getValue()) + .build() + ).collect(Collectors.toList()); + client.logBatch(getId(), Collections.emptyList(), protoParams, Collections.emptyList()); + } + + /** + * Sets multiple tags for this run. + * + * @param tags A map of tag name to value. + */ + public void setTags(Map<String, String> tags) { + List<RunTag> protoTags = tags.entrySet().stream().map((tag) -> + RunTag.newBuilder().setKey(tag.getKey()).setValue(tag.getValue()).build() + ).collect(Collectors.toList()); + client.logBatch(getId(), Collections.emptyList(), Collections.emptyList(), protoTags); + } + + /** + * Like {@link #logArtifact(Path, String)} with the artifactPath set to the root of the + * artifact directory. + * + * @param localPath Path of file to upload. Must exist, and must be a simple file + * (not a directory).
+ */ + public void logArtifact(Path localPath) { + client.logArtifact(getId(), localPath.toFile()); + } + + /** + * Uploads the given local file to an artifactPath within the run's root artifact directory. + * For example, + * + *   <pre>
+   *   activeRun.logArtifact("/my/localModel", "model")
+   *   mlflowClient.listArtifacts(activeRun.getId(), "model") // returns "model/localModel"
+   *   </pre>
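+   * <p>The single-argument {@link #logArtifact(Path)} above uploads the file to the artifact
+   * root instead.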
+ + * @param localPath Path of file to upload. Must exist, and must be a simple file + * (not a directory). + * @param artifactPath Artifact path relative to the run's root directory given by + * {@link #getArtifactUri()}. Should NOT start with a /. + */ + public void logArtifact(Path localPath, String artifactPath) { + client.logArtifact(getId(), localPath.toFile(), artifactPath); + } + + /** + * Like {@link #logArtifacts(Path, String)} with the artifactPath set to the root of the + * artifact directory. + * + * @param localPath Directory to upload. Must exist, and must be a directory (not a simple file). + */ + public void logArtifacts(Path localPath) { + client.logArtifacts(getId(), localPath.toFile()); + } + + /** + * Uploads all files within the given local directory to an artifactPath within the run's root + * artifact directory. For example, if /my/local/dir/ contains two files "file1" and "file2", then + * + *   <pre>
+   *   activeRun.logArtifacts("/my/local/dir", "model")
+   *   mlflowClient.listArtifacts(activeRun.getId(), "model") // returns "model/file1" and
+   *                                                          // "model/file2"
+   *   </pre>
+ + * (i.e., the contents of the local directory are now available in model/). + * + * @param localPath Directory to upload. Must exist, and must be a directory (not a simple file). + * @param artifactPath Artifact path relative to the run's root directory given by + * {@link #getArtifactUri()}. Should NOT start with a /. + */ + public void logArtifacts(Path localPath, String artifactPath) { + client.logArtifacts(getId(), localPath.toFile(), artifactPath); + } + + /** + * Get the absolute URI of the run artifact directory root. + * @return The absolute URI of the run artifact directory root. + */ + public String getArtifactUri() { + return this.runInfo.getArtifactUri(); + } + + /** + * Ends the active MLflow run. + */ + public void endRun() { + endRun(RunStatus.FINISHED); + } + + /** + * Ends the active MLflow run. + * + * @param status The status of the run. + */ + public void endRun(RunStatus status) { + client.setTerminated(getId(), status); + } +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/EmptyPage.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/EmptyPage.java new file mode 100644 index 0000000000000..815c8f7235de0 --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/EmptyPage.java @@ -0,0 +1,48 @@ +package org.mlflow.tracking; + +import java.util.Collections; +import java.util.Optional; + +public class EmptyPage implements Page<Object> { + + /** + * Creates an empty page. + */ + EmptyPage() {} + + /** + * @return Zero + */ + public int getPageSize() { + return 0; + } + + /** + * @return False + */ + public boolean hasNextPage() { + return false; + } + + /** + * @return An empty Optional. + */ + public Optional<String> getNextPageToken() { + return Optional.empty(); + } + + /** + * @return An {@link org.mlflow.tracking.EmptyPage} + */ + public EmptyPage getNextPage() { + return this; + } + + /** + * @return An empty iterable. + */ + public Iterable<Object> getItems() { + return Collections.EMPTY_LIST; + } + +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClient.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClient.java index eef1a495fb772..0278272e60331 100644 --- a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClient.java +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClient.java @@ -10,7 +10,9 @@ import java.io.File; import java.net.URI; import java.net.URISyntaxException; +import java.util.ArrayList; import java.util.List; +import java.lang.Iterable; import java.util.Optional; import java.util.stream.Collectors; @@ -18,65 +20,72 @@ * Client to an MLflow Tracking Server. */ public class MlflowClient { - private static final long DEFAULT_EXPERIMENT_ID = 0; + protected static final String DEFAULT_EXPERIMENT_ID = "0"; private final MlflowProtobufMapper mapper = new MlflowProtobufMapper(); private final ArtifactRepositoryFactory artifactRepositoryFactory; private final MlflowHttpCaller httpCaller; private final MlflowHostCredsProvider hostCredsProvider; - /** Returns a default client based on the MLFLOW_TRACKING_URI environment variable. */ + /** Return a default client based on the MLFLOW_TRACKING_URI environment variable. */ public MlflowClient() { this(getDefaultTrackingUri()); } - /** Instantiates a new client using the provided tracking uri. */ + /** Instantiate a new client using the provided tracking uri.
*/ public MlflowClient(String trackingUri) { this(getHostCredsProviderFromTrackingUri(trackingUri)); } /** - * Creates a new MlflowClient; users should prefer constructing ApiClients via + * Create a new MlflowClient; users should prefer constructing ApiClients via * {@link #MlflowClient()} or {@link #MlflowClient(String)} if possible. */ public MlflowClient(MlflowHostCredsProvider hostCredsProvider) { this.hostCredsProvider = hostCredsProvider; this.httpCaller = new MlflowHttpCaller(hostCredsProvider); - this. artifactRepositoryFactory = new ArtifactRepositoryFactory(hostCredsProvider); + this.artifactRepositoryFactory = new ArtifactRepositoryFactory(hostCredsProvider); } - /** @return run associated with the id. */ - public Run getRun(String runUuid) { - URIBuilder builder = newURIBuilder("runs/get").setParameter("run_uuid", runUuid); + /** + * Get metadata, params, tags, and metrics for a run. A single value is returned for each metric + * key: the most recently logged metric value at the largest step. + * + * @return Run associated with the ID. + */ + public Run getRun(String runId) { + URIBuilder builder = newURIBuilder("runs/get") + .setParameter("run_uuid", runId) + .setParameter("run_id", runId); return mapper.toGetRunResponse(httpCaller.get(builder.toString())).getRun(); } - /** - * Creates a new run under the default experiment with no application name. - * @return RunInfo created by the server - */ - public RunInfo createRun() { - return createRun(DEFAULT_EXPERIMENT_ID); + /** + * Get the history of a metric under a run: one Metric entry, with its timestamp and step, for + * each value logged under the given key. + * @return A list of all logged values for the metric key. + */ + public List<Metric> getMetricHistory(String runId, String key) { + URIBuilder builder = newURIBuilder("metrics/get-history") + .setParameter("run_uuid", runId) + .setParameter("run_id", runId) + .setParameter("metric_key", key); + return mapper.toGetMetricHistoryResponse(httpCaller.get(builder.toString())).getMetricsList(); } /** - * Creates a new run under the given experiment with no application name. - * @return RunInfo created by the server + * Create a new run under the default experiment with no application name. + * @return RunInfo created by the server. */ - public RunInfo createRun(long experimentId) { - return createRun(experimentId, "Java Application"); + public RunInfo createRun() { + return createRun(DEFAULT_EXPERIMENT_ID); } /** - * Creates a new run under the given experiment with the given application name. - * @return RunInfo created by the server + * Create a new run under the given experiment. + * @return RunInfo created by the server. */ - public RunInfo createRun(long experimentId, String appName) { + public RunInfo createRun(String experimentId) { CreateRun.Builder request = CreateRun.newBuilder(); request.setExperimentId(experimentId); - request.setSourceName(appName); - request.setSourceType(SourceType.LOCAL); request.setStartTime(System.currentTimeMillis()); + // userId is deprecated and will be removed in a future release. + // It should be set as the `mlflow.user` tag instead. String username = System.getProperty("user.name"); if (username != null) { request.setUserId(System.getProperty("user.name")); @@ -85,8 +94,18 @@ public RunInfo createRun(long experimentId, String appName) { } /** - * Creates a new run. - * @return RunInfo created by the server + * Create a new run. This method allows providing all possible fields of CreateRun, and can be + * invoked as follows: + * + *   <pre>
+   *   import org.mlflow.api.proto.Service.CreateRun;
+   *   CreateRun.Builder request = CreateRun.newBuilder();
+   *   request.setExperimentId(experimentId);
+   *   request.setSourceVersion("my-version");
+   *   createRun(request.build());
+   *   </pre>
+ * + * @return RunInfo created by the server. */ public RunInfo createRun(CreateRun request) { String ijson = mapper.toJson(request); @@ -94,65 +113,195 @@ public RunInfo createRun(CreateRun request) { return mapper.toCreateRunResponse(ojson).getRun().getInfo(); } - /** @return a list of all RunInfos associated with the given experiment. */ - public List listRunInfos(long experimentId) { - SearchRuns request = SearchRuns.newBuilder().addExperimentIds(experimentId).build(); + /** + * @return A list of all RunInfos associated with the given experiment. + */ + public List listRunInfos(String experimentId) { + List experimentIds = new ArrayList<>(); + experimentIds.add(experimentId); + return searchRuns(experimentIds, null); + } + + /** + * Return RunInfos from provided list of experiments that satisfy the search query. + * @deprecated As of 1.1.0 - please use {@link #searchRuns(List, String, ViewType, int)} or + * similar that returns a page of Run results. + * + * @param experimentIds List of experiment IDs. + * @param searchFilter SQL compatible search query string. Format of this query string is + * similar to that specified on MLflow UI. + * Example : "params.model = 'LogisticRegression' and metrics.acc = 0.9" + * If null, the result will be equivalent to having an empty search filter. + * + * @return A list of all RunInfos that satisfy search filter. + */ + public List searchRuns(List experimentIds, String searchFilter) { + return searchRuns(experimentIds, searchFilter, ViewType.ACTIVE_ONLY, 1000).getItems().stream() + .map(Run::getInfo).collect(Collectors.toList()); + } + + /** + * Return RunInfos from provided list of experiments that satisfy the search query. + * @deprecated As of 1.1.0 - please use {@link #searchRuns(List, String, ViewType, int)} or + * similar that returns a page of Run results. + * + * @param experimentIds List of experiment IDs. + * @param searchFilter SQL compatible search query string. Format of this query string is + * similar to that specified on MLflow UI. + * Example : "params.model = 'LogisticRegression' and metrics.acc != 0.9" + * If null, the result will be equivalent to having an empty search filter. + * @param runViewType ViewType for expected runs. One of (ACTIVE_ONLY, DELETED_ONLY, ALL) + * If null, only runs with viewtype ACTIVE_ONLY will be searched. + * + * @return A list of all RunInfos that satisfy search filter. + */ + public List searchRuns(List experimentIds, + String searchFilter, + ViewType runViewType) { + return searchRuns(experimentIds, searchFilter, runViewType, 1000).getItems().stream() + .map(Run::getInfo).collect(Collectors.toList()); + } + + /** + * Return runs from provided list of experiments that satisfy the search query. + * + * @param experimentIds List of experiment IDs. + * @param searchFilter SQL compatible search query string. Format of this query string is + * similar to that specified on MLflow UI. + * Example : "params.model = 'LogisticRegression' and metrics.acc != 0.9" + * If null, the result will be equivalent to having an empty search filter. + * @param runViewType ViewType for expected runs. One of (ACTIVE_ONLY, DELETED_ONLY, ALL) + * If null, only runs with viewtype ACTIVE_ONLY will be searched. + * @param maxResults Maximum number of runs desired in one page. + * + * @return A list of all Runs that satisfy search filter. 
+ */ + public RunsPage searchRuns(List experimentIds, + String searchFilter, + ViewType runViewType, + int maxResults) { + return searchRuns(experimentIds, searchFilter, runViewType, maxResults, new ArrayList<>(), + null); + } + + /** + * Return runs from provided list of experiments that satisfy the search query. + * + * @param experimentIds List of experiment IDs. + * @param searchFilter SQL compatible search query string. Format of this query string is + * similar to that specified on MLflow UI. + * Example : "params.model = 'LogisticRegression' and metrics.acc != 0.9" + * If null, the result will be equivalent to having an empty search filter. + * @param runViewType ViewType for expected runs. One of (ACTIVE_ONLY, DELETED_ONLY, ALL) + * If null, only runs with viewtype ACTIVE_ONLY will be searched. + * @param maxResults Maximum number of runs desired in one page. + * @param orderBy List of properties to order by. Example: "metrics.acc DESC". + * + * @return A list of all Runs that satisfy search filter. + */ + public RunsPage searchRuns(List experimentIds, + String searchFilter, + ViewType runViewType, + int maxResults, + List orderBy) { + return searchRuns(experimentIds, searchFilter, runViewType, maxResults, orderBy, null); + } + + /** + * Return runs from provided list of experiments that satisfy the search query. + * + * @param experimentIds List of experiment IDs. + * @param searchFilter SQL compatible search query string. Format of this query string is + * similar to that specified on MLflow UI. + * Example : "params.model = 'LogisticRegression' and metrics.acc != 0.9" + * If null, the result will be equivalent to having an empty search filter. + * @param runViewType ViewType for expected runs. One of (ACTIVE_ONLY, DELETED_ONLY, ALL) + * If null, only runs with viewtype ACTIVE_ONLY will be searched. + * @param maxResults Maximum number of runs desired in one page. + * @param orderBy List of properties to order by. Example: "metrics.acc DESC". + * @param pageToken String token specifying the next page of results. It should be obtained from + * a call to {@link #searchRuns(List, String)}. + * + * @return A page of Runs that satisfy the search filter. + */ + public RunsPage searchRuns(List experimentIds, + String searchFilter, + ViewType runViewType, + int maxResults, + List orderBy, + String pageToken) { + SearchRuns.Builder builder = SearchRuns.newBuilder() + .addAllExperimentIds(experimentIds) + .addAllOrderBy(orderBy) + .setMaxResults(maxResults); + + if (searchFilter != null) { + builder.setFilter(searchFilter); + } + if (runViewType != null) { + builder.setRunViewType(runViewType); + } + if (pageToken != null) { + builder.setPageToken(pageToken); + } + SearchRuns request = builder.build(); String ijson = mapper.toJson(request); String ojson = sendPost("runs/search", ijson); - return mapper.toSearchRunsResponse(ojson).getRunsList().stream().map(Run::getInfo) - .collect(Collectors.toList()); + SearchRuns.Response response = mapper.toSearchRunsResponse(ojson); + return new RunsPage(response.getRunsList(), response.getNextPageToken(), experimentIds, + searchFilter, runViewType, maxResults, orderBy, this); } - /** @return a list of all Experiments. */ + /** @return A list of all experiments. */ public List listExperiments() { return mapper.toListExperimentsResponse(httpCaller.get("experiments/list")) .getExperimentsList(); } - /** @return an experiment with the given id. 
*/ - public GetExperiment.Response getExperiment(long experimentId) { + /** @return An experiment with the given ID. */ + public GetExperiment.Response getExperiment(String experimentId) { URIBuilder builder = newURIBuilder("experiments/get") - .setParameter("experiment_id", "" + experimentId); + .setParameter("experiment_id", experimentId); return mapper.toGetExperimentResponse(httpCaller.get(builder.toString())); } - /** @return the experiment associated with the given name or Optional.empty if none exists. */ + /** @return The experiment associated with the given name or Optional.empty if none exists. */ public Optional getExperimentByName(String experimentName) { return listExperiments().stream().filter(e -> e.getName() .equals(experimentName)).findFirst(); } /** - * Creates a new experiment using the default artifact location provided by the server. + * Create a new experiment using the default artifact location provided by the server. * @param experimentName Name of the experiment. This must be unique across all experiments. - * @return experiment id of the newly created experiment. + * @return Experiment ID of the newly created experiment. */ - public long createExperiment(String experimentName) { + public String createExperiment(String experimentName) { String ijson = mapper.makeCreateExperimentRequest(experimentName); String ojson = httpCaller.post("experiments/create", ijson); return mapper.toCreateExperimentResponse(ojson).getExperimentId(); } - /** Mark an experiment and associated runs, params, metrics, etc for deletion. */ - public void deleteExperiment(long experimentId) { + /** Mark an experiment and associated runs, params, metrics, etc. for deletion. */ + public void deleteExperiment(String experimentId) { String ijson = mapper.makeDeleteExperimentRequest(experimentId); httpCaller.post("experiments/delete", ijson); } /** Restore an experiment marked for deletion. */ - public void restoreExperiment(long experimentId) { + public void restoreExperiment(String experimentId) { String ijson = mapper.makeRestoreExperimentRequest(experimentId); httpCaller.post("experiments/restore", ijson); } /** Update an experiment's name. The new name must be unique. */ - public void renameExperiment(long experimentId, String newName) { + public void renameExperiment(String experimentId, String newName) { String ijson = mapper.makeUpdateExperimentRequest(experimentId, newName); httpCaller.post("experiments/update", ijson); } /** - * Deletes a run with the given ID. + * Delete a run with the given ID. */ public void deleteRun(String runId) { String ijson = mapper.makeDeleteRun(runId); @@ -160,7 +309,7 @@ public void deleteRun(String runId) { } /** - * Restores a deleted run with the given ID. + * Restore a deleted run with the given ID. */ public void restoreRun(String runId) { String ijson = mapper.makeRestoreRun(runId); @@ -168,57 +317,106 @@ public void restoreRun(String runId) { } /** - * Logs a parameter against the given run, as a key-value pair. + * Log a parameter against the given run, as a key-value pair. * This cannot be called against the same parameter key more than once. */ - public void logParam(String runUuid, String key, String value) { - sendPost("runs/log-parameter", mapper.makeLogParam(runUuid, key, value)); + public void logParam(String runId, String key, String value) { + sendPost("runs/log-parameter", mapper.makeLogParam(runId, key, value)); } /** - * Logs a new metric against the given run, as a key-value pair. 
- * New values for the same metric may be recorded over time, and are marked with a timestamp. - * */ - public void logMetric(String runUuid, String key, double value) { - sendPost("runs/log-metric", mapper.makeLogMetric(runUuid, key, value, - System.currentTimeMillis())); + * Log a new metric against the given run, as a key-value pair. Metrics are recorded + * against two axes: timestamp and step. This method uses the number of milliseconds + * since the Unix epoch for the timestamp, and it uses the default step of zero. + * + * @param runId The ID of the run in which to record the metric. + * @param key The key identifying the metric for which to record the specified value. + * @param value The value of the metric. + */ + public void logMetric(String runId, String key, double value) { + logMetric(runId, key, value, System.currentTimeMillis(), 0); } /** - * Logs a new tag against the given run, as a key-value pair. + * Log a new metric against the given run, as a key-value pair. Metrics are recorded + * against two axes: timestamp and step. + * + * @param runId The ID of the run in which to record the metric. + * @param key The key identifying the metric for which to record the specified value. + * @param value The value of the metric. + * @param timestamp The timestamp at which to record the metric value. + * @param step The step at which to record the metric value. */ - public void setTag(String runUuid, String key, String value) { - sendPost("runs/set-tag", mapper.makeSetTag(runUuid, key, value)); + public void logMetric(String runId, String key, double value, long timestamp, long step) { + sendPost("runs/log-metric", mapper.makeLogMetric(runId, key, value, timestamp, step)); } - /** Sets the status of a run to be FINISHED at the current time. */ - public void setTerminated(String runUuid) { - setTerminated(runUuid, RunStatus.FINISHED); + /** + * Log a new tag against the given run, as a key-value pair. + * @param runId The ID of the run on which to set the tag + * @param key The key used to identify the tag. + * @param value The value of the tag. + */ + public void setTag(String runId, String key, String value) { + sendPost("runs/set-tag", mapper.makeSetTag(runId, key, value)); + } + + /** + * Delete a tag on the run ID with a specific key. This is irreversible. + * @param runId String ID of the run + * @param key Name of the tag + */ + public void deleteTag(String runId, String key) { + sendPost("runs/delete-tag", mapper.makeDeleteTag(runId, key)); + } + + /** + * Log multiple metrics, params, and/or tags against a given run (argument runId). + * Argument metrics, params, and tag iterables can be nulls. + */ + public void logBatch(String runId, + Iterable metrics, + Iterable params, + Iterable tags) { + sendPost("runs/log-batch", mapper.makeLogBatch(runId, metrics, params, tags)); + } + + /** Set the status of a run to be FINISHED at the current time. */ + public void setTerminated(String runId) { + setTerminated(runId, RunStatus.FINISHED); } - /** Sets the status of a run to be completed at the current time. */ - public void setTerminated(String runUuid, RunStatus status) { - setTerminated(runUuid, status, System.currentTimeMillis()); + /** Set the status of a run to be completed at the current time. */ + public void setTerminated(String runId, RunStatus status) { + setTerminated(runId, status, System.currentTimeMillis()); } - /** Sets the status of a run to be completed at the given endTime. 
*/ - public void setTerminated(String runUuid, RunStatus status, long endTime) { - sendPost("runs/update", mapper.makeUpdateRun(runUuid, status, endTime)); + /** Set the status of a run to be completed at the given endTime. */ + public void setTerminated(String runId, RunStatus status, long endTime) { + sendPost("runs/update", mapper.makeUpdateRun(runId, status, endTime)); } /** + * :: Experimental :: + * + * This API may change or be removed in a future release without warning. + * * Send a GET to the following path, including query parameters. * This is mostly an internal API, but allows making lower-level or unsupported requests. - * @return JSON response from the server + * @return JSON response from the server. */ public String sendGet(String path) { return httpCaller.get(path); } /** + * :: Experimental :: + * + * This API may change or be removed in a future release without warning. + * * Send a POST to the following path, with a String-encoded JSON body. * This is mostly an internal API, but allows making lower-level or unsupported requests. - * @return JSON response from the server + * @return JSON response from the server. */ public String sendPost(String path, String json) { return httpCaller.post(path, json); @@ -240,7 +438,7 @@ private URIBuilder newURIBuilder(String base) { } /** - * Returns the tracking URI from MLFLOW_TRACKING_URI or throws if not available. + * Return the tracking URI from MLFLOW_TRACKING_URI or throws if not available. * This is used as the body of the no-argument constructor, as constructors must first call * this(). */ @@ -254,7 +452,7 @@ private static String getDefaultTrackingUri() { } /** - * Returns the MlflowHostCredsProvider associated with the given tracking URI. + * Return the MlflowHostCredsProvider associated with the given tracking URI. * This is used as the body of the String-argument constructor, as constructors must first call * this(). */ @@ -285,7 +483,7 @@ private static MlflowHostCredsProvider getHostCredsProviderFromTrackingUri(Strin } /** - * Uploads the given local file to the run's root artifact directory. For example, + * Upload the given local file to the run's root artifact directory. For example, * *
    *   logArtifact(runId, "/my/localModel")
@@ -300,7 +498,7 @@ public void logArtifact(String runId, File localFile) {
   }
 
   /**
-   * Uploads the given local file to an artifactPath within the run's root directory. For example,
+   * Upload the given local file to an artifactPath within the run's root directory. For example,
    *
    *   <pre>
    *   logArtifact(runId, "/my/localModel", "model")
@@ -319,7 +517,7 @@ public void logArtifact(String runId, File localFile, String artifactPath) {
   }
 
   /**
-   * Uploads all files within the given local directory the run's root artifact directory.
+   * Upload all files within the given local directory to the run's root artifact directory.
    * For example, if /my/local/dir/ contains two files "file1" and "file2", then
    *
    *   <pre>
@@ -334,9 +532,8 @@ public void logArtifacts(String runId, File localDir) {
     getArtifactRepository(runId).logArtifacts(localDir);
   }
 
-
   /**
-   * Uploads all files within the given local director an artifactPath within the run's root
+   * Upload all files within the given local directory to an artifactPath within the run's root
    * artifact directory. For example, if /my/local/dir/ contains two files "file1" and "file2", then
    *
    *   <pre>
@@ -356,7 +553,7 @@ public void logArtifacts(String runId, File localDir, String artifactPath) {
   }
 
   /**
-   * Lists the artifacts immediately under the run's root artifact directory. This does not
+   * List the artifacts immediately under the run's root artifact directory. This does not
    * recursively list; instead, it will return FileInfos with isDir=true where further
    * listing may be done.
    * @param runId Run ID of an existing MLflow run.
@@ -366,7 +563,7 @@ public List<FileInfo> listArtifacts(String runId) {
   }
 
   /**
-   * Lists the artifacts immediately under the given artifactPath within the run's root artifact
+   * List the artifacts immediately under the given artifactPath within the run's root artifact
    * directory. This does not recursively list; instead, it will return FileInfos with isDir=true
    * where further listing may be done.
    * @param runId Run ID of an existing MLflow run.
@@ -378,7 +575,7 @@ public List<FileInfo> listArtifacts(String runId, String artifactPath) {
   }
 
   /**
-   * Returns a local directory containing *all* artifacts within the run's artifact directory.
+   * Return a local directory containing *all* artifacts within the run's artifact directory.
    * Note that this will download the entire directory path, and so may be expensive if
    * the directory has a lot of data.
    * @param runId Run ID of an existing MLflow run.
@@ -388,7 +585,7 @@ public File downloadArtifacts(String runId) {
   }
 
   /**
-   * Returns a local file or directory containing all artifacts within the given artifactPath
+   * Return a local file or directory containing all artifacts within the given artifactPath
    * within the run's root artifactDirectory. For example, if "model/file1" and "model/file2"
    * exist within the artifact directory, then
    *
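The paged `searchRuns` overloads above are easiest to see end to end with the page token threaded through explicitly. A minimal sketch, assuming a tracking server reachable via `MLFLOW_TRACKING_URI` and an existing experiment; the experiment ID `"0"`, the filter string, and the class name are illustrative:

```java
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

import org.mlflow.api.proto.Service.Run;
import org.mlflow.api.proto.Service.ViewType;
import org.mlflow.tracking.MlflowClient;
import org.mlflow.tracking.RunsPage;

public class PagedSearchExample {
  public static void main(String[] args) {
    // Resolves the tracking server from MLFLOW_TRACKING_URI.
    MlflowClient client = new MlflowClient();

    List<String> experimentIds = Arrays.asList("0");
    List<String> orderBy = Arrays.asList("metrics.accuracy_score DESC");

    // A null token requests the first page; the server returns a token
    // with each page while more results remain.
    String pageToken = null;
    do {
      RunsPage page = client.searchRuns(
          experimentIds,
          "metrics.accuracy_score > 0.5", // null means "no filter"
          ViewType.ACTIVE_ONLY,
          100,                            // maxResults per page
          orderBy,
          pageToken);
      for (Run run : page.getItems()) {
        System.out.println(run.getInfo().getRunId());
      }
      Optional<String> next = page.getNextPageToken();
      pageToken = next.orElse(null);
    } while (pageToken != null);
  }
}
```

Alternatively, `RunsPage.getNextPage()` replays the stored search criteria with the returned token and yields an `EmptyPage` once the results are exhausted, so callers that stay inside the package can loop on pages directly.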
diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClientVersion.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClientVersion.java
new file mode 100644
index 0000000000000..f37fe68da450b
--- /dev/null
+++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowClientVersion.java
@@ -0,0 +1,34 @@
+package org.mlflow.tracking;
+
+import java.io.InputStream;
+import java.util.Properties;
+
+import com.google.common.base.Supplier;
+import com.google.common.base.Suppliers;
+
+/** Returns the version of the MLflow project this client was compiled against. */
+public class MlflowClientVersion {
+  // To avoid extra disk IO during class loading (static initialization), we lazily read the
+  // pom.properties file on first access and then cache the result to avoid future IO.
+  private static Supplier<String> clientVersionSupplier = Suppliers.memoize(() -> {
+    try {
+      Properties p = new Properties();
+      InputStream is = MlflowClientVersion.class.getResourceAsStream(
+        "/META-INF/maven/org.mlflow/mlflow-client/pom.properties");
+      if (is == null) {
+        return "";
+      }
+      p.load(is);
+      return p.getProperty("version", "");
+    } catch (Exception e) {
+      return "";
+    }
+  });
+
+  private MlflowClientVersion() {}
+
+  /** @return MLflow client version (e.g., 0.9.1) or an empty string if detection fails. */
+  public static String getClientVersion() {
+    return clientVersionSupplier.get();
+  }
+}
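Because `getClientVersion()` degrades to an empty string instead of throwing, callers can append the version conditionally. A minimal sketch of that pattern, mirroring the `User-Agent` logic added to `MlflowHttpCaller` later in this diff (the class name here is illustrative):

```java
import org.mlflow.tracking.MlflowClientVersion;

public class UserAgentExample {
  public static void main(String[] args) {
    // The first call performs the one-time pom.properties read;
    // Suppliers.memoize caches the result for subsequent calls.
    String version = MlflowClientVersion.getClientVersion();

    // Append the version only when detection succeeded.
    String userAgent = "mlflow-java-client";
    if (!version.isEmpty()) {
      userAgent += "/" + version;
    }
    System.out.println(userAgent); // e.g., "mlflow-java-client/0.9.1"
  }
}
```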
diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowContext.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowContext.java
new file mode 100644
index 0000000000000..99aef3320ffd9
--- /dev/null
+++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowContext.java
@@ -0,0 +1,217 @@
+package org.mlflow.tracking;
+
+import org.mlflow.api.proto.Service.*;
+import org.mlflow.tracking.utils.DatabricksContext;
+import org.mlflow.tracking.utils.MlflowTagConstants;
+
+import java.util.*;
+import java.util.function.Consumer;
+
+/**
+ * Main entry point used to start MLflow runs to log to. This is a higher-level interface than
+ * {@code MlflowClient}, and provides convenience methods to keep track of active runs and to set
+ * default tags on runs which are created through {@code MlflowContext}.
+ *
+ * On construction, MlflowContext will choose a default experiment ID to log to depending on your
+ * environment. To log to a different experiment, use {@link #setExperimentId(String)} or
+ * {@link #setExperimentName(String)}.
+ *
+ * <pre>
+ * For example:
+ *   // Uses the URI set in the MLFLOW_TRACKING_URI environment variable.
+ *   // To use your own tracking uri set it in the call to "new MlflowContext("tracking-uri")"
+ *   MlflowContext mlflow = new MlflowContext();
+ *   ActiveRun run = mlflow.startRun("run-name");
+ *   run.logParam("alpha", "0.5");
+ *   run.logMetric("MSE", 0.0);
+ *   run.endRun();
+ * </pre>
+ */ +public class MlflowContext { + private MlflowClient client; + private String experimentId; + + /** + * Constructs a {@code MlflowContext} with a MlflowClient based on the MLFLOW_TRACKING_URI + * environment variable. + */ + public MlflowContext() { + this(new MlflowClient()); + } + + /** + * Constructs a {@code MlflowContext} which points to the specified trackingUri. + * + * @param trackingUri The URI to log to. + */ + public MlflowContext(String trackingUri) { + this(new MlflowClient(trackingUri)); + } + + /** + * Constructs a {@code MlflowContext} which points to the specified trackingUri. + * + * @param client The client used to log runs. + */ + public MlflowContext(MlflowClient client) { + this.client = client; + this.experimentId = getDefaultExperimentId(); + } + + /** + * Returns the client used to log runs. + * + * @return the client used to log runs. + */ + public MlflowClient getClient() { + return this.client; + } + + /** + * Sets the experiment to log runs to by name. + * @param experimentName the name of the experiment to log runs to. + * @throws IllegalArgumentException if the experiment name does not match an existing experiment + */ + public MlflowContext setExperimentName(String experimentName) throws IllegalArgumentException { + Optional experimentOpt = client.getExperimentByName(experimentName); + if (!experimentOpt.isPresent()) { + throw new IllegalArgumentException( + String.format("%s is not a valid experiment", experimentName)); + } + experimentId = experimentOpt.get().getExperimentId(); + return this; + } + + /** + * Sets the experiment to log runs to by ID. + * @param experimentId the id of the experiment to log runs to. + */ + public MlflowContext setExperimentId(String experimentId) { + this.experimentId = experimentId; + return this; + } + + /** + * Returns the experiment ID we are logging to. + * + * @return the experiment ID we are logging to. + */ + public String getExperimentId() { + return this.experimentId; + } + + /** + * Starts a MLflow run without a name. To log data to newly created MLflow run see the methods on + * {@link ActiveRun}. MLflow runs should be ended using {@link ActiveRun#endRun()} + * + * @return An {@code ActiveRun} object to log data to. + */ + public ActiveRun startRun() { + return startRun(null); + } + + /** + * Starts a MLflow run. To log data to newly created MLflow run see the methods on + * {@link ActiveRun}. MLflow runs should be ended using {@link ActiveRun#endRun()} + * + * @param runName The name of this run. For display purposes only and is stored in the + * mlflow.runName tag. + * @return An {@code ActiveRun} object to log data to. + */ + public ActiveRun startRun(String runName) { + return startRun(runName, null); + } + + /** + * Like {@link #startRun(String)} but sets the {@code mlflow.parentRunId} tag in order to create + * nested runs. + * + * @param runName The name of this run. For display purposes only and is stored in the + * mlflow.runName tag. + * @param parentRunId The ID of this run's parent + * @return An {@code ActiveRun} object to log data to. 
+ */ + public ActiveRun startRun(String runName, String parentRunId) { + Map tags = new HashMap<>(); + if (runName != null) { + tags.put(MlflowTagConstants.RUN_NAME, runName); + } + tags.put(MlflowTagConstants.USER, System.getProperty("user.name")); + tags.put(MlflowTagConstants.SOURCE_TYPE, "LOCAL"); + if (parentRunId != null) { + tags.put(MlflowTagConstants.PARENT_RUN_ID, parentRunId); + } + + // Add tags from DatabricksContext if they exist + DatabricksContext databricksContext = DatabricksContext.createIfAvailable(); + if (databricksContext != null) { + tags.putAll(databricksContext.getTags()); + } + + CreateRun.Builder createRunBuilder = CreateRun.newBuilder() + .setExperimentId(experimentId) + .setStartTime(System.currentTimeMillis()); + for (Map.Entry tag: tags.entrySet()) { + createRunBuilder.addTags( + RunTag.newBuilder().setKey(tag.getKey()).setValue(tag.getValue()).build()); + } + RunInfo runInfo = client.createRun(createRunBuilder.build()); + + ActiveRun newRun = new ActiveRun(runInfo, client); + return newRun; + } + + /** + * Like {@link #startRun(String)} but will terminate the run after the activeRunFunction is + * executed. + * + * For example + *
+   *   mlflowContext.withActiveRun((activeRun -> {
+   *     activeRun.logParam("layers", "4");
+   *   }));
+   *   </pre>
+ * + * @param activeRunFunction A function which takes an {@code ActiveRun} and logs data to it. + */ + public void withActiveRun(Consumer activeRunFunction) { + ActiveRun newRun = startRun(); + try { + activeRunFunction.accept(newRun); + } catch(Exception e) { + newRun.endRun(RunStatus.FAILED); + return; + } + newRun.endRun(RunStatus.FINISHED); + } + + /** + * Like {@link #withActiveRun(Consumer)} with an explicity run name. + * + * @param runName The name of this run. For display purposes only and is stored in the + * mlflow.runName tag. + * @param activeRunFunction A function which takes an {@code ActiveRun} and logs data to it. + */ + public void withActiveRun(String runName, Consumer activeRunFunction) { + ActiveRun newRun = startRun(runName); + try { + activeRunFunction.accept(newRun); + } catch(Exception e) { + newRun.endRun(RunStatus.FAILED); + return; + } + newRun.endRun(RunStatus.FINISHED); + } + + private static String getDefaultExperimentId() { + DatabricksContext databricksContext = DatabricksContext.createIfAvailable(); + if (databricksContext != null && databricksContext.isInDatabricksNotebook()) { + String notebookId = databricksContext.getNotebookId(); + if (notebookId != null) { + return notebookId; + } + } + return MlflowClient.DEFAULT_EXPERIMENT_ID; + } +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowHttpCaller.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowHttpCaller.java index 69c84e60d32ac..1bd8d9d311df9 100644 --- a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowHttpCaller.java +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowHttpCaller.java @@ -73,6 +73,7 @@ String post(String path, String json) { HttpPost request = new HttpPost(); fillRequestSettings(request, path); request.setEntity(new StringEntity(json, StandardCharsets.UTF_8)); + request.setHeader("Content-Type", "application/json"); try { HttpResponse response = httpClient.execute(request); checkError(response); @@ -114,6 +115,13 @@ private void fillRequestSettings(HttpRequestBase request, String path) { } else if (token != null) { request.addHeader("Authorization", "Bearer " + token); } + + String userAgent = "mlflow-java-client"; + String clientVersion = MlflowClientVersion.getClientVersion(); + if (!clientVersion.isEmpty()) { + userAgent += "/" + clientVersion; + } + request.addHeader("User-Agent", userAgent); } private boolean isError(int statusCode) { diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowProtobufMapper.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowProtobufMapper.java index 644d191efaf37..f2a6dbee4176f 100644 --- a/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowProtobufMapper.java +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/MlflowProtobufMapper.java @@ -4,6 +4,8 @@ import com.google.protobuf.MessageOrBuilder; import com.google.protobuf.util.JsonFormat; +import java.lang.Iterable; + import org.mlflow.api.proto.Service.*; class MlflowProtobufMapper { @@ -14,67 +16,97 @@ String makeCreateExperimentRequest(String expName) { return print(builder); } - String makeDeleteExperimentRequest(long experimentId) { + String makeDeleteExperimentRequest(String experimentId) { DeleteExperiment.Builder builder = DeleteExperiment.newBuilder(); builder.setExperimentId(experimentId); return print(builder); } - String makeRestoreExperimentRequest(long experimentId) { + String makeRestoreExperimentRequest(String experimentId) { RestoreExperiment.Builder builder = 
RestoreExperiment.newBuilder(); builder.setExperimentId(experimentId); return print(builder); } - String makeUpdateExperimentRequest(long experimentId, String newExperimentName) { + String makeUpdateExperimentRequest(String experimentId, String newExperimentName) { UpdateExperiment.Builder builder = UpdateExperiment.newBuilder(); builder.setExperimentId(experimentId); builder.setNewName(newExperimentName); return print(builder); } - String makeLogParam(String runUuid, String key, String value) { + String makeLogParam(String runId, String key, String value) { LogParam.Builder builder = LogParam.newBuilder(); - builder.setRunUuid(runUuid); + builder.setRunUuid(runId); + builder.setRunId(runId); builder.setKey(key); builder.setValue(value); return print(builder); } - String makeLogMetric(String runUuid, String key, double value, long timestamp) { + String makeLogMetric(String runId, String key, double value, long timestamp, long step) { LogMetric.Builder builder = LogMetric.newBuilder(); - builder.setRunUuid(runUuid); + builder.setRunUuid(runId); + builder.setRunId(runId); builder.setKey(key); builder.setValue(value); builder.setTimestamp(timestamp); + builder.setStep(step); return print(builder); } - String makeSetTag(String runUuid, String key, String value) { + String makeSetTag(String runId, String key, String value) { SetTag.Builder builder = SetTag.newBuilder(); - builder.setRunUuid(runUuid); + builder.setRunUuid(runId); + builder.setRunId(runId); builder.setKey(key); builder.setValue(value); return print(builder); } - String makeUpdateRun(String runUuid, RunStatus status, long endTime) { + String makeDeleteTag(String runId, String key) { + DeleteTag.Builder builder = DeleteTag.newBuilder(); + builder.setRunId(runId); + builder.setKey(key); + return print(builder); + } + + String makeLogBatch(String runId, + Iterable metrics, + Iterable params, + Iterable tags) { + LogBatch.Builder builder = LogBatch.newBuilder(); + builder.setRunId(runId); + if (metrics != null) { + builder.addAllMetrics(metrics); + } + if (params != null) { + builder.addAllParams(params); + } + if (tags != null) { + builder.addAllTags(tags); + } + return print(builder); + } + + String makeUpdateRun(String runId, RunStatus status, long endTime) { UpdateRun.Builder builder = UpdateRun.newBuilder(); - builder.setRunUuid(runUuid); + builder.setRunUuid(runId); + builder.setRunId(runId); builder.setStatus(status); builder.setEndTime(endTime); return print(builder); } - String makeDeleteRun(String runUuid) { + String makeDeleteRun(String runId) { DeleteRun.Builder builder = DeleteRun.newBuilder(); - builder.setRunId(runUuid); + builder.setRunId(runId); return print(builder); } - String makeRestoreRun(String runUuid) { + String makeRestoreRun(String runId) { RestoreRun.Builder builder = RestoreRun.newBuilder(); - builder.setRunId(runUuid); + builder.setRunId(runId); return print(builder); } @@ -106,6 +138,12 @@ GetRun.Response toGetRunResponse(String json) { return builder.build(); } + GetMetricHistory.Response toGetMetricHistoryResponse(String json) { + GetMetricHistory.Response.Builder builder = GetMetricHistory.Response.newBuilder(); + merge(json, builder); + return builder.build(); + } + CreateRun.Response toCreateRunResponse(String json) { CreateRun.Response.Builder builder = CreateRun.Response.newBuilder(); merge(json, builder); diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/Page.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/Page.java new file mode 100644 index 
0000000000000..2dc495d75daa8 --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/Page.java @@ -0,0 +1,35 @@ +package org.mlflow.tracking; + +import java.lang.Iterable; +import java.util.Optional; + +interface Page { + + /** + * @return The number of elements in this page. + */ + public int getPageSize(); + + /** + * @return True if there are more pages that can be retrieved from the API. + */ + public boolean hasNextPage(); + + /** + * @return An Optional of the token string to get the next page. + * Empty if there is no next page. + */ + public Optional getNextPageToken(); + + /** + * @return Retrieves the next Page object using the next page token, + * or returns an empty page if there are no more pages. + */ + public Page getNextPage(); + + /** + * @return A List of the elements in this Page. + */ + public Iterable getItems(); + +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/RunsPage.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/RunsPage.java new file mode 100644 index 0000000000000..0d1472f32f95d --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/RunsPage.java @@ -0,0 +1,92 @@ +package org.mlflow.tracking; + +import java.util.Collections; +import java.util.List; +import java.util.ArrayList; +import java.util.Optional; +import org.mlflow.api.proto.Service.*; + +public class RunsPage implements Page { + + private final String token; + private final List runs; + + private final MlflowClient client; + private final List experimentIds; + private final String searchFilter; + private final ViewType runViewType; + private final List orderBy; + private final int maxResults; + + /** + * Creates a fixed size page of Runs. + */ + RunsPage(List runs, + String token, + List experimentIds, + String searchFilter, + ViewType runViewType, + int maxResults, + List orderBy, + MlflowClient client) { + this.runs = Collections.unmodifiableList(runs); + this.token = token; + this.experimentIds = experimentIds; + this.searchFilter = searchFilter; + this.runViewType = runViewType; + this.orderBy = orderBy; + this.maxResults = maxResults; + this.client = client; + } + + /** + * @return The number of runs in the page. + */ + public int getPageSize() { + return this.runs.size(); + } + + /** + * @return True if a token for the next page exists and isn't empty. Otherwise returns false. + */ + public boolean hasNextPage() { + return this.token != null && this.token != ""; + } + + /** + * @return An optional with the token for the next page. + * Empty if the token doesn't exist or is empty. + */ + public Optional getNextPageToken() { + if (this.hasNextPage()) { + return Optional.of(this.token); + } else { + return Optional.empty(); + } + } + + /** + * @return The next page of runs matching the search criteria. + * If there are no more pages, an {@link org.mlflow.tracking.EmptyPage} will be returned. + */ + public Page getNextPage() { + if (this.hasNextPage()) { + return this.client.searchRuns(this.experimentIds, + this.searchFilter, + this.runViewType, + this.maxResults, + this.orderBy, + this.token); + } else { + return new EmptyPage(); + } + } + + /** + * @return An iterable over the runs in this page. 
+ */ + public List getItems() { + return runs; + } + +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/creds/DatabricksDynamicHostCredsProvider.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/creds/DatabricksDynamicHostCredsProvider.java index 6f1796437b3d3..fedb41c0a83d7 100644 --- a/mlflow/java/client/src/main/java/org/mlflow/tracking/creds/DatabricksDynamicHostCredsProvider.java +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/creds/DatabricksDynamicHostCredsProvider.java @@ -3,6 +3,7 @@ import java.util.Map; import com.google.common.annotations.VisibleForTesting; +import org.mlflow.tracking.utils.DatabricksContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -17,21 +18,17 @@ private DatabricksDynamicHostCredsProvider(Map configProvider) { } public static DatabricksDynamicHostCredsProvider createIfAvailable() { - return createIfAvailable("com.databricks.config.DatabricksClientSettingsProvider"); + return createIfAvailable(DatabricksContext.CONFIG_PROVIDER_CLASS_NAME); } @VisibleForTesting static DatabricksDynamicHostCredsProvider createIfAvailable(String className) { - try { - Class cls = Class.forName(className); - return new DatabricksDynamicHostCredsProvider((Map) cls.newInstance()); - } catch (ClassNotFoundException e) { - return null; - } catch (IllegalAccessException | InstantiationException e) { - logger.warn("Found but failed to invoke dynamic config provider", e); + Map configProvider = + DatabricksContext.getConfigProviderIfAvailable(className); + if (configProvider == null) { return null; } - + return new DatabricksDynamicHostCredsProvider(configProvider); } @Override diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/package-info.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/package-info.java index a0eb7a848508a..f8c149ae3b4fe 100644 --- a/mlflow/java/client/src/main/java/org/mlflow/tracking/package-info.java +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/package-info.java @@ -1,2 +1,5 @@ -/** MLflow Tracking provides a Java CRUD interface to MLflow Experiments and Runs. */ +/** + * MLflow Tracking provides a Java CRUD interface to MLflow Experiments and Runs -- + * to create and log to MLflow runs, use the {@link org.mlflow.tracking.MlflowContext} interface. 
+ */ package org.mlflow.tracking; diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/FluentExample.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/FluentExample.java new file mode 100644 index 0000000000000..2485cdf9c96c3 --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/FluentExample.java @@ -0,0 +1,60 @@ +package org.mlflow.tracking.samples; + +import com.google.common.collect.ImmutableMap; +import org.mlflow.tracking.ActiveRun; +import org.mlflow.tracking.MlflowContext; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class FluentExample { + public static void main(String[] args) { + MlflowContext mlflow = new MlflowContext(); + ExecutorService executor = Executors.newFixedThreadPool(10); + + // Vanilla usage + { + ActiveRun run = mlflow.startRun("run"); + run.logParam("alpha", "0.0"); + run.logMetric("MSE", 0.0); + run.setTags(ImmutableMap.of( + "company", "databricks", + "org", "engineering" + )); + run.endRun(); + } + + // Lambda usage + { + mlflow.withActiveRun("lambda run", (activeRun -> { + activeRun.logParam("layers", "4"); + // Perform training code + })); + } + // Log one parent run and 5 children run + { + ActiveRun run = mlflow.startRun("parent run"); + for (int i = 0; i <= 5; i++) { + ActiveRun childRun = mlflow.startRun("child run", run.getId()); + childRun.logParam("iteration", Integer.toString(i)); + childRun.endRun(); + } + run.endRun(); + } + + // Log one parent run and 5 children run (multithreaded) + { + ActiveRun run = mlflow.startRun("parent run (multithreaded)"); + for (int i = 0; i <= 5; i++) { + final int i0 = i; + executor.submit(() -> { + ActiveRun childRun = mlflow.startRun("child run (multithreaded)", run.getId()); + childRun.logParam("iteration", Integer.toString(i0)); + childRun.endRun(); + }); + } + run.endRun(); + } + executor.shutdown(); + } +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/QuickStartDriver.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/QuickStartDriver.java index aac2096be6770..5402549b447a3 100644 --- a/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/QuickStartDriver.java +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/samples/QuickStartDriver.java @@ -25,7 +25,7 @@ void process(String[] args) throws Exception { System.out.println("====== createExperiment"); String expName = "Exp_" + System.currentTimeMillis(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); System.out.println("createExperiment: expId=" + expId); System.out.println("====== getExperiment"); @@ -48,13 +48,13 @@ void process(String[] args) throws Exception { System.out.println("getExperimentByName: " + exp3); } - void createRun(MlflowClient client, long expId) { + void createRun(MlflowClient client, String expId) { System.out.println("====== createRun"); // Create run String sourceFile = "MyFile.java"; - RunInfo runCreated = client.createRun(expId, sourceFile); + RunInfo runCreated = client.createRun(expId); System.out.println("CreateRun: " + runCreated); String runId = runCreated.getRunUuid(); diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/utils/DatabricksContext.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/utils/DatabricksContext.java new file mode 100644 index 0000000000000..f05139bfbd10e --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/utils/DatabricksContext.java @@ 
-0,0 +1,102 @@ +package org.mlflow.tracking.utils; + +import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.Map; + +public class DatabricksContext { + public static final String CONFIG_PROVIDER_CLASS_NAME = + "com.databricks.config.DatabricksClientSettingsProvider"; + private static final Logger logger = LoggerFactory.getLogger( + DatabricksContext.class); + private final Map configProvider; + + private DatabricksContext(Map configProvider) { + this.configProvider = configProvider; + } + + public static DatabricksContext createIfAvailable() { + return createIfAvailable(CONFIG_PROVIDER_CLASS_NAME); + } + + @VisibleForTesting + static DatabricksContext createIfAvailable(String className) { + Map configProvider = getConfigProviderIfAvailable(className); + if (configProvider == null) { + return null; + } + return new DatabricksContext(configProvider); + } + + public Map getTags() { + Map tags = new HashMap<>(); + if (!isInDatabricksNotebook()) { + return tags; + } + String notebookId = getNotebookId(); + if (notebookId != null) { + tags.put(MlflowTagConstants.DATABRICKS_NOTEBOOK_ID, notebookId); + } + String notebookPath = getNotebookPath(); + if (notebookPath != null) { + tags.put(MlflowTagConstants.SOURCE_NAME, notebookPath); + tags.put(MlflowTagConstants.DATABRICKS_NOTEBOOK_PATH, notebookPath); + tags.put(MlflowTagConstants.SOURCE_TYPE, "NOTEBOOK"); + } + String webappUrl = getWebappUrl(); + if (webappUrl != null) { + tags.put(MlflowTagConstants.DATABRICKS_WEBAPP_URL, webappUrl); + } + return tags; + } + + public boolean isInDatabricksNotebook() { + return configProvider.get("notebookId") != null; + } + + /** + * Should only be called if isInDatabricksNotebook() is true. + */ + public String getNotebookId() { + return configProvider.get("notebookId"); + } + + /** + * Should only be called if isInDatabricksNotebook() is true. + */ + private String getNotebookPath() { + if (!isInDatabricksNotebook()) { + throw new IllegalArgumentException( + "getNotebookPath() should not be called when isInDatabricksNotebook() is false" + ); + }; + return configProvider.get("notebookPath"); + } + + /** + * Should only be called if isInDatabricksNotebook() is true. 
+ */ + private String getWebappUrl() { + if (!isInDatabricksNotebook()) { + throw new IllegalArgumentException( + "getWebappUrl() should not be called when isInDatabricksNotebook() is false" + ); + }; + return configProvider.get("host"); + } + + public static Map getConfigProviderIfAvailable(String className) { + try { + Class cls = Class.forName(className); + return (Map) cls.newInstance(); + } catch (ClassNotFoundException e) { + return null; + } catch (IllegalAccessException | InstantiationException e) { + logger.warn("Found but failed to invoke dynamic config provider", e); + return null; + } + } +} diff --git a/mlflow/java/client/src/main/java/org/mlflow/tracking/utils/MlflowTagConstants.java b/mlflow/java/client/src/main/java/org/mlflow/tracking/utils/MlflowTagConstants.java new file mode 100644 index 0000000000000..0c0191cef7bfe --- /dev/null +++ b/mlflow/java/client/src/main/java/org/mlflow/tracking/utils/MlflowTagConstants.java @@ -0,0 +1,12 @@ +package org.mlflow.tracking.utils; + +public class MlflowTagConstants { + public static final String PARENT_RUN_ID = "mlflow.parentRunId"; + public static final String RUN_NAME = "mlflow.runName"; + public static final String USER = "mlflow.user"; + public static final String SOURCE_TYPE = "mlflow.source.type"; + public static final String SOURCE_NAME = "mlflow.source.name"; + public static final String DATABRICKS_NOTEBOOK_ID = "mlflow.databricks.notebookID"; + public static final String DATABRICKS_NOTEBOOK_PATH = "mlflow.databricks.notebookPath"; + public static final String DATABRICKS_WEBAPP_URL = "mlflow.databricks.webappURL"; +} diff --git a/mlflow/java/client/src/test/java/org/mlflow/tracking/ActiveRunTest.java b/mlflow/java/client/src/test/java/org/mlflow/tracking/ActiveRunTest.java new file mode 100644 index 0000000000000..adb2552c21dd6 --- /dev/null +++ b/mlflow/java/client/src/test/java/org/mlflow/tracking/ActiveRunTest.java @@ -0,0 +1,174 @@ +package org.mlflow.tracking; + +import com.google.common.collect.ImmutableMap; +import org.mlflow.api.proto.Service.*; +import org.mockito.ArgumentCaptor; +import org.testng.annotations.Test; +import org.testng.collections.Lists; + +import java.io.File; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.testng.Assert.*; +import static org.mockito.Mockito.*; + +public class ActiveRunTest { + + private static final String RUN_ID = "test-run-id"; + private static final String ARTIFACT_URI = "dbfs:/artifact-uri"; + + private MlflowClient mockClient; + + private ActiveRun getActiveRun() { + RunInfo r = RunInfo.newBuilder().setRunId(RUN_ID).setArtifactUri(ARTIFACT_URI).build(); + this.mockClient = mock(MlflowClient.class); + return new ActiveRun(r, mockClient); + } + + @Test + public void testGetId() { + assertEquals(getActiveRun().getId(), RUN_ID); + } + + @Test + public void testLogParam() { + getActiveRun().logParam("param-key", "param-value"); + verify(mockClient).logParam(RUN_ID, "param-key", "param-value"); + } + + @Test + public void testSetTag() { + getActiveRun().setTag("tag-key", "tag-value"); + verify(mockClient).setTag(RUN_ID, "tag-key", "tag-value"); + } + + @Test + public void testLogMetric() { + getActiveRun().logMetric("metric-key", 1.0); + // The any is for the timestamp. 
+ verify(mockClient).logMetric(eq(RUN_ID), eq("metric-key"), eq(1.0), anyLong(), eq(0L)); + } + + @Test + public void testLogMetricWithStep() { + getActiveRun().logMetric("metric-key", 1.0, 99); + // The any is for the timestamp. + verify(mockClient).logMetric(eq(RUN_ID), eq("metric-key"), eq(1.0), anyLong(), eq(99L)); + } + + @Test + public void testLogMetrics() { + ActiveRun activeRun = getActiveRun(); + ArgumentCaptor> metricsArg = ArgumentCaptor.forClass(Iterable.class); + activeRun.logMetrics(ImmutableMap.of("a", 0.0, "b", 1.0)); + verify(mockClient).logBatch(eq(RUN_ID), metricsArg.capture(), any(), any()); + + Set metrics = new HashSet<>(); + metricsArg.getValue().forEach(metrics::add); + + assertTrue(metrics.stream() + .anyMatch(m -> m.getKey().equals("a") && m.getValue() == 0.0 && m.getStep() == 0)); + assertTrue(metrics.stream() + .anyMatch(m -> m.getKey().equals("b") && m.getValue() == 1.0 && m.getStep() == 0)); + } + + @Test + public void testLogMetricsWithStep() { + ActiveRun activeRun = getActiveRun(); + ArgumentCaptor> metricsArg = ArgumentCaptor.forClass(Iterable.class); + activeRun.logMetrics(ImmutableMap.of("a", 0.0, "b", 1.0), 99); + verify(mockClient).logBatch(eq(RUN_ID), metricsArg.capture(), any(), any()); + + Set metrics = new HashSet<>(); + metricsArg.getValue().forEach(metrics::add); + + assertTrue(metrics.stream() + .anyMatch(m -> m.getKey().equals("a") && m.getValue() == 0.0 && m.getStep() == 99)); + assertTrue(metrics.stream() + .anyMatch(m -> m.getKey().equals("b") && m.getValue() == 1.0 && m.getStep() == 99)); + } + + @Test + public void testLogParams() { + ActiveRun activeRun = getActiveRun(); + ArgumentCaptor> paramsArg = ArgumentCaptor.forClass(Iterable.class); + activeRun.logParams(ImmutableMap.of("a", "a", "b", "b")); + verify(mockClient).logBatch(eq(RUN_ID), any(), paramsArg.capture(), any()); + + Set params = new HashSet<>(); + paramsArg.getValue().forEach(params::add); + + assertTrue(params.stream() + .anyMatch(p -> p.getKey().equals("a") && p.getValue().equals("a"))); + assertTrue(params.stream() + .anyMatch(p -> p.getKey().equals("b") && p.getValue().equals("b"))); + } + + @Test + public void testSetTags() { + ActiveRun activeRun = getActiveRun(); + ArgumentCaptor> tagsArg = ArgumentCaptor.forClass(Iterable.class); + activeRun.setTags(ImmutableMap.of("a", "a", "b", "b")); + verify(mockClient).logBatch(eq(RUN_ID), any(), any(), tagsArg.capture()); + + Set tags = new HashSet<>(); + tagsArg.getValue().forEach(tags::add); + + assertTrue(tags.stream() + .anyMatch(t -> t.getKey().equals("a") && t.getValue().equals("a"))); + assertTrue(tags.stream() + .anyMatch(t -> t.getKey().equals("b") && t.getValue().equals("b"))); + } + + @Test + public void testLogArtifact() { + ActiveRun activeRun = getActiveRun(); + activeRun.logArtifact(Paths.get("test")); + verify(mockClient).logArtifact(RUN_ID, new File("test")); + } + + @Test + public void testLogArtifactWithArtifactPath() { + ActiveRun activeRun = getActiveRun(); + activeRun.logArtifact(Paths.get("test"), "artifact-path"); + verify(mockClient).logArtifact(RUN_ID, new File("test"), "artifact-path"); + } + + @Test + public void testLogArtifacts() { + ActiveRun activeRun = getActiveRun(); + activeRun.logArtifacts(Paths.get("test")); + verify(mockClient).logArtifacts(RUN_ID, new File("test")); + } + + @Test + public void testLogArtifactsWithArtifactPath() { + ActiveRun activeRun = getActiveRun(); + activeRun.logArtifacts(Paths.get("test"), "artifact-path"); + verify(mockClient).logArtifacts(RUN_ID, new File("test"), 
"artifact-path"); + } + + @Test + public void testGetArtifactUri() { + ActiveRun activeRun = getActiveRun(); + assertEquals(activeRun.getArtifactUri(), ARTIFACT_URI); + } + + @Test + public void testEndRun() { + ActiveRun activeRun = getActiveRun(); + activeRun.endRun(); + verify(mockClient).setTerminated(RUN_ID, RunStatus.FINISHED); + } + + @Test + public void testEndRunWithStatus() { + ActiveRun activeRun = getActiveRun(); + activeRun.endRun(RunStatus.FAILED); + verify(mockClient).setTerminated(RUN_ID, RunStatus.FAILED); + } +} \ No newline at end of file diff --git a/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowClientTest.java b/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowClientTest.java index cc9ad5e6e5f3e..e212a5b01d140 100644 --- a/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowClientTest.java +++ b/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowClientTest.java @@ -5,7 +5,16 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.List; - +import java.util.Optional; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Set; +import java.util.HashSet; +import java.util.Stack; +import java.util.Vector; +import java.util.LinkedList; + +import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,7 +55,7 @@ public void afterAll() throws InterruptedException { @Test public void getCreateExperimentTest() { String expName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); GetExperiment.Response exp = client.getExperiment(expId); Assert.assertEquals(exp.getExperiment().getName(), expName); } @@ -61,7 +70,7 @@ public void createExistingExperiment() { @Test public void deleteAndRestoreExperiments() { String expName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); Assert.assertEquals(client.getExperiment(expId).getExperiment().getLifecycleStage(), "active"); client.deleteExperiment(expId); @@ -76,7 +85,7 @@ public void renameExperiment() { String expName = createExperimentName(); String newName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); Assert.assertEquals(client.getExperiment(expId).getExperiment().getName(), expName); client.renameExperiment(expId, newName); @@ -88,7 +97,7 @@ public void listExperimentsTest() { List expsBefore = client.listExperiments(); String expName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); List exps = client.listExperiments(); Assert.assertEquals(exps.size(), 1 + expsBefore.size()); @@ -106,21 +115,18 @@ public void listExperimentsTest() { public void addGetRun() { // Create exp String expName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); logger.debug(">> TEST.0"); // Create run - String user = System.getenv("USER"); long startTime = System.currentTimeMillis(); - String sourceFile = "MyFile.java"; - RunInfo runCreated = client.createRun(expId, sourceFile); + RunInfo runCreated = client.createRun(expId); runId = runCreated.getRunUuid(); logger.debug("runId=" + runId); List runInfos = client.listRunInfos(expId); Assert.assertEquals(runInfos.size(), 1); - 
Assert.assertEquals(runInfos.get(0).getSourceType(), SourceType.LOCAL); Assert.assertEquals(runInfos.get(0).getStatus(), RunStatus.RUNNING); // Log parameters @@ -130,12 +136,19 @@ public void addGetRun() { // Log metrics client.logMetric(runId, "accuracy_score", ACCURACY_SCORE); client.logMetric(runId, "zero_one_loss", ZERO_ONE_LOSS); + client.logMetric(runId, "multi_log_default_step_ts", 2.0); + client.logMetric(runId, "multi_log_default_step_ts", -1.0); + client.logMetric(runId, "multi_log_specified_step_ts", 1.0, -1000, 1); + client.logMetric(runId, "multi_log_specified_step_ts", 2.0, 2000, -5); + client.logMetric(runId, "multi_log_specified_step_ts", -3.0, 3000, 4); + client.logMetric(runId, "multi_log_specified_step_ts", 4.0, 2999, 4); // Log tag client.setTag(runId, "user_email", USER_EMAIL); // Update finished run - client.setTerminated(runId, RunStatus.FINISHED, startTime + 1001); + client.setTerminated(runId, RunStatus.FINISHED); + long endTime = System.currentTimeMillis(); List updatedRunInfos = client.listRunInfos(expId); Assert.assertEquals(updatedRunInfos.size(), 1); @@ -149,23 +162,180 @@ public void addGetRun() { // Assert run from getRun Run run = client.getRun(runId); RunInfo runInfo = run.getInfo(); - assertRunInfo(runInfo, expId, sourceFile); + assertRunInfo(runInfo, expId); + // verify run start and end are set in ms + Assert.assertTrue(runInfo.getStartTime() >= startTime); + Assert.assertTrue(runInfo.getEndTime() <= endTime); // Assert parent run ID is not set. Assert.assertTrue(run.getData().getTagsList().stream().noneMatch( tag -> tag.getKey().equals("mlflow.parentRunId"))); } + @Test + public void deleteTag() { + // Create experiment + String expName = createExperimentName(); + String expId = client.createExperiment(expName); + + // Create run + RunInfo runCreated = client.createRun(expId); + String runId = runCreated.getRunUuid(); + client.setTag(runId, "tag0", "val0"); + client.setTag(runId, "tag1", "val1"); + client.deleteTag(runId, "tag0"); + Run run = client.getRun(runId); + // test that the tag was correctly deleted. + for (RunTag rt : run.getData().getTagsList()) { + Assert.assertTrue(!rt.getKey().equals("tag0")); + } + // test that you can't re-delete the old tag + try { + client.deleteTag(runId, "tag0"); + Assert.fail(); + } catch (MlflowClientException e) { + Assert.assertTrue(e.getMessage().contains(String.format("No tag with name: tag0 in run with id %s", runId))); + } + // test that you can't delete a tag that doesn't already exist. + try { + client.deleteTag(runId, "fakeTag"); + Assert.fail(); + } catch (MlflowClientException e) { + Assert.assertTrue(e.getMessage().contains(String.format("No tag with name: fakeTag in run with id %s", runId))); + } + // test that you can't delete a tag on a nonexistent run. 
+ try { + client.deleteTag("fakeRunId", "fakeTag"); + Assert.fail(); + } catch (MlflowClientException e) { + Assert.assertTrue(e.getMessage().contains(String.format("Run '%s' not found", "fakeRunId"))); + } + } + + @Test + public void searchRuns() { + // Create exp + String expName = createExperimentName(); + String expId = client.createExperiment(expName); + logger.debug(">> TEST.0"); + + // Create run + String user = System.getenv("USER"); + long startTime = System.currentTimeMillis(); + String sourceFile = "MyFile.java"; + + RunInfo runCreated_1 = client.createRun(expId); + String runId_1 = runCreated_1.getRunUuid(); + logger.debug("runId=" + runId_1); + + RunInfo runCreated_2 = client.createRun(expId); + String runId_2 = runCreated_2.getRunUuid(); + logger.debug("runId=" + runId_2); + + // Log parameters + client.logParam(runId_1, "min_samples_leaf", MIN_SAMPLES_LEAF); + client.logParam(runId_2, "min_samples_leaf", MIN_SAMPLES_LEAF); + + client.logParam(runId_1, "max_depth", "5"); + client.logParam(runId_2, "max_depth", "15"); + + // Log metrics + client.logMetric(runId_1, "accuracy_score", 0.1); + client.logMetric(runId_1, "accuracy_score", 0.4); + client.logMetric(runId_2, "accuracy_score", 0.9); + + // Log tag + client.setTag(runId_1, "user_email", USER_EMAIL); + client.setTag(runId_1, "test", "works"); + client.setTag(runId_2, "test", "also works"); + + List experimentIds = Arrays.asList(expId); + + // metrics based searches + List searchResult = client.searchRuns(experimentIds, "metrics.accuracy_score < 0"); + Assert.assertEquals(searchResult.size(), 0); + + searchResult = client.searchRuns(experimentIds, "metrics.accuracy_score > 0"); + Assert.assertEquals(searchResult.size(), 2); + + searchResult = client.searchRuns(experimentIds, "metrics.accuracy_score < 0.3"); + Assert.assertEquals(searchResult.size(), 0); + + searchResult = client.searchRuns(experimentIds, "metrics.accuracy_score < 0.5"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_1); + + searchResult = client.searchRuns(experimentIds, "metrics.accuracy_score > 0.5"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_2); + + // parameter based searches + searchResult = client.searchRuns(experimentIds, + "params.min_samples_leaf = '" + MIN_SAMPLES_LEAF + "'"); + Assert.assertEquals(searchResult.size(), 2); + searchResult = client.searchRuns(experimentIds, + "params.min_samples_leaf != '" + MIN_SAMPLES_LEAF + "'"); + Assert.assertEquals(searchResult.size(), 0); + searchResult = client.searchRuns(experimentIds, "params.max_depth = '5'"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_1); + + searchResult = client.searchRuns(experimentIds, "params.max_depth = '15'"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_2); + + // tag based search + searchResult = client.searchRuns(experimentIds, "tag.user_email = '" + USER_EMAIL + "'"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_1); + + searchResult = client.searchRuns(experimentIds, "tag.user_email != '" + USER_EMAIL + "'"); + Assert.assertEquals(searchResult.size(), 0); + + searchResult = client.searchRuns(experimentIds, "tag.test = 'works'"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_1); + + searchResult = client.searchRuns(experimentIds, "tag.test = 'also works'"); + Assert.assertEquals(searchResult.get(0).getRunUuid(), runId_2); + + // Paged searchRuns + + List searchRuns = Lists.newArrayList(client.searchRuns(experimentIds, "", + ViewType.ACTIVE_ONLY, 1000, 
Lists.newArrayList("metrics.accuracy_score")).getItems()); + Assert.assertEquals(searchRuns.get(0).getInfo().getRunUuid(), runId_1); + Assert.assertEquals(searchRuns.get(1).getInfo().getRunUuid(), runId_2); + + searchRuns = Lists.newArrayList(client.searchRuns(experimentIds, "", ViewType.ACTIVE_ONLY, + 1000, Lists.newArrayList("params.min_samples_leaf", "metrics.accuracy_score DESC")) + .getItems()); + Assert.assertEquals(searchRuns.get(1).getInfo().getRunUuid(), runId_1); + Assert.assertEquals(searchRuns.get(0).getInfo().getRunUuid(), runId_2); + + Page page = client.searchRuns(experimentIds, "", ViewType.ACTIVE_ONLY, 1000); + Assert.assertEquals(page.getPageSize(), 2); + Assert.assertEquals(page.hasNextPage(), false); + Assert.assertEquals(page.getNextPageToken(), Optional.empty()); + + page = client.searchRuns(experimentIds, "", ViewType.ACTIVE_ONLY, 1); + Assert.assertEquals(page.getPageSize(), 1); + Assert.assertEquals(page.hasNextPage(), true); + Assert.assertNotEquals(page.getNextPageToken(), Optional.empty()); + + Page page2 = page.getNextPage(); + Assert.assertEquals(page2.getPageSize(), 1); + Assert.assertEquals(page2.hasNextPage(), false); + Assert.assertEquals(page2.getNextPageToken(), Optional.empty()); + + Page page3 = page2.getNextPage(); + Assert.assertEquals(page3.getPageSize(), 0); + Assert.assertEquals(page3.getNextPageToken(), Optional.empty()); + } + @Test public void createRunWithParent() { String expName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); RunInfo parentRun = client.createRun(expId); String parentRunId = parentRun.getRunUuid(); RunInfo childRun = client.createRun(CreateRun.newBuilder() .setExperimentId(expId) - .setParentRunId(parentRunId) .build()); + client.setTag(childRun.getRunUuid(), "mlflow.parentRunId", parentRunId); List childTags = client.getRun(childRun.getRunUuid()).getData().getTagsList(); String parentRunIdTagValue = childTags.stream() .filter(t -> t.getKey().equals("mlflow.parentRunId")) @@ -185,26 +355,141 @@ public void checkParamsAndMetrics() { assertParam(params, "max_depth", MAX_DEPTH); List metrics = run.getData().getMetricsList(); - Assert.assertEquals(metrics.size(), 2); + Assert.assertEquals(metrics.size(), 4); assertMetric(metrics, "accuracy_score", ACCURACY_SCORE); assertMetric(metrics, "zero_one_loss", ZERO_ONE_LOSS); + assertMetric(metrics, "multi_log_default_step_ts", -1.0); + assertMetric(metrics, "multi_log_specified_step_ts", -3.0); assert(metrics.get(0).getTimestamp() > 0) : metrics.get(0).getTimestamp(); + List multiDefaultMetricHistory = client.getMetricHistory( + runId, "multi_log_default_step_ts"); + assertMetricHistory(multiDefaultMetricHistory, "multi_log_default_step_ts", + Arrays.asList(2.0, -1.0), Arrays.asList(0L, 0L)); + + List multiSpecifiedMetricHistory = client.getMetricHistory( + runId, "multi_log_specified_step_ts"); + assertMetricHistory(multiSpecifiedMetricHistory, "multi_log_specified_step_ts", + Arrays.asList(1.0, 2.0, -3.0, 4.0), Arrays.asList(-1000L, 2000L, 3000L, 2999L), + Arrays.asList(1L, -5L, 4L, 4L)); + List tags = run.getData().getTagsList(); Assert.assertEquals(tags.size(), 1); assertTag(tags, "user_email", USER_EMAIL); } + @Test + public void testBatchedLogging() { + // Create exp + String expName = createExperimentName(); + String expId = client.createExperiment(expName); + logger.debug(">> TEST.0"); + + // Test logging just metrics + { + RunInfo runCreated = client.createRun(expId); + String runUuid = 
runCreated.getRunId(); + logger.debug("runUuid=" + runUuid); + + List metrics = new ArrayList<>(Arrays.asList(createMetric("met1", 0.081D, 10, 0), + createMetric("metric2", 82.3D, 100, 73), createMetric("metric3", 1.0D, 1000, 1), + createMetric("metric3", 2.0D, 2000, 3), createMetric("metric3", 3.0D, 0, -2))); + client.logBatch(runUuid, metrics, null, null); + + Run run = client.getRun(runUuid); + Assert.assertEquals(run.getInfo().getRunId(), runUuid); + + List loggedMetrics = run.getData().getMetricsList(); + Assert.assertEquals(loggedMetrics.size(), 3); + assertMetric(loggedMetrics, "met1", 0.081D, 10, 0); + assertMetric(loggedMetrics, "metric2", 82.3D, 100, 73); + assertMetric(loggedMetrics, "metric3", 2.0D, 2000, 3); + } + + // Test logging just params + { + RunInfo runCreated = client.createRun(expId); + String runUuid = runCreated.getRunId(); + logger.debug("runUuid=" + runUuid); + + Set params = new HashSet(Arrays.asList( + createParam("p1", "this is a param string"), + createParam("p2", "a b"), + createParam("3", "x"))); + client.logBatch(runUuid, null, params, null); + + Run run = client.getRun(runUuid); + Assert.assertEquals(run.getInfo().getRunId(), runUuid); + + List loggedParams = run.getData().getParamsList(); + Assert.assertEquals(loggedParams.size(), 3); + assertParam(loggedParams, "p1", "this is a param string"); + assertParam(loggedParams, "p2", "a b"); + assertParam(loggedParams, "3", "x"); + } + + // Test logging just tags + { + RunInfo runCreated = client.createRun(expId); + String runUuid = runCreated.getRunId(); + logger.debug("runUuid=" + runUuid); + + Stack tags = new Stack(); + tags.push(createTag("t1", "tagtagtag")); + client.logBatch(runUuid, null, null, tags); + + Run run = client.getRun(runUuid); + Assert.assertEquals(run.getInfo().getRunId(), runUuid); + + List loggedTags = run.getData().getTagsList(); + Assert.assertEquals(loggedTags.size(), 1); + assertTag(loggedTags, "t1", "tagtagtag"); + } + + // All + { + RunInfo runCreated = client.createRun(expId); + String runUuid = runCreated.getRunId(); + logger.debug("runUuid=" + runUuid); + + List metrics = new LinkedList<>(Arrays.asList(createMetric("m1", 32.23D, 12, 0))); + Vector params = new Vector<>(Arrays.asList(createParam("p1", "param1"), + createParam("p2", "another param"))); + Set tags = new HashSet<>(Arrays.asList(createTag("t1", "t1"), + createTag("t2", "xx"), + createTag("t3", "xx"))); + client.logBatch(runUuid, metrics, params, tags); + + Run run = client.getRun(runUuid); + Assert.assertEquals(run.getInfo().getRunId(), runUuid); + + List loggedMetrics = run.getData().getMetricsList(); + Assert.assertEquals(loggedMetrics.size(), 1); + assertMetric(loggedMetrics, "m1", 32.23D); + + List loggedParams = run.getData().getParamsList(); + Assert.assertEquals(loggedParams.size(), 2); + assertParam(loggedParams, "p1", "param1"); + assertParam(loggedParams, "p2", "another param"); + + List loggedTags = run.getData().getTagsList(); + Assert.assertEquals(loggedTags.size(), 3); + assertTag(loggedTags, "t1", "t1"); + assertTag(loggedTags, "t2", "xx"); + assertTag(loggedTags, "t3", "xx"); + } + } + @Test public void deleteAndRestoreRun() { String expName = createExperimentName(); - long expId = client.createExperiment(expName); + String expId = client.createExperiment(expName); String sourceFile = "MyFile.java"; - RunInfo runCreated = client.createRun(expId, sourceFile); + RunInfo runCreated = client.createRun(expId); Assert.assertEquals(runCreated.getLifecycleStage(), "active"); - String deleteRunId = 
runCreated.getRunUuid(); + String deleteRunId = runCreated.getRunId(); client.deleteRun(deleteRunId); Assert.assertEquals(client.getRun(deleteRunId).getInfo().getLifecycleStage(), "deleted"); client.restoreRun(deleteRunId); diff --git a/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowContextTest.java b/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowContextTest.java new file mode 100644 index 0000000000000..f913373a76646 --- /dev/null +++ b/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowContextTest.java @@ -0,0 +1,126 @@ +package org.mlflow.tracking; + +import static org.mockito.Mockito.*; + +import static org.mlflow.api.proto.Service.*; + +import org.mlflow.tracking.utils.MlflowTagConstants; +import org.mockito.ArgumentCaptor; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.Test; + +import java.util.List; +import java.util.Optional; + +public class MlflowContextTest { + private static MlflowClient mockClient; + + @AfterMethod + public static void afterMethod() { + mockClient = null; + } + + public static MlflowContext setupMlflowContext() { + mockClient = mock(MlflowClient.class); + MlflowContext mlflow = new MlflowContext(mockClient); + return mlflow; + } + + @Test + public void testGetClient() { + MlflowContext mlflow = setupMlflowContext(); + Assert.assertEquals(mlflow.getClient(), mockClient); + } + + @Test + public void testSetExperimentName() { + // Will throw if there is no experiment with the same name. + { + MlflowContext mlflow = setupMlflowContext(); + when(mockClient.getExperimentByName("experiment-name")).thenReturn(Optional.empty()); + try { + mlflow.setExperimentName("experiment-name"); + Assert.fail(); + } catch (IllegalArgumentException expected) { + } + } + + // Will set experiment-id if experiment is returned from getExperimentByName + { + MlflowContext mlflow = setupMlflowContext(); + when(mockClient.getExperimentByName("experiment-name")).thenReturn( + Optional.of(Experiment.newBuilder().setExperimentId("123").build())); + mlflow.setExperimentName("experiment-name"); + Assert.assertEquals(mlflow.getExperimentId(), "123"); + } + } + + @Test + public void testSetAndGetExperimentId() { + MlflowContext mlflow = setupMlflowContext(); + mlflow.setExperimentId("apple"); + Assert.assertEquals(mlflow.getExperimentId(), "apple"); + } + + @Test + public void testStartRun() { + // Sets the appropriate tags + ArgumentCaptor createRunArgument = ArgumentCaptor.forClass(CreateRun.class); + MlflowContext mlflow = setupMlflowContext(); + mlflow.setExperimentId("123"); + mlflow.startRun("apple", "parent-run-id"); + verify(mockClient).createRun(createRunArgument.capture()); + List tags = createRunArgument.getValue().getTagsList(); + Assert.assertEquals(createRunArgument.getValue().getExperimentId(), "123"); + Assert.assertTrue(tags.contains(createRunTag(MlflowTagConstants.RUN_NAME, "apple"))); + Assert.assertTrue(tags.contains(createRunTag(MlflowTagConstants.SOURCE_TYPE, "LOCAL"))); + Assert.assertTrue(tags.contains(createRunTag(MlflowTagConstants.USER, System.getProperty("user.name")))); + Assert.assertTrue(tags.contains(createRunTag(MlflowTagConstants.PARENT_RUN_ID, "parent-run-id"))); + } + + @Test + public void testStartRunWithNoRunName() { + // Sets the appropriate tags + ArgumentCaptor createRunArgument = ArgumentCaptor.forClass(CreateRun.class); + MlflowContext mlflow = setupMlflowContext(); + mlflow.startRun(); + verify(mockClient).createRun(createRunArgument.capture()); + List tags = 
createRunArgument.getValue().getTagsList(); + Assert.assertFalse( + tags.stream().anyMatch(tag -> tag.getKey().equals(MlflowTagConstants.RUN_NAME))); + } + + @Test + public void testWithActiveRun() { + // Sets the appropriate tags + MlflowContext mlflow = setupMlflowContext(); + mlflow.setExperimentId("123"); + when(mockClient.createRun(any(CreateRun.class))) + .thenReturn(RunInfo.newBuilder().setRunId("test-id").build()); + mlflow.withActiveRun("apple", activeRun -> { + Assert.assertEquals(activeRun.getId(), "test-id"); + }); + verify(mockClient).createRun(any(CreateRun.class)); + verify(mockClient).setTerminated(any(), any()); + } + + @Test + public void testWithActiveRunNoRunName() { + // Sets the appropriate tags + MlflowContext mlflow = setupMlflowContext(); + mlflow.setExperimentId("123"); + when(mockClient.createRun(any(CreateRun.class))) + .thenReturn(RunInfo.newBuilder().setRunId("test-id").build()); + mlflow.withActiveRun(activeRun -> { + Assert.assertEquals(activeRun.getId(), "test-id"); + }); + verify(mockClient).createRun(any(CreateRun.class)); + verify(mockClient).setTerminated(any(), any()); + } + + + private static RunTag createRunTag(String key, String value) { + return RunTag.newBuilder().setKey(key).setValue(value).build(); + } +} \ No newline at end of file diff --git a/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowProtobufMapperTest.java b/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowProtobufMapperTest.java index 41ee3204512d0..d437c5c32fccf 100644 --- a/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowProtobufMapperTest.java +++ b/mlflow/java/client/src/test/java/org/mlflow/tracking/MlflowProtobufMapperTest.java @@ -23,6 +23,7 @@ public void testSerializeSnakeCase() { Map expectedMessage = new HashMap<>(); expectedMessage.put("run_uuid", "my-id"); + expectedMessage.put("run_id", "my-id"); expectedMessage.put("key", "my-key"); expectedMessage.put("value", "my-value"); Assert.assertEquals(serializedMessage, expectedMessage); @@ -33,6 +34,6 @@ public void testDeserializeSnakeCaseAndUnknown() { MlflowProtobufMapper mapper = new MlflowProtobufMapper(); Service.CreateExperiment.Response result = mapper.toCreateExperimentResponse( "{\"experiment_id\": 123, \"what is this field\": \"even\"}"); - Assert.assertEquals(result.getExperimentId(), 123); + Assert.assertEquals(result.getExperimentId(), "123"); } } diff --git a/mlflow/java/client/src/test/java/org/mlflow/tracking/TestClientProvider.java b/mlflow/java/client/src/test/java/org/mlflow/tracking/TestClientProvider.java index d2f12258a2f7a..788b143589b5e 100644 --- a/mlflow/java/client/src/test/java/org/mlflow/tracking/TestClientProvider.java +++ b/mlflow/java/client/src/test/java/org/mlflow/tracking/TestClientProvider.java @@ -84,7 +84,7 @@ private MlflowClient startServerProcess() throws IOException { int freePort = getFreePort(); String bindAddress = "127.0.0.1"; pb.command("mlflow", "server", "--host", bindAddress, "--port", "" + freePort, - "--file-store", tempDir.resolve("mlruns").toString(), "--workers", "1"); + "--backend-store-uri", tempDir.resolve("mlruns").toString(), "--workers", "1"); serverProcess = pb.start(); // NB: We cannot use pb.inheritIO() because that interacts poorly with the Maven diff --git a/mlflow/java/client/src/test/java/org/mlflow/tracking/TestUtils.java b/mlflow/java/client/src/test/java/org/mlflow/tracking/TestUtils.java index 17a06a964814e..2e20f3f7c1750 100644 --- a/mlflow/java/client/src/test/java/org/mlflow/tracking/TestUtils.java +++ 
b/mlflow/java/client/src/test/java/org/mlflow/tracking/TestUtils.java @@ -14,10 +14,10 @@ static boolean equals(double a, double b) { return a == b ? true : Math.abs(a - b) < EPSILON; } - static void assertRunInfo(RunInfo runInfo, long experimentId, String sourceName) { + static void assertRunInfo(RunInfo runInfo, String experimentId) { Assert.assertEquals(runInfo.getExperimentId(), experimentId); - Assert.assertEquals(runInfo.getSourceName(), sourceName); Assert.assertNotEquals(runInfo.getUserId(), ""); + Assert.assertTrue(runInfo.getStartTime() < runInfo.getEndTime()); } public static void assertParam(List params, String key, String value) { @@ -28,6 +28,30 @@ public static void assertMetric(List metrics, String key, double value) Assert.assertTrue(metrics.stream().filter(e -> e.getKey().equals(key) && equals(e.getValue(), value)).findFirst().isPresent()); } + public static void assertMetric(List metrics, String key, double value, long timestamp, long step) { + Assert.assertTrue(metrics.stream().filter( + e -> e.getKey().equals(key) && equals(e.getValue(), value) && equals(e.getTimestamp(), timestamp) + && equals(e.getStep(), step)).findFirst().isPresent()); + } + + public static void assertMetricHistory(List history, String key, List values, List steps) { + Assert.assertEquals(history.size(), values.size()); + Assert.assertEquals(history.size(), steps.size()); + for (int i = 0; i < history.size(); i++) { + Metric metric = history.get(i); + Assert.assertEquals(metric.getKey(), key); + Assert.assertTrue(equals(metric.getValue(), values.get(i))); + Assert.assertTrue(equals(metric.getStep(), steps.get(i))); + } + } + + public static void assertMetricHistory(List history, String key, List values, List timestamps, List steps) { + assertMetricHistory(history, key, values, steps); + for(int i = 0; i < history.size(); ++i) { + Assert.assertTrue(equals(history.get(i).getTimestamp(), timestamps.get(i))); + } + } + public static void assertTag(List tags, String key, String value) { Assert.assertTrue(tags.stream().filter(e -> e.getKey().equals(key) && e.getValue().equals(value)).findFirst().isPresent()); } @@ -38,4 +62,23 @@ public static java.util.Optional getExperimentByName(List baseMap = new HashMap<>(); + + public static class MyDynamicProvider extends AbstractMap { + @Override + public Set> entrySet() { + return baseMap.entrySet(); + } + } + + @BeforeMethod + public static void beforeMethod() { + baseMap = new HashMap<>(); + } + + + @Test + public void testIsInDatabricksNotebook() { + baseMap.put("notebookId", "1"); + DatabricksContext context = DatabricksContext.createIfAvailable(MyDynamicProvider.class.getName()); + Assert.assertTrue(context.isInDatabricksNotebook()); + } + + @Test + public void testGetNotebookId() { + baseMap.put("notebookId", "1"); + DatabricksContext context = DatabricksContext.createIfAvailable(MyDynamicProvider.class.getName()); + Assert.assertEquals(context.getNotebookId(), "1"); + } + + @Test + public void testGetTags() { + // Will return empty map if not in Databricks notebook. + { + baseMap.put("notebookId", null); + baseMap.put("notebookPath", null); + DatabricksContext context = DatabricksContext.createIfAvailable(MyDynamicProvider.class.getName()); + Assert.assertFalse(context.isInDatabricksNotebook()); + Assert.assertEquals(context.getTags(), Maps.newHashMap()); + } + + // Will return all tags if context is set as expected. 
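+ // (The blocks below reassign MyDynamicProvider's static baseMap to simulate different + // notebook environments; beforeMethod resets the map before each test method.)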
+ { + baseMap = new HashMap<>(); + Map expectedTags = ImmutableMap.of( + MlflowTagConstants.DATABRICKS_NOTEBOOK_ID, "1", + MlflowTagConstants.DATABRICKS_NOTEBOOK_PATH, "test-path", + MlflowTagConstants.SOURCE_TYPE, "NOTEBOOK", + MlflowTagConstants.SOURCE_NAME, "test-path"); + baseMap.put("notebookId", "1"); + baseMap.put("notebookPath", "test-path"); + DatabricksContext context = DatabricksContext.createIfAvailable(MyDynamicProvider.class.getName()); + Assert.assertEquals(context.getTags(), expectedTags); + } + + // Will not set notebook path tags if context doesn't have a notebookPath member. + { + baseMap = new HashMap<>(); + Map expectedTags = ImmutableMap.of( + MlflowTagConstants.DATABRICKS_NOTEBOOK_ID, "1"); + baseMap.put("notebookId", "1"); + baseMap.put("notebookPath", null); + DatabricksContext context = DatabricksContext.createIfAvailable(MyDynamicProvider.class.getName()); + Assert.assertEquals(context.getTags(), expectedTags); + } + } +} diff --git a/mlflow/java/pom.xml b/mlflow/java/pom.xml index 6e6a61f9660fa..82e233243994b 100644 --- a/mlflow/java/pom.xml +++ b/mlflow/java/pom.xml @@ -3,13 +3,29 @@ 4.0.0 org.mlflow mlflow-parent - 0.7.1 + 1.0.0 pom MLflow Parent POM http://mlflow.org Open source platform for the machine learning lifecycle + + + google-maven-central + Google Maven Central + https://maven-central.storage-download.googleapis.com/repos/central/data + + + + + + google-maven-central + Google Maven Central + https://maven-central.storage-download.googleapis.com/repos/central/data + + + @@ -40,7 +56,7 @@ - 0.5.1 + 1.0.0 1.8 1.8 2.11.12 @@ -67,20 +83,26 @@ 4.12 test + + org.mockito + mockito-core + 2.28.2 + test + com.fasterxml.jackson.dataformat jackson-dataformat-yaml - 2.9.6 + 2.9.8 com.fasterxml.jackson.core jackson-databind - 2.9.6 + 2.9.8 com.fasterxml.jackson.core jackson-annotations - 2.9.6 + 2.9.8 ml.combust.mleap @@ -238,6 +260,11 @@ + + net.alchim31.maven + scala-maven-plugin + 4.0.2 + diff --git a/mlflow/java/scoring/pom.xml b/mlflow/java/scoring/pom.xml index e351f126ef025..69ca570b9c971 100644 --- a/mlflow/java/scoring/pom.xml +++ b/mlflow/java/scoring/pom.xml @@ -5,7 +5,7 @@ org.mlflow mlflow-parent - 0.7.1 + 1.0.0 ../pom.xml @@ -18,7 +18,6 @@ mlflow-scoring - 0.7.1 jar MLflow scoring server http://mlflow.org @@ -80,6 +79,23 @@ + + net.alchim31.maven + scala-maven-plugin + + + scala-compile-first + process-resources + + add-source + compile + + + + + ${scala.version} + + org.apache.maven.plugins maven-checkstyle-plugin diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/Flavor.java b/mlflow/java/scoring/src/main/java/org/mlflow/Flavor.java index 8208afe1a887f..22302bcaac7f5 100644 --- a/mlflow/java/scoring/src/main/java/org/mlflow/Flavor.java +++ b/mlflow/java/scoring/src/main/java/org/mlflow/Flavor.java @@ -3,11 +3,11 @@ /** Interface for exposing information about an MLFlow model flavor. */ public interface Flavor { /** @return The name of the model flavor */ - public String getName(); + String getName(); /** * @return The relative path to flavor-specific model data. 
This path is relative to the root * directory of an MLFlow model */ - public String getModelDataPath(); + String getModelDataPath(); } diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/LoaderModule.java b/mlflow/java/scoring/src/main/java/org/mlflow/LoaderModule.java index 4d155f442feea..6bf65189b1807 100644 --- a/mlflow/java/scoring/src/main/java/org/mlflow/LoaderModule.java +++ b/mlflow/java/scoring/src/main/java/org/mlflow/LoaderModule.java @@ -45,15 +45,14 @@ public Predictor load(Model model) { * @param modelRootPath The path to the root directory of the MLFlow model */ public Predictor load(String modelRootPath) throws PredictorLoadingException { - Optional model = Optional.empty(); try { - model = Optional.of(Model.fromRootPath(modelRootPath)); + Optional model = Optional.of(Model.fromRootPath(modelRootPath)); + return load(model.get()); } catch (IOException e) { throw new PredictorLoadingException( "Failed to load the model configuration at the specified path. Please ensure that" + " this is the path to the root directory of a valid MLFlow model"); } - return load(model.get()); } /** diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/LeapFrameSchema.java b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/LeapFrameSchema.java deleted file mode 100644 index 543d88309c25d..0000000000000 --- a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/LeapFrameSchema.java +++ /dev/null @@ -1,71 +0,0 @@ -package org.mlflow.sagemaker; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import org.mlflow.utils.SerializationUtils; - -/** Representation of the dataframe schema that an {@link MLeapPredictor} expects inputs to have */ -class LeapFrameSchema { - private final Map rawSchema; - private final List fieldNames; - - @JsonIgnoreProperties(ignoreUnknown = true) - private static class SchemaField { - @JsonProperty("name") - String name; - } - - private LeapFrameSchema(Map rawSchema) { - this.rawSchema = rawSchema; - if (!rawSchema.containsKey("fields")) { - throw new InvalidSchemaException("Leap frame schema must contain a top-level `fields` key!"); - } - - final ObjectMapper mapper = new ObjectMapper(); - List fields = - mapper.convertValue(rawSchema.get("fields"), new TypeReference>() {}); - - this.fieldNames = new ArrayList(); - for (SchemaField field : fields) { - fieldNames.add(field.name); - } - } - - /** - * @throws InvalidSchemaException If the schema cannot be parsed from JSON or does not contain - * required keys - * @throws IOException If the schema file cannot be loaded from the specified path - */ - static LeapFrameSchema fromPath(String filePath) throws IOException { - File schemaFile = new File(filePath); - try { - return new LeapFrameSchema( - (Map) SerializationUtils.parseJsonFromFile(filePath, Map.class)); - } catch (IOException e) { - throw new InvalidSchemaException("The specified schema could not be parsed as JSON."); - } - } - - /** - * @return The list of dataframe fields expected by the transformer with this schema, in the order - * that these fields are expected to appear - */ - List getFieldNames() { - return this.fieldNames; - } - - /** - * @return A representation of the schema as a map containg standard Java objects. 
This is useful - * for serializing the schema as JSON - */ - Map getRawSchema() { - return this.rawSchema; - } -} diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/LeapFrameUtils.java b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/LeapFrameUtils.java deleted file mode 100644 index 4ad1e8f51bdc0..0000000000000 --- a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/LeapFrameUtils.java +++ /dev/null @@ -1,23 +0,0 @@ -package org.mlflow.sagemaker; - -import java.nio.charset.Charset; -import ml.combust.mleap.json.DefaultFrameReader; -import ml.combust.mleap.runtime.frame.DefaultLeapFrame; - -/** - * Utilities for serializing, deserialize, and manipulating MLeap {@link - * ml.combust.mleap.runtime.frame.LeapFrame} objects - */ -class LeapFrameUtils { - private static final DefaultFrameReader frameReader = new DefaultFrameReader(); - private static final Charset jsonCharset = Charset.forName("UTF-8"); - - /** - * Deserializes a {@link ml.combust.mleap.runtime.frame.LeapFrame} from its serialized JSON - * representation - */ - protected static DefaultLeapFrame getLeapFrameFromJson(String frameJson) { - byte[] frameBytes = frameJson.getBytes(jsonCharset); - return frameReader.fromBytes(frameBytes, jsonCharset).get(); - } -} diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapPredictor.java b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapPredictor.java index 7795bc93a0999..603932abae032 100644 --- a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapPredictor.java +++ b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapPredictor.java @@ -3,25 +3,24 @@ import com.fasterxml.jackson.core.JsonProcessingException; import java.io.File; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; + +import ml.combust.mleap.core.types.StructType; import ml.combust.mleap.runtime.MleapContext; import ml.combust.mleap.runtime.frame.DefaultLeapFrame; -import ml.combust.mleap.runtime.frame.Row; import ml.combust.mleap.runtime.frame.Transformer; import ml.combust.mleap.runtime.javadsl.BundleBuilder; import ml.combust.mleap.runtime.javadsl.ContextBuilder; +import ml.combust.mleap.runtime.javadsl.LeapFrameSupport; import org.mlflow.utils.SerializationUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import scala.collection.JavaConverters; -import scala.collection.Seq; /** A {@link org.mlflow.sagemaker.Predictor} implementation for the MLeap model flavor */ public class MLeapPredictor extends Predictor { private final Transformer pipelineTransformer; - private final LeapFrameSchema inputSchema; // As in the `pyfunc` wrapper for Spark models, we expect output dataframes // to have a `prediction` column that contains model predictions. 
Only entries in this @@ -30,6 +29,8 @@ public class MLeapPredictor extends Predictor { // spark.py#L248 private static final String PREDICTION_COLUMN_NAME = "prediction"; private static final Logger logger = LoggerFactory.getLogger(MLeapPredictor.class); + private final LeapFrameSupport leapFrameSupport; + private final StructType inputSchema; /** * Constructs an {@link MLeapPredictor} @@ -41,35 +42,58 @@ public class MLeapPredictor extends Predictor { public MLeapPredictor(String modelDataPath, String inputSchemaPath) { MleapContext mleapContext = new ContextBuilder().createMleapContext(); BundleBuilder bundleBuilder = new BundleBuilder(); + MLeapSchemaReader schemaReader = new MLeapSchemaReader(); + this.leapFrameSupport = new LeapFrameSupport(); + this.pipelineTransformer = bundleBuilder.load(new File(modelDataPath), mleapContext).root(); try { - this.inputSchema = LeapFrameSchema.fromPath(inputSchemaPath); - } catch (IOException e) { + this.inputSchema = schemaReader.fromFile(inputSchemaPath); + } catch (Exception e) { logger.error("Could not read the model input schema from the specified path", e); throw new PredictorLoadingException( - String.format( - "Failed to load model input schema from specified path: %s", inputSchemaPath)); + String.format( + "Failed to load model input schema from specified path: %s", inputSchemaPath)); } } @Override protected PredictorDataWrapper predict(PredictorDataWrapper input) throws PredictorEvaluationException { - PandasRecordOrientedDataFrame pandasFrame = null; + PandasSplitOrientedDataFrame pandasFrame; try { - pandasFrame = PandasRecordOrientedDataFrame.fromJson(input.toJson()); + pandasFrame = PandasSplitOrientedDataFrame.fromJson(input.toJson()); } catch (IOException e) { logger.error( - "Encountered a JSON conversion error during conversion of Pandas dataframe to LeapFrame.", + "Encountered a JSON parsing error during conversion of input to a Pandas DataFrame" + + " representation.", e); throw new PredictorEvaluationException( - "Failed to transform input into a JSON representation of an MLeap dataframe." - + " Please ensure that the input is a JSON-serialized Pandas Dataframe" - + " with the `record` orientation.", + "Encountered a JSON parsing error while transforming input into a Pandas DataFrame" + + " representation. Ensure that the input is a JSON-serialized Pandas DataFrame" + + " with the `split` orientation.", + e); + } catch (InvalidSchemaException e) { + logger.error( + "Encountered a schema mismatch while transforming input into a Pandas DataFrame" + + " representation.", + e); + throw new PredictorEvaluationException( + "Encountered a schema mismatch while transforming input into a Pandas DataFrame" + + " representation. Ensure that the input is a JSON-serialized Pandas DataFrame" + + " with the `split` orientation.", + e); + } catch (IllegalArgumentException e) { + logger.error( + "Failed to transform input into a Pandas DataFrame because the parsed frame is invalid.", + e); + throw new PredictorEvaluationException( + "Failed to transform input into a Pandas DataFrame because the parsed frame is invalid." 
+ + " Ensure that the input is a JSON-serialized Pandas DataFrame with the `split`" + + " orientation.", e); } - DefaultLeapFrame leapFrame = null; + DefaultLeapFrame leapFrame; try { leapFrame = pandasFrame.toLeapFrame(this.inputSchema); } catch (InvalidSchemaException e) { @@ -87,33 +111,24 @@ protected PredictorDataWrapper predict(PredictorDataWrapper input) // This single-element is the `prediction` column; as is the case with the `pyfunc` wrapper // for Spark models, the query response is comprised solely of entries in the `prediction` // column - Seq predictionColumnSelectionArgs = - JavaConverters.asScalaIteratorConverter(Arrays.asList(PREDICTION_COLUMN_NAME).iterator()) - .asScala() - .toSeq(); - DefaultLeapFrame predictionsFrame = + DefaultLeapFrame predictionsFrame = this.leapFrameSupport.select( this.pipelineTransformer .transform(leapFrame) - .get() - .select(predictionColumnSelectionArgs) - .get(); - Seq predictionRows = predictionsFrame.collect(); - Iterable predictionRowsIterable = - JavaConverters.asJavaIterableConverter(predictionRows).asJava(); - List predictions = new ArrayList(); - for (Row row : predictionRowsIterable) { - predictions.add(row.getRaw(0)); - } + .get(), Collections.singletonList(PREDICTION_COLUMN_NAME)); + + List predictions = this.leapFrameSupport.collect(predictionsFrame) + .stream() + .map(row -> row.getRaw(0)) + .collect(Collectors.toList()); - String predictionsJson = null; try { - predictionsJson = SerializationUtils.toJson(predictions); + String predictionsJson = SerializationUtils.toJson(predictions); + return new PredictorDataWrapper(predictionsJson, PredictorDataWrapper.ContentType.Json); } catch (JsonProcessingException e) { logger.error("Encountered an error while serializing the output dataframe.", e); throw new PredictorEvaluationException( "Failed to serialize prediction results as a JSON list!"); } - return new PredictorDataWrapper(predictionsJson, PredictorDataWrapper.ContentType.Json); } /** @return The underlying MLeap pipeline transformer that this predictor uses for inference */ diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapSchemaReader.scala b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapSchemaReader.scala new file mode 100644 index 0000000000000..bf2b6b4b4c54e --- /dev/null +++ b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/MLeapSchemaReader.scala @@ -0,0 +1,17 @@ +package org.mlflow.sagemaker + +import java.io.File +import java.nio.charset.Charset + +import ml.combust.mleap.core.types.StructType +import ml.combust.mleap.json.JsonSupport._ +import org.apache.commons.io.FileUtils +import spray.json._ + +class MLeapSchemaReader() { + + def fromFile(filePath: String) : StructType = { + val json = FileUtils.readFileToString(new File(filePath), Charset.defaultCharset()) + json.parseJson.convertTo[StructType] + } +} diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/PandasRecordOrientedDataFrame.java b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/PandasRecordOrientedDataFrame.java deleted file mode 100644 index 60fcc360342e9..0000000000000 --- a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/PandasRecordOrientedDataFrame.java +++ /dev/null @@ -1,68 +0,0 @@ -package org.mlflow.sagemaker; - -import com.fasterxml.jackson.core.JsonProcessingException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import ml.combust.mleap.runtime.frame.DefaultLeapFrame; -import 
org.mlflow.utils.SerializationUtils; - -/** - * A representation of a serialized Pandas dataframe in record-oriented format. For more - * information, see `pandas.DataFrame.toJson(orient="records")` - * (https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_json.html) - */ -class PandasRecordOrientedDataFrame { - private final List> records; - - private static final String LEAP_FRAME_KEY_ROWS = "rows"; - private static final String LEAP_FRAME_KEY_SCHEMA = "schema"; - - private PandasRecordOrientedDataFrame(List> records) { - this.records = records; - } - - /** - * Constructs a {@link PandasRecordOrientedDataFrame} - * - * @param frameJson A representation of the dataframe - */ - static PandasRecordOrientedDataFrame fromJson(String frameJson) throws IOException { - return new PandasRecordOrientedDataFrame(SerializationUtils.fromJson(frameJson, List.class)); - } - - /** @return The number of records contained in the dataframe */ - int size() { - return this.records.size(); - } - - /** - * Applies the specified MLeap frame schema ({@link LeapFrameSchema}) to this dataframe, producing - * a {@link DefaultLeapFrame} - * - * @throws InvalidSchemaException If the supplied pandas dataframe is invalid (missing a required - * field, etc) - */ - DefaultLeapFrame toLeapFrame(LeapFrameSchema leapFrameSchema) throws JsonProcessingException { - List> mleapRows = new ArrayList<>(); - for (Map record : this.records) { - List mleapRow = new ArrayList<>(); - for (String fieldName : leapFrameSchema.getFieldNames()) { - if (!record.containsKey(fieldName)) { - throw new InvalidSchemaException( - String.format("Pandas dataframe is missing a required field: `%s`", fieldName)); - } - mleapRow.add(record.get(fieldName)); - } - mleapRows.add(mleapRow); - } - Map rawFrame = new HashMap<>(); - rawFrame.put(LEAP_FRAME_KEY_ROWS, mleapRows); - rawFrame.put(LEAP_FRAME_KEY_SCHEMA, leapFrameSchema.getRawSchema()); - String leapFrameJson = SerializationUtils.toJson(rawFrame); - DefaultLeapFrame leapFrame = LeapFrameUtils.getLeapFrameFromJson(leapFrameJson); - return leapFrame; - } -} diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/PandasSplitOrientedDataFrame.java b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/PandasSplitOrientedDataFrame.java new file mode 100644 index 0000000000000..8a10baa51204f --- /dev/null +++ b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/PandasSplitOrientedDataFrame.java @@ -0,0 +1,109 @@ +package org.mlflow.sagemaker; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import ml.combust.mleap.core.types.StructField; +import ml.combust.mleap.core.types.StructType; +import ml.combust.mleap.runtime.frame.DefaultLeapFrame; +import ml.combust.mleap.runtime.frame.Row; +import ml.combust.mleap.runtime.javadsl.LeapFrameBuilder; +import ml.combust.mleap.runtime.javadsl.LeapFrameSupport; +import org.mlflow.utils.SerializationUtils; + +/** + * A representation of a serialized Pandas DataFrame in split-oriented format. 
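+ * A minimal example of the expected shape, mirroring the sample input exercised by the + * tests in this patch: {"columns": ["text", "topic"], "data": [["some text", "alt.atheism"]]}.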
For more information, + * see `pandas.DataFrame.toJson(orient="split")` + * (https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_json.html) + */ +class PandasSplitOrientedDataFrame { + + private static final LeapFrameBuilder leapFrameBuilder = new LeapFrameBuilder(); + private static final LeapFrameSupport leapFrameSupport = new LeapFrameSupport(); + + private final List> entries; + + private static final String PANDAS_FRAME_KEY_COLUMN_NAMES = "columns"; + private static final String PANDAS_FRAME_KEY_ROWS = "data"; + + private PandasSplitOrientedDataFrame(List columnNames, List> rows) { + this.entries = new ArrayList<>(); + for (int rowIndex = 0; rowIndex < rows.size(); ++rowIndex) { + List row = rows.get(rowIndex); + if (row.size() != columnNames.size()) { + throw new IllegalArgumentException( + String.format( + "Row %d of the DataFrame does not contain the expected number of columns! Found %d" + + " columns, expected %d columns", + rowIndex, row.size(), columnNames.size())); + } + LinkedHashMap newEntry = new LinkedHashMap<>(row.size()); + for (int i = 0; i < row.size(); ++i) { + newEntry.put(columnNames.get(i), row.get(i)); + } + this.entries.add(newEntry); + } + } + + /** + * Constructs a {@link PandasSplitOrientedDataFrame} + * + * @param frameJson A representation of the DataFrame + */ + static PandasSplitOrientedDataFrame fromJson(String frameJson) throws IOException { + Map> parsedFrame = SerializationUtils.fromJson(frameJson, Map.class); + validatePandasDataFrameJsonRepresentation(parsedFrame); + return new PandasSplitOrientedDataFrame( + (List) parsedFrame.get(PANDAS_FRAME_KEY_COLUMN_NAMES), + (List>) parsedFrame.get(PANDAS_FRAME_KEY_ROWS)); + } + + private static void validatePandasDataFrameJsonRepresentation(Map> parsedFrame) + throws InvalidSchemaException { + String[] expectedColumnNames = + new String[] {PANDAS_FRAME_KEY_COLUMN_NAMES, PANDAS_FRAME_KEY_ROWS}; + for (String columnName : expectedColumnNames) { + if (!parsedFrame.containsKey(columnName)) { + throw new InvalidSchemaException( + String.format( + "The JSON representation of the serialized Pandas DataFrame is missing an expected " + + " column with name: `%s` that is required by the Pandas `split` orientation.", + columnName)); + } + } + } + + /** @return The number of rows contained in the DataFrame */ + int size() { + return this.entries.size(); + } + + /** + * Applies the specified MLeap frame schema ({@link StructType}) to this DataFrame, producing + * a {@link DefaultLeapFrame} + * + * @throws InvalidSchemaException If the supplied pandas DataFrame is invalid (missing a required + * field, etc) + */ + DefaultLeapFrame toLeapFrame(StructType leapFrameSchema) { + List mleapRows = new ArrayList<>(); + + for (Map entry : this.entries) { + List mleapRow = new ArrayList<>(); + for (StructField field : leapFrameSupport.getFields(leapFrameSchema)) { + String fieldName = field.name(); + if (!entry.containsKey(fieldName)) { + throw new InvalidSchemaException( + String.format("Pandas DataFrame is missing a required field: `%s`", fieldName)); + } + mleapRow.add(entry.get(fieldName)); + } + mleapRows.add(leapFrameBuilder.createRowFromIterable(mleapRow)); + } + + return leapFrameBuilder.createFrame(leapFrameSchema, mleapRows); + } +} diff --git a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/ScoringServer.java b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/ScoringServer.java index e421a2354dff7..ff15620bcb928 100644 --- 
a/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/ScoringServer.java +++ b/mlflow/java/scoring/src/main/java/org/mlflow/sagemaker/ScoringServer.java @@ -25,8 +25,8 @@ public class ScoringServer { private static final String REQUEST_CONTENT_TYPE_JSON = "application/json"; private static final String REQUEST_CONTENT_TYPE_CSV = "text/csv"; - static final String ENV_VAR_MINIMUM_SERVER_THREADS = "SCORING_SERVER_MIN_THREADS"; - static final String ENV_VAR_MAXIMUM_SERVER_THREADS = "SCORING_SERVER_MAX_THREADS"; + static final String ENV_VAR_MINIMUM_SERVER_THREADS = "MLFLOW_SCORING_SERVER_MIN_THREADS"; + static final String ENV_VAR_MAXIMUM_SERVER_THREADS = "MLFLOW_SCORING_SERVER_MAX_THREADS"; static final int DEFAULT_MINIMUM_SERVER_THREADS = 1; // Assuming an 8 core machine with hyperthreading @@ -72,14 +72,13 @@ public ScoringServer(String modelPath) throws PredictorLoadingException { private static Predictor loadPredictorFromPath(String modelPath) throws PredictorLoadingException { - Model config = null; try { - config = Model.fromRootPath(modelPath); + Model config = Model.fromRootPath(modelPath); + return (new MLeapLoader()).load(config); } catch (IOException e) { throw new PredictorLoadingException( "Failed to load the configuration for the MLFlow model at the specified path."); } - return (new MLeapLoader()).load(config); } /** @@ -191,8 +190,9 @@ public void doPost(HttpServletRequest request, HttpServletResponse response) String.format( "Received a request with an unsupported content type: %s", requestContentType)); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); - responseContent = getErrorResponseJson( - "Requests must have a content header of type `application/json` or `text/csv`"); + responseContent = + getErrorResponseJson( + "Requests must have a content header of type `application/json` or `text/csv`"); } catch (Exception e) { logger.error("An unknown error occurred while evaluating the prediction request.", e); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/FileUtilsTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/FileUtilsTest.java index cfdd834f7f069..cc6c14764be3e 100644 --- a/mlflow/java/scoring/src/test/java/org/mlflow/FileUtilsTest.java +++ b/mlflow/java/scoring/src/test/java/org/mlflow/FileUtilsTest.java @@ -1,6 +1,5 @@ package org.mlflow.utils; -import java.io.ByteArrayInputStream; import org.junit.Assert; import org.junit.Test; diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/LeapFrameSchemaTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/LeapFrameSchemaTest.java deleted file mode 100644 index d6179d38d517c..0000000000000 --- a/mlflow/java/scoring/src/test/java/org/mlflow/LeapFrameSchemaTest.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.mlflow.sagemaker; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import org.junit.Assert; -import org.junit.Test; -import org.mlflow.MLflowRootResourceProvider; -import org.mlflow.utils.SerializationUtils; - -/** Unit tests for the {@link LeapFrameSchema} module */ -public class LeapFrameSchemaTest { - @Test - public void testLeapFrameSchemaIsLoadedFromValidPathWithCorrectFieldOrder() throws IOException { - String schemaPath = MLflowRootResourceProvider.getResourcePath("mleap_model/mleap/schema.json"); - LeapFrameSchema schema = LeapFrameSchema.fromPath(schemaPath); - List orderedFieldNames = schema.getFieldNames(); - List expectedOrderedFieldNames = Arrays.asList("text", "topic"); - 
Assert.assertEquals(orderedFieldNames, expectedOrderedFieldNames); - } -} diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/LeapFrameUtilsTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/LeapFrameUtilsTest.java deleted file mode 100644 index 7687bf8b54e2d..0000000000000 --- a/mlflow/java/scoring/src/test/java/org/mlflow/LeapFrameUtilsTest.java +++ /dev/null @@ -1,18 +0,0 @@ -package org.mlflow.sagemaker; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import ml.combust.mleap.runtime.frame.DefaultLeapFrame; -import org.junit.Test; - -/** Unit tests for the {@link LeapFrameUtils} module */ -public class LeapFrameUtilsTest { - @Test - public void testValidSerializedLeapFrameIsDeserializedAsLeapFrameObjectSuccessfully() - throws IOException { - String framePath = getClass().getResource("sample_leapframe.json").getFile(); - String frameJson = new String(Files.readAllBytes(Paths.get(framePath))); - DefaultLeapFrame leapFrame = LeapFrameUtils.getLeapFrameFromJson(frameJson); - } -} diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/LoaderModuleTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/LoaderModuleTest.java index d9a4c337be181..1d7c48bc57afb 100644 --- a/mlflow/java/scoring/src/test/java/org/mlflow/LoaderModuleTest.java +++ b/mlflow/java/scoring/src/test/java/org/mlflow/LoaderModuleTest.java @@ -12,7 +12,7 @@ public class LoaderModuleTest { public void testMLeapLoaderModuleDeserializesValidMLeapModelAsPredictor() { String modelPath = MLflowRootResourceProvider.getResourcePath("mleap_model"); try { - Predictor predictor = (new MLeapLoader()).load(modelPath); + Predictor predictor = new MLeapLoader().load(modelPath); } catch (PredictorLoadingException e) { e.printStackTrace(); Assert.fail("Encountered unexpected `PredictorLoadingException`!"); diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/MLeapPredictorTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/MLeapPredictorTest.java index 10e7d929dfd5f..723970a67787e 100644 --- a/mlflow/java/scoring/src/test/java/org/mlflow/MLeapPredictorTest.java +++ b/mlflow/java/scoring/src/test/java/org/mlflow/MLeapPredictorTest.java @@ -1,6 +1,5 @@ package org.mlflow.sagemaker; -import com.fasterxml.jackson.core.JsonProcessingException; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; @@ -27,7 +26,7 @@ public void testMLeapPredictorGetPipelineYieldsValidMLeapTransformer() public void testMLeapPredictorEvaluatesCompatibleInputCorrectly() throws IOException, PredictorEvaluationException { String modelPath = MLflowRootResourceProvider.getResourcePath("mleap_model"); - MLeapPredictor predictor = (MLeapPredictor) (new MLeapLoader()).load(modelPath); + MLeapPredictor predictor = (MLeapPredictor) new MLeapLoader().load(modelPath); String sampleInputPath = MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); @@ -38,20 +37,24 @@ public void testMLeapPredictorEvaluatesCompatibleInputCorrectly() } @Test - public void - testMLeapPredictorThrowsPredictorEvaluationExceptionWhenEvaluatingInputWithMissingField() - throws IOException, JsonProcessingException { + public void testMLeapPredictorThrowsPredictorEvaluationExceptionWhenInputIsMissingField() + throws IOException { String modelPath = MLflowRootResourceProvider.getResourcePath("mleap_model"); MLeapPredictor predictor = (MLeapPredictor) (new MLeapLoader()).load(modelPath); String sampleInputPath = MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); 
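+ // The sample input uses the split orientation ({"columns": [...], "data": [[...]]}); the + // lines below drop the "topic" column from both the column list and every data row to + // provoke the schema-validation failure this test expects.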
String sampleInputJson = new String(Files.readAllBytes(Paths.get(sampleInputPath))); - List> sampleInput = - SerializationUtils.fromJson(sampleInputJson, List.class); - - sampleInput.get(0).remove("topic"); + Map> sampleInput = SerializationUtils.fromJson(sampleInputJson, Map.class); + List> rows = (List>) sampleInput.get("data"); + List columnNames = (List) sampleInput.get("columns"); + int topicIndex = columnNames.indexOf("topic"); + columnNames.remove("topic"); + for (List row : rows) { + row.remove(topicIndex); + } String badInputJson = SerializationUtils.toJson(sampleInput); + PredictorDataWrapper inputData = new PredictorDataWrapper(badInputJson, PredictorDataWrapper.ContentType.Json); try { @@ -77,8 +80,9 @@ public void testMLeapPredictorThrowsPredictorEvaluationExceptionWhenEvaluatingBa try { predictor.predict(badInputData); - Assert.fail("Expected predictor evaluation on a bad JSON input" - + "to throw a PredictorEvaluationException."); + Assert.fail( + "Expected predictor evaluation on a bad JSON input" + + " to throw a PredictorEvaluationException."); } catch (PredictorEvaluationException e) { // Success } diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/PandasDataFrameTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/PandasDataFrameTest.java index 5917a2d50843b..e172571d28326 100644 --- a/mlflow/java/scoring/src/test/java/org/mlflow/PandasDataFrameTest.java +++ b/mlflow/java/scoring/src/test/java/org/mlflow/PandasDataFrameTest.java @@ -1,28 +1,30 @@ package org.mlflow.sagemaker; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; -import org.junit.Assert; -import org.junit.Test; -import java.io.File; -import org.mlflow.utils.SerializationUtils; -import java.io.IOException; +import java.util.HashMap; import java.util.List; import java.util.Map; -import org.mlflow.MLflowRootResourceProvider; - +import ml.combust.mleap.core.types.StructType; import ml.combust.mleap.runtime.frame.DefaultLeapFrame; -import com.fasterxml.jackson.core.JsonProcessingException; +import org.junit.Assert; +import org.junit.Test; +import org.mlflow.MLflowRootResourceProvider; +import org.mlflow.utils.SerializationUtils; public class PandasDataFrameTest { + + private final MLeapSchemaReader schemaReader = new MLeapSchemaReader(); + @Test public void testPandasDataFrameIsProducedFromValidJsonSuccessfully() throws IOException { String sampleInputPath = MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); String sampleInputJson = new String(Files.readAllBytes(Paths.get(sampleInputPath))); - PandasRecordOrientedDataFrame pandasFrame = - PandasRecordOrientedDataFrame.fromJson(sampleInputJson); + PandasSplitOrientedDataFrame pandasFrame = + PandasSplitOrientedDataFrame.fromJson(sampleInputJson); Assert.assertEquals((pandasFrame.size() == 1), true); } @@ -30,54 +32,102 @@ public void testLoadingPandasDataFrameFromInvalidJsonThrowsIOException() { String badFrameJson = "this is not valid frame json"; try { - PandasRecordOrientedDataFrame pandasFrame = - PandasRecordOrientedDataFrame.fromJson(badFrameJson); - Assert.fail("Expected parsing a pandas dataframe from invalid json to throw an IOException."); + PandasSplitOrientedDataFrame pandasFrame = + PandasSplitOrientedDataFrame.fromJson(badFrameJson); + Assert.fail("Expected parsing a pandas DataFrame from invalid JSON to throw an IOException."); + } catch (IOException e) { // Succeed } } + @Test + public void
testLoadingPandasDataFrameFromJsonWithInvalidSplitOrientationSchemaThrowsException() + throws IOException { + String sampleInputPath = + MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); + String sampleInputJson = new String(Files.readAllBytes(Paths.get(sampleInputPath))); + Map> sampleInput = SerializationUtils.fromJson(sampleInputJson, Map.class); + sampleInput.remove("columns"); + String missingSchemaFieldJson = SerializationUtils.toJson(sampleInput); + + try { + PandasSplitOrientedDataFrame pandasFrame = + PandasSplitOrientedDataFrame.fromJson(missingSchemaFieldJson); + Assert.fail( + "Expected parsing a pandas DataFrame with an invalid `split` orientation schema" + + " to throw an exception."); + } catch (InvalidSchemaException e) { + // Succeed + } + } + + @Test + public void testLoadingPandasDataFrameFromJsonWithInvalidFrameDataThrowsException() + throws IOException { + String sampleInputPath = + MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); + String sampleInputJson = new String(Files.readAllBytes(Paths.get(sampleInputPath))); + Map> sampleInput = SerializationUtils.fromJson(sampleInputJson, Map.class); + + // Remove a column from the first row of the sample input and check for an exception + // during parsing + Map> missingColumnInFirstRowInput = new HashMap<>(sampleInput); + List> rows = (List>) missingColumnInFirstRowInput.get("data"); + rows.get(0).remove(0); + String missingColumnInFirstRowJson = SerializationUtils.toJson(missingColumnInFirstRowInput); + try { + PandasSplitOrientedDataFrame pandasFrame = + PandasSplitOrientedDataFrame.fromJson(missingColumnInFirstRowJson); + Assert.fail("Expected parsing a pandas DataFrame with invalid data to throw an exception."); + } catch (IllegalArgumentException e) { + // Succeed + } + } + @Test public void testPandasDataFrameWithMLeapCompatibleSchemaIsConvertedToLeapFrameSuccessfully() - throws JsonProcessingException, IOException { - String schemaPath = MLflowRootResourceProvider.getResourcePath("mleap_model/mleap/schema.json"); - LeapFrameSchema leapFrameSchema = LeapFrameSchema.fromPath(schemaPath); + throws IOException { + String schemaPath = MLflowRootResourceProvider.getResourcePath("mleap_model/mleap/schema.json"); + StructType leapFrameSchema = schemaReader.fromFile(schemaPath); String sampleInputPath = MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); String sampleInputJson = new String(Files.readAllBytes(Paths.get(sampleInputPath))); - PandasRecordOrientedDataFrame pandasFrame = - PandasRecordOrientedDataFrame.fromJson(sampleInputJson); + PandasSplitOrientedDataFrame pandasFrame = + PandasSplitOrientedDataFrame.fromJson(sampleInputJson); DefaultLeapFrame leapFrame = pandasFrame.toLeapFrame(leapFrameSchema); } /** - * In order to produce a leap frame from a pandas dataframe, the pandas dataframe - * must contain all of the fields specified by the intended leap frame's schema. - * This test ensures that an exception is thrown if such a field is missing + * In order to produce a leap frame from a Pandas DataFrame, the Pandas DataFrame must contain all + * of the fields specified by the intended leap frame's schema. 
This test ensures that an + * exception is thrown if such a field is missing */ @Test public void testConvertingPandasDataFrameWithMissingMLeapSchemaFieldThrowsException() - throws IOException, JsonProcessingException { + throws IOException { String schemaPath = MLflowRootResourceProvider.getResourcePath("mleap_model/mleap/schema.json"); - LeapFrameSchema leapFrameSchema = LeapFrameSchema.fromPath(schemaPath); - + StructType leapFrameSchema = schemaReader.fromFile(schemaPath); String sampleInputPath = MLflowRootResourceProvider.getResourcePath("mleap_model/sample_input.json"); String sampleInputJson = new String(Files.readAllBytes(Paths.get(sampleInputPath))); - List<Map<String, Object>> sampleInput = - SerializationUtils.fromJson(sampleInputJson, List.class); - sampleInput.get(0).remove("topic"); - String missingFieldJson = SerializationUtils.toJson(sampleInput); + Map<String, List<Object>> sampleInput = SerializationUtils.fromJson(sampleInputJson, Map.class); + List<List<Object>> rows = (List<List<Object>>) sampleInput.get("data"); + List<String> columnNames = (List<String>) sampleInput.get("columns"); + int topicIndex = columnNames.indexOf("topic"); + columnNames.remove("topic"); + for (List<Object> row : rows) { + row.remove(topicIndex); + } + String missingDataColumnJson = SerializationUtils.toJson(sampleInput); - PandasRecordOrientedDataFrame pandasFrame = - PandasRecordOrientedDataFrame.fromJson(missingFieldJson); + PandasSplitOrientedDataFrame pandasFrame = + PandasSplitOrientedDataFrame.fromJson(missingDataColumnJson); try { pandasFrame.toLeapFrame(leapFrameSchema); Assert.fail( - "Expected leap frame conversion of a pandas dataframe with a missing field to fail."); + "Expected leap frame conversion of a pandas DataFrame with a missing field to fail."); } catch (InvalidSchemaException e) { // Succeed } diff --git a/mlflow/java/scoring/src/test/java/org/mlflow/ScoringServerTest.java b/mlflow/java/scoring/src/test/java/org/mlflow/ScoringServerTest.java index bb691831b86b1..10f88748860eb 100644 --- a/mlflow/java/scoring/src/test/java/org/mlflow/ScoringServerTest.java +++ b/mlflow/java/scoring/src/test/java/org/mlflow/ScoringServerTest.java @@ -1,6 +1,5 @@ package org.mlflow.sagemaker; -import com.fasterxml.jackson.core.JsonProcessingException; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -126,7 +125,7 @@ public void testMultipleServersRunOnDifferentPortsSucceedfully() throws IOExcept @Test public void testScoringServerWithValidPredictorRespondsToInvocationWithPredictorOutputContent() - throws IOException, JsonProcessingException { + throws IOException { Map<String, String> predictorDict = new HashMap<>(); predictorDict.put("Text", "Response"); String predictorJson = SerializationUtils.toJson(predictorDict); diff --git a/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/mleap/schema.json b/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/mleap/schema.json index 1f1b71e0301c1..17b1650a48bf2 100644 --- a/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/mleap/schema.json +++ b/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/mleap/schema.json @@ -1,17 +1,17 @@ { "fields": [ { - "nullable": true, - "type": "string", - "name": "text", + "nullable": true, + "type": "string", + "name": "text", "metadata": {} - }, + }, { - "nullable": true, - "type": "string", - "name": "topic", + "nullable": true, + "type": "string", + "name": "topic", "metadata": {} } - ], + ], "type": "struct" } diff --git a/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/sample_input.json
b/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/sample_input.json index 91f7f3508a2e8..2f439d0fca121 100644 --- a/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/sample_input.json +++ b/mlflow/java/scoring/src/test/resources/org/mlflow/mleap_model/sample_input.json @@ -1,6 +1,12 @@ -[ - { - "topic": "alt.atheism", - "text": "From: marshall@csugrad.cs.vt.edu (Kevin Marshall) Subject: Re: Faith and Dogma Organization: Virginia Tech Computer Science Dept, Blacksburg, VA Lines: 96 NNTP-Posting-Host: csugrad.cs.vt.edsadsadasdasdaddu tgk@cs.toronto.edu (Todd Kelley) writes: >In light of what happened in Waco, I need to get something of my >chest. > >Faith and dogma are dangerous. " - } -] \ No newline at end of file +{ + "data": [ + [ + "From: marshall@csugrad.cs.vt.edu (Kevin Marshall) Subject: Re: Faith and Dogma Organization: Virginia Tech Computer Science Dept, Blacksburg, VA Lines: 96 NNTP-Posting-Host: csugrad.cs.vt.edsadsadasdasdaddu tgk@cs.toronto.edu (Todd Kelley) writes: >In light of what happened in Waco, I need to get something of my >chest. > >Faith and dogma are dangerous. ", + "alt.atheism" + ] + ], + "columns": [ + "text", + "topic" + ] +} diff --git a/mlflow/java/scoring/src/test/resources/org/mlflow/sagemaker/sample_leapframe.json b/mlflow/java/scoring/src/test/resources/org/mlflow/sagemaker/sample_leapframe.json deleted file mode 100644 index 69d2cb4d169aa..0000000000000 --- a/mlflow/java/scoring/src/test/resources/org/mlflow/sagemaker/sample_leapframe.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "rows": [ - [ - "From: marshall@csugrad.cs.vt.edu (Kevin Marshall) Subject: Re: Faith and Dogma Organization: Virginia Tech Computer Science Dept, Blacksburg, VA Lines: 96 NNTP-Posting-Host: csugrad.cs.vt.edsadsadasdasdaddu tgk@cs.toronto.edu (Todd Kelley) writes: >In light of what happened in Waco, I need to get something of my >chest. > >Faith and dogma are dangerous. ", - "alt.atheism" - ] - ], - "schema": { - "fields": [ - { - "type": "string", - "name": "text" - }, - { - "type": "string", - "name": "topic" - } - ] - } -} \ No newline at end of file diff --git a/mlflow/keras.py b/mlflow/keras.py index 6b669815ecbd6..11514cf3400c7 100644 --- a/mlflow/keras.py +++ b/mlflow/keras.py @@ -10,62 +10,183 @@ from __future__ import absolute_import +import importlib import os - -import keras.backend as K +import yaml import pandas as pd from mlflow import pyfunc from mlflow.models import Model import mlflow.tracking +from mlflow.exceptions import MlflowException +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration + +FLAVOR_NAME = "keras" +# File name to which custom objects cloudpickle is saved - used during save and load +_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle" +_KERAS_MODULE_SPEC_PATH = "keras_module.txt" +# File name to which keras model is saved +_MODEL_SAVE_PATH = "model.h5" -def save_model(keras_model, path, conda_env=None, mlflow_model=Model()): +def get_default_conda_env(include_cloudpickle=False, keras_module=None): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. 
+ """ + import tensorflow as tf + keras_dependency = [] # if we use tf.keras we only need to declare dependency on tensorflow + if keras_module is None: + import keras + keras_module = keras + if keras_module.__name__ == "keras": + keras_dependency = ["keras=={}".format(keras_module.__version__)] + pip_deps = None + if include_cloudpickle: + import cloudpickle + pip_deps = ["cloudpickle=={}".format(cloudpickle.__version__)] + return _mlflow_conda_env( + additional_conda_deps=keras_dependency + [ + # The Keras pyfunc representation requires the TensorFlow + # backend for Keras. Therefore, the conda environment must + # include TensorFlow + "tensorflow=={}".format(tf.__version__), + ], + additional_pip_deps=pip_deps, + additional_conda_channels=None) + + +def save_model(keras_model, path, conda_env=None, mlflow_model=Model(), custom_objects=None, + keras_module=None, **kwargs): """ Save a Keras model to a path on the local file system. :param keras_model: Keras model to be saved. :param path: Local path where the model is to be saved. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this decribes the environment + this model should be run in. At minimum, it should specify the + dependencies contained in :func:`get_default_conda_env()`. If + ``None``, the default :func:`get_default_conda_env()` environment is + added to the model. The following is an *example* dictionary + representation of a Conda environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'keras=2.2.4', + 'tensorflow=1.8.0' + ] + } + :param keras_module: Keras module to be used to save / load the model. If not provided, MLflow + will attempt to infer the Keras module based on the given model. :param mlflow_model: MLflow model config this flavor is being added to. + :param kwargs: kwargs to pass to ``keras_model.save`` method. >>> import mlflow >>> # Build, compile, and train your model >>> keras_model = ... >>> keras_model_path = ... - >>> keras_model.compile(optimizer="rmsprop", loss="mse", metrics["accuracy"]) + >>> keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"]) >>> results = keras_model.fit( ... x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val)) ... # Save the model as an MLflow Model >>> mlflow.keras.save_model(keras_model, keras_model_path) """ - import keras + if keras_module is None: + def _is_plain_keras(model): + try: + # NB: Network is the first parent with save method + import keras.engine.network + return isinstance(model, keras.engine.network.Network) + except ImportError: + return False + + def _is_tf_keras(model): + try: + # NB: Network is not exposed in tf.keras, we check for Model instead. 
+ import tensorflow.keras.models + return isinstance(model, tensorflow.keras.models.Model) + except ImportError: + return False + + if _is_plain_keras(keras_model): + keras_module = importlib.import_module("keras") + elif _is_tf_keras(keras_model): + keras_module = importlib.import_module("tensorflow.keras") + else: + raise MlflowException("Unable to infer keras module from the model, please specify " + "which keras module ('keras' or 'tensorflow.keras') is to be " + "used to save and load the model.") + elif type(keras_module) == str: + keras_module = importlib.import_module(keras_module) path = os.path.abspath(path) if os.path.exists(path): - raise Exception("Path '{}' already exists".format(path)) - os.makedirs(path) - model_file = os.path.join(path, "model.h5") - keras_model.save(model_file) - + raise MlflowException("Path '{}' already exists".format(path)) + data_subpath = "data" + data_path = os.path.join(path, data_subpath) + os.makedirs(data_path) + if custom_objects is not None: + _save_custom_objects(data_path, custom_objects) + with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f: + f.write(keras_module.__name__) + model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH) + keras_model.save(os.path.join(path, model_subpath), **kwargs) + mlflow_model.add_flavor(FLAVOR_NAME, + keras_module=keras_module.__name__, + keras_version=keras_module.__version__, + data=data_subpath) + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env(include_cloudpickle=custom_objects is not None, + keras_module=keras_module) + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras", - data="model.h5", env=conda_env) - mlflow_model.add_flavor("keras", keras_version=keras.__version__) + data=data_subpath, env=conda_env_subpath) mlflow_model.save(os.path.join(path, "MLmodel")) -def log_model(keras_model, artifact_path, **kwargs): +def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None, + **kwargs): """ Log a Keras model as an MLflow artifact for the current run. :param keras_model: Keras model to be saved. :param artifact_path: Run-relative artifact path. + :param conda_env: Either a dictionary representation of a Conda environment or + the path to a Conda environment yaml file. + If provided, this describes the environment this model should be + run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If ``None``, the default + :func:`mlflow.keras.get_default_conda_env()` environment is added to + the model. The following is an *example* dictionary representation of a + Conda environment:: + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'keras=2.2.4', + 'tensorflow=1.8.0' + ] + } + :param kwargs: kwargs to pass to ``keras_model.save`` method. >>> from keras import Dense, layers >>> import mlflow >>> # Build, compile, and train your model >>> keras_model = ... - >>> keras_model.compile(optimizer="rmsprop", loss="mse", metrics["accuracy"]) + >>> keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"]) >>> results = keras_model.fit( ...
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val)) >>> # Log metrics and log the model @@ -73,12 +194,47 @@ def log_model(keras_model, artifact_path, **kwargs): >>> mlflow.keras.log_model(keras_model, "models") """ Model.log(artifact_path=artifact_path, flavor=mlflow.keras, - keras_model=keras_model, **kwargs) + keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects, + keras_module=keras_module, **kwargs) -def _load_model(model_file): - import keras.models - return keras.models.load_model(os.path.abspath(model_file)) +def _save_custom_objects(path, custom_objects): + """ + Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later. + + :param path: An absolute path that points to the data directory within /path/to/model. + :param custom_objects: A dictionary that maps layer names to layer definitions + """ + import cloudpickle + custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH) + with open(custom_objects_path, "wb") as out_f: + cloudpickle.dump(custom_objects, out_f) + + +def _load_model(model_path, keras_module, **kwargs): + keras_models = importlib.import_module(keras_module.__name__ + ".models") + custom_objects = kwargs.pop("custom_objects", {}) + custom_objects_path = None + if os.path.isdir(model_path): + if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)): + custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH) + model_path = os.path.join(model_path, _MODEL_SAVE_PATH) + if custom_objects_path is not None: + import cloudpickle + with open(custom_objects_path, "rb") as in_f: + pickled_custom_objects = cloudpickle.load(in_f) + pickled_custom_objects.update(custom_objects) + custom_objects = pickled_custom_objects + from distutils.version import StrictVersion + if StrictVersion(keras_module.__version__.split('-')[0]) >= StrictVersion("2.2.3"): + # NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead + # of string to avoid issues. + import h5py + with h5py.File(os.path.abspath(model_path), "r") as model_path: + return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs) + else: + # NOTE: Older versions of Keras only handle filepath. + return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs) class _KerasModelWrapper: @@ -95,11 +251,21 @@ def predict(self, dataframe): return predicted -def _load_pyfunc(model_file): +def _load_pyfunc(path): """ Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + + :param path: Local filesystem path to the MLflow Model with the ``keras`` flavor. 
""" - if K._BACKEND == 'tensorflow': + if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)): + with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), "r") as f: + keras_module = importlib.import_module(f.read()) + else: + import keras + keras_module = keras + + K = importlib.import_module(keras_module.__name__ + ".backend") + if keras_module.__name__ == "tensorflow.keras" or K._BACKEND == 'tensorflow': import tensorflow as tf graph = tf.Graph() sess = tf.Session(graph=graph) @@ -109,24 +275,39 @@ def _load_pyfunc(model_file): with graph.as_default(): with sess.as_default(): # pylint:disable=not-context-manager K.set_learning_phase(0) - m = _load_model(model_file) + m = _load_model(path, keras_module=keras_module, compile=False) return _KerasModelWrapper(m, graph, sess) else: - raise Exception("Unsupported backend '%s'" % K._BACKEND) + raise MlflowException("Unsupported backend '%s'" % K._BACKEND) -def load_model(path, run_id=None): +def load_model(model_uri, **kwargs): """ - Load a Keras model from a local file (if ``run_id`` is None) or a run. + Load a Keras model from a local file or a run. + + Extra arguments are passed through to keras.load_model. + + :param model_uri: The location, in URI format, of the MLflow model. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. - :param path: Local filesystem path or run-relative artifact path to the model saved - by :py:func:`mlflow.keras.log_model`. - :param run_id: Run ID. If provided, combined with ``path`` to identify the model. + :return: A Keras model instance. - >>> # Load persisted model as a Keras model or as a PyFunc, call predict() on a Pandas DataFrame - >>> keras_model = mlflow.keras.load_model("models", run_id="96771d893a5e46159d9f3b49bf9013e2") + >>> # Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame + >>> keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models") >>> predictions = keras_model.predict(x_test) """ - if run_id is not None: - path = mlflow.tracking.utils._get_model_log_dir(model_name=path, run_id=run_id) - return _load_model(os.path.join(path, "model.h5")) + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) + keras_module = importlib.import_module(flavor_conf.get("keras_module", "keras")) + keras_model_artifacts_path = os.path.join( + local_model_path, + flavor_conf.get("data", _MODEL_SAVE_PATH)) + return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs) diff --git a/mlflow/mleap.py b/mlflow/mleap.py index 00c03df321de9..09e2c2bcc003c 100644 --- a/mlflow/mleap.py +++ b/mlflow/mleap.py @@ -16,10 +16,12 @@ import mlflow from mlflow.models import Model from mlflow.exceptions import MlflowException +from mlflow.utils import keyword_only FLAVOR_NAME = "mleap" +@keyword_only def log_model(spark_model, sample_input, artifact_path): """ Log a Spark MLLib model in MLeap format as an MLflow artifact @@ -61,15 +63,17 @@ def log_model(spark_model, sample_input, artifact_path): >>> pipeline = Pipeline(stages=[tokenizer, hashingTF, lr]) >>> model = pipeline.fit(training) >>> #log parameters - >>> mlflow.log_parameter("max_iter", 10) - >>> mlflow.log_parameter("reg_param", 0.001) + >>> 
mlflow.log_param("max_iter", 10) + >>> mlflow.log_param("reg_param", 0.001) >>> #log the Spark MLlib model in MLeap format - >>> mlflow.mleap.log_model(model, test_df, "mleap-model") + >>> mlflow.mleap.log_model(spark_model=model, sample_input=test_df, + >>> artifact_path="mleap-model") """ return Model.log(artifact_path=artifact_path, flavor=mlflow.mleap, spark_model=spark_model, sample_input=sample_input) +@keyword_only def save_model(spark_model, sample_input, path, mlflow_model=Model()): """ Save a Spark MLlib PipelineModel in MLeap format at a local path. @@ -86,20 +90,13 @@ def save_model(spark_model, sample_input, path, mlflow_model=Model()): required by MLeap for data schema inference. :param path: Local path where the model is to be saved. :param mlflow_model: :py:mod:`mlflow.models.Model` to which this flavor is being added. - - >>> import mlflow - >>> import mlflow.mleap - >>> #set values as appropriate - >>> spark_model = ... - >>> model_save_dir = ... - >>> sample_input_df = ... - >>> #save the spark MLlib model in MLeap flavor - >>> mlflow.mleap.save_model(spark_model, sample_input_df, model_save_dir) """ - add_to_model(mlflow_model, path, spark_model, sample_input) + add_to_model(mlflow_model=mlflow_model, path=path, spark_model=spark_model, + sample_input=sample_input) mlflow_model.save(os.path.join(path, "MLmodel")) +@keyword_only def add_to_model(mlflow_model, path, spark_model, sample_input): """ Add the MLeap flavor to an existing MLflow model. @@ -110,16 +107,6 @@ def add_to_model(mlflow_model, path, spark_model, sample_input): cannot contain any custom transformers. :param sample_input: Sample PySpark DataFrame input that the model can evaluate. This is required by MLeap for data schema inference. - - >>> import mlflow - >>> import mlflow.mleap - >>> #set values - >>> mlflow_model = ... - >>> spark_model = ... - >>> model_path_dir = ... - >>> sample_input_df = - >>> #add MLeap flavor to our MLflow model - >>> mlflow.mleap.add_to_model(mlflow_model,model_path_dir, sample_input_df) """ from pyspark.ml.pipeline import PipelineModel from pyspark.sql import DataFrame @@ -136,6 +123,9 @@ def add_to_model(mlflow_model, path, spark_model, sample_input): raise Exception("The sample input must be a PySpark dataframe of type `{df_type}`".format( df_type=DataFrame.__module__)) + # MLeap's model serialization routine requires an absolute output path + path = os.path.abspath(path) + mleap_path_full = os.path.join(path, "mleap") mleap_datapath_sub = os.path.join("mleap", "model") mleap_datapath_full = os.path.join(path, mleap_datapath_sub) @@ -179,10 +169,10 @@ def add_to_model(mlflow_model, path, spark_model, sample_input): def _get_mleap_schema(dataframe): """ - :param dataframe: A PySpark dataframe object + :param dataframe: A PySpark DataFrame object :return: The schema of the supplied dataframe, in MLeap format. This serialized object of type - `ml.combust.mleap.core.types.StructType`, represented as a JSON dictionary. + ``ml.combust.mleap.core.types.StructType``, represented as a JSON dictionary. 
""" from pyspark.ml.util import _jvm ReflectionUtil = _jvm().py4j.reflection.ReflectionUtil @@ -214,5 +204,5 @@ def _handle_py4j_error(reraised_error_type, reraised_error_text): class MLeapSerializationException(MlflowException): - """Exception thrown when a model or dataframe cannot be serialized in MLeap format""" + """Exception thrown when a model or DataFrame cannot be serialized in MLeap format""" pass diff --git a/mlflow/models/__init__.py b/mlflow/models/__init__.py index f1b495489a208..3bc9286e7a2b9 100644 --- a/mlflow/models/__init__.py +++ b/mlflow/models/__init__.py @@ -15,25 +15,26 @@ For details, see `MLflow Models <../models.html>`_. """ +from abc import abstractmethod, ABCMeta from datetime import datetime import yaml - import mlflow from mlflow.utils.file_utils import TempDir class Model(object): - """An MLflow Model that can support multiple model flavors.""" - - def __init__(self, artifact_path=None, run_id=None, utc_time_created=datetime.utcnow(), - flavors=None): + """ + An MLflow Model that can support multiple model flavors. Provides APIs for implementing + new Model flavors. + """ + def __init__(self, artifact_path=None, run_id=None, utc_time_created=None, flavors=None): # store model id instead of run_id and path to avoid confusion when model gets exported if run_id: self.run_id = run_id self.artifact_path = artifact_path - self.utc_time_created = str(utc_time_created) + self.utc_time_created = str(utc_time_created or datetime.utcnow()) self.flavors = flavors if flavors is not None else {} def add_flavor(self, name, **params): @@ -52,6 +53,9 @@ def save(self, path): @classmethod def load(cls, path): """Load a model from its YAML representation.""" + import os + if os.path.isdir(path): + path = os.path.join(path, "MLmodel") with open(path) as f: return cls(**yaml.safe_load(f.read())) @@ -68,7 +72,67 @@ def log(cls, artifact_path, flavor, **kwargs): """ with TempDir() as tmp: local_path = tmp.path("model") - run_id = mlflow.tracking.fluent._get_or_start_run().info.run_uuid + run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id mlflow_model = cls(artifact_path=artifact_path, run_id=run_id) flavor.save_model(path=local_path, mlflow_model=mlflow_model, **kwargs) mlflow.tracking.fluent.log_artifacts(local_path, artifact_path) + + +class FlavorBackend(object): + """ + Abstract class for Flavor Backend. + This class defines the API interface for local model deployment of MLflow model flavors. + """ + + __metaclass__ = ABCMeta + + def __init__(self, config, **kwargs): # pylint: disable=unused-argument + self._config = config + + @abstractmethod + def predict(self, model_uri, input_path, output_path, content_type, json_format): + """ + Generate predictions using a saved MLflow model referenced by the given URI. + Input and output are read from and written to a file or stdin / stdout. + + :param model_uri: URI pointing to the MLflow model to be used for scoring. + :param input_path: Path to the file with input data. If not specified, data is read from + stdin. + :param output_path: Path to the file with output predictions. If not specified, data is + written to stdout. + :param content_type: Specifies the input format. Can be one of {'json', 'csv'} + :param json_format: Only applies if content_type == 'json'. Specifies how is the input data + encoded in json. Can be one of {'split', 'records'} mirroring the + behavior of Pandas orient attribute. 
The default is 'split' which + expects dict-like data: ``{'index' -> [index], 'columns' -> [columns], + 'data' -> [values]}``, where index is optional. + For more information see "https://pandas.pydata.org/ + pandas-docs/stable/reference/api/pandas.read_json.html" + """ + pass + + @abstractmethod + def serve(self, model_uri, port, host): + """ + Serve a saved MLflow model locally. + :param model_uri: URI pointing to the MLflow model to be used for scoring. + :param port: Port to deploy the model to. + :param host: Host to use for the model deployment. Defaults to 'localhost'. + """ + pass + + @abstractmethod + def can_score_model(self): + """ + Check whether this flavor backend can be deployed in the current environment. + + :return: True if this flavor backend can be applied in the current environment. + """ + pass + + def can_build_image(self): + """ + :return: True if this flavor has a `build_image` method defined for building a docker + container capable of serving the model, False otherwise. + """ + return callable(getattr(self.__class__, 'build_image', None)) diff --git a/mlflow/models/cli.py b/mlflow/models/cli.py new file mode 100644 index 0000000000000..ff527571dc248 --- /dev/null +++ b/mlflow/models/cli.py @@ -0,0 +1,139 @@ +import logging +import click +import os +import posixpath + +from mlflow.models import Model +from mlflow.models.flavor_backend_registry import get_flavor_backend +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.file_utils import TempDir +from mlflow.utils import cli_args + +_logger = logging.getLogger(__name__) + + +@click.group("models") +def commands(): + """ + Deploy MLflow models locally. + + To deploy a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI + environment variable to the URL of the desired server. + """ + pass + + +@commands.command("serve") +@cli_args.MODEL_URI +@cli_args.PORT +@cli_args.HOST +@cli_args.WORKERS +@cli_args.NO_CONDA +@cli_args.INSTALL_MLFLOW +def serve(model_uri, port, host, workers, no_conda=False, install_mlflow=False): + """ + Serve a model saved with MLflow by launching a webserver on the specified host and port. For + information about the input data formats accepted by the webserver, see the following + documentation: https://www.mlflow.org/docs/latest/models.html#model-deployment. + + You can make requests to ``POST /invocations`` in pandas split- or record-oriented formats. + + Example: + + .. code-block:: bash + + $ mlflow models serve -m runs:/my-run-id/model-path & + + $ curl http://127.0.0.1:5000/invocations -H 'Content-Type: application/json' -d '{ + "columns": ["a", "b", "c"], + "data": [[1, 2, 3], [4, 5, 6]] + }' + """ + return _get_flavor_backend(model_uri, + no_conda=no_conda, + workers=workers, + install_mlflow=install_mlflow).serve(model_uri=model_uri, port=port, + host=host) + + +@commands.command("predict") +@cli_args.MODEL_URI +@click.option("--input-path", "-i", default=None, + help="CSV containing pandas DataFrame to predict against.") +@click.option("--output-path", "-o", default=None, + help="File to output results to as json file. If not provided, output to stdout.") +@click.option("--content-type", "-t", default="json", + help="Content type of the input file. Can be one of {'json', 'csv'}.") +@click.option("--json-format", "-j", default="split", + help="Only applies if the content type is 'json'. Specify how the data is encoded. " + "Can be one of {'split', 'records'} mirroring the behavior of Pandas orient " + "attribute.
The default is 'split' which expects dict-like data: " + "{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}, " + "where index is optional. For more information see " + "https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json" + ".html") +@cli_args.NO_CONDA +@cli_args.INSTALL_MLFLOW +def predict(model_uri, input_path, output_path, content_type, json_format, no_conda, + install_mlflow): + """ + Generate predictions in json format using a saved MLflow model. For information about the input + data formats accepted by this function, see the following documentation: + https://www.mlflow.org/docs/latest/models.html#model-deployment. + """ + if content_type == "json" and json_format not in ("split", "records"): + raise Exception("Unsupported json format '{}'.".format(json_format)) + return _get_flavor_backend(model_uri, no_conda=no_conda, + install_mlflow=install_mlflow).predict(model_uri=model_uri, + input_path=input_path, + output_path=output_path, + content_type=content_type, + json_format=json_format) + + +@commands.command("build-docker") +@cli_args.MODEL_URI +@click.option("--name", "-n", default="mlflow-pyfunc-servable", + help="Name to use for built image") +@cli_args.INSTALL_MLFLOW +def build_docker(model_uri, name, install_mlflow): + """ + **EXPERIMENTAL**: Builds a Docker image whose default entrypoint serves the specified MLflow + model at port 8080 within the container, using the 'python_function' flavor. + + For example, the following command builds a docker image named 'my-image-name' that serves the + model from run 'some-run-uuid' at run-relative artifact path 'my-model': + + .. code:: bash + + mlflow models build-docker -m "runs:/some-run-uuid/my-model" -n "my-image-name" + + We can then serve the model, exposing it at port 5001 on the host via: + + .. code:: bash + + docker run -p 5001:8080 "my-image-name" + + See https://www.mlflow.org/docs/latest/python_api/mlflow.pyfunc.html for more information on the + 'python_function' flavor. + + This command is experimental (may be changed or removed in a future release without warning) + and does not guarantee that the arguments or the format of the Docker container will remain + the same. + """ + mlflow_home = os.environ.get("MLFLOW_HOME", None) + _get_flavor_backend(model_uri, build_docker=True).build_image(model_uri, name, + mlflow_home=mlflow_home, + install_mlflow=install_mlflow) + + +def _get_flavor_backend(model_uri, **kwargs): + with TempDir() as tmp: + local_path = _download_artifact_from_uri(posixpath.join(model_uri, "MLmodel"), + output_path=tmp.path()) + model = Model.load(local_path) + flavor_name, flavor_backend = get_flavor_backend(model, **kwargs) + if flavor_backend is None: + raise Exception("No suitable flavor backend was found for the model.") + _logger.info("Selected backend for flavor '%s'", flavor_name) + return flavor_backend diff --git a/mlflow/sagemaker/container/__init__.py b/mlflow/models/container/__init__.py similarity index 58% rename from mlflow/sagemaker/container/__init__.py rename to mlflow/models/container/__init__.py index 5acd30991af51..a04d0b4d17f9c 100644 --- a/mlflow/sagemaker/container/__init__.py +++ b/mlflow/models/container/__init__.py @@ -1,5 +1,5 @@ """ -Initialize the environment and start model serving on Sagemaker or local Docker container. +Initialize the environment and start model serving in a Docker container. To be executed only during the model deployment.
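For reference, the split-oriented payload accepted by the new scoring endpoint mirrors pandas' orient="split" JSON layout, so a client can build requests directly from a DataFrame. A minimal sketch of scoring a served model from Python, assuming a server started with `mlflow models serve` is listening on port 5000 and that pandas and requests are installed (neither assumption is part of this patch):

import pandas as pd
import requests

# Same payload shape as the curl example above: {"columns": [...], "data": [[...]]}.
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
payload = df.to_json(orient="split", index=False)

# POST to the /invocations endpoint exposed by `mlflow models serve`.
response = requests.post(
    "http://127.0.0.1:5000/invocations",
    data=payload,
    headers={"Content-Type": "application/json"},
)
print(response.text)  # the model's predictions, serialized as JSON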
@@ -8,11 +8,10 @@ import multiprocessing import os -import shutil import signal +import shutil from subprocess import check_call, Popen import sys -import yaml from pkg_resources import resource_filename @@ -21,12 +20,13 @@ from mlflow import pyfunc, mleap from mlflow.models import Model -from mlflow.utils.logging_utils import eprint +from mlflow.models.docker_utils import DISABLE_ENV_CREATION from mlflow.version import VERSION as MLFLOW_VERSION MODEL_PATH = "/opt/ml/model" -DEPLOYMENT_CONFIG_KEY_FLAVOR_NAME = "deployment_flavor_name" + +DEPLOYMENT_CONFIG_KEY_FLAVOR_NAME = "MLFLOW_DEPLOYMENT_FLAVOR_NAME" DEFAULT_SAGEMAKER_SERVER_PORT = 8080 @@ -51,19 +51,6 @@ def _init(cmd): args=str(sys.argv))) -def _server_dependencies_cmds(): - """ - Get commands required to install packages required to serve the model with MLflow. These are - packages outside of the user-provided environment, except for the MLflow itself. - - :return: List of commands. - """ - # TODO: Should we reinstall MLflow? What if there is MLflow in the user's conda environment? - return ["conda install -c anaconda gunicorn", "conda install -c anaconda gevent", - "pip install /opt/mlflow/." if _container_includes_mlflow_source() - else "pip install mlflow=={}".format(MLFLOW_VERSION)] - - def _serve(): """ Serve the model. @@ -87,22 +74,56 @@ def _serve(): raise Exception("This container only supports models with the MLeap or PyFunc flavors.") +def _install_pyfunc_deps(model_path=None, install_mlflow=False): + """ + Creates a conda env for serving the model at the specified path and installs almost all serving + dependencies into the environment; MLflow itself is not installed, as it is not available + via conda. + """ + # If model is a pyfunc model, create its conda env (even if it also has mleap flavor) + has_env = False + if model_path: + model_config_path = os.path.join(model_path, "MLmodel") + model = Model.load(model_config_path) + # NOTE: this differs from _serve because we always activate the env even if you're serving + # an mleap model + if pyfunc.FLAVOR_NAME not in model.flavors: + return + conf = model.flavors[pyfunc.FLAVOR_NAME] + if pyfunc.ENV in conf: + print("creating and activating custom environment") + env = conf[pyfunc.ENV] + env_path_dst = os.path.join("/opt/mlflow/", env) + env_path_dst_dir = os.path.dirname(env_path_dst) + if not os.path.exists(env_path_dst_dir): + os.makedirs(env_path_dst_dir) + shutil.copyfile(os.path.join(MODEL_PATH, env), env_path_dst) + conda_create_model_env = "conda env create -n custom_env -f {}".format(env_path_dst) + if Popen(["bash", "-c", conda_create_model_env]).wait() != 0: + raise Exception("Failed to create model environment.") + has_env = True + activate_cmd = ["source /miniconda/bin/activate custom_env"] if has_env else [] + # NB: install gunicorn[gevent] from pip rather than from conda because gunicorn is already a + # dependency of mlflow on pip and we expect mlflow to be part of the environment. + install_server_deps = ["pip install gunicorn[gevent]"] + if Popen(["bash", "-c", " && ".join(activate_cmd + install_server_deps)]).wait() != 0: + raise Exception("Failed to install serving dependencies into the model environment.") + if has_env and install_mlflow: + install_mlflow_cmd = [ + "pip install /opt/mlflow/."
if _container_includes_mlflow_source() + else "pip install mlflow=={}".format(MLFLOW_VERSION) + ] + if Popen(["bash", "-c", " && ".join(activate_cmd + install_mlflow_cmd)]).wait() != 0: + raise Exception("Failed to install mlflow into the model environment.") + + def _serve_pyfunc(model): conf = model.flavors[pyfunc.FLAVOR_NAME] bash_cmds = [] if pyfunc.ENV in conf: - print("activating custom environment") - env = conf[pyfunc.ENV] - env_path_dst = os.path.join("/opt/mlflow/", env) - env_path_dst_dir = os.path.dirname(env_path_dst) - if not os.path.exists(env_path_dst_dir): - os.makedirs(env_path_dst_dir) - # TODO: should we test that the environment does not include any of the server dependencies? - # Those are gonna be reinstalled. should probably test this on the client side - shutil.copyfile(os.path.join(MODEL_PATH, env), env_path_dst) - os.system("conda env create -n custom_env -f {}".format(env_path_dst)) - bash_cmds += ["source /miniconda/bin/activate custom_env"] + _server_dependencies_cmds() - nginx_conf = resource_filename(mlflow.sagemaker.__name__, "container/scoring_server/nginx.conf") + if not os.environ.get(DISABLE_ENV_CREATION) == "true": + _install_pyfunc_deps(MODEL_PATH, install_mlflow=True) + bash_cmds += ["source /miniconda/bin/activate custom_env"] + nginx_conf = resource_filename(mlflow.models.__name__, "container/scoring_server/nginx.conf") nginx = Popen(['nginx', '-c', nginx_conf]) # link the log streams to stdout/err so they will be logged to the container logs check_call(['ln', '-sf', '/dev/stdout', '/var/log/nginx/access.log']) @@ -111,8 +132,8 @@ def _serve_pyfunc(model): os.system("pip -V") os.system("python -V") os.system('python -c"from mlflow.version import VERSION as V; print(V)"') - cmd = ("gunicorn --timeout 60 -k gevent -b unix:/tmp/gunicorn.sock -w {nworkers} " + - "mlflow.sagemaker.container.scoring_server.wsgi:app").format(nworkers=cpu_count) + cmd = "gunicorn -w {cpu_count} ".format(cpu_count=cpu_count) + \ + "${GUNICORN_CMD_ARGS} mlflow.models.container.scoring_server.wsgi:app" bash_cmds.append(cmd) gunicorn = Popen(["/bin/bash", "-c", " && ".join(bash_cmds)]) signal.signal(signal.SIGTERM, lambda a, b: _sigterm_handler(pids=[nginx.pid, gunicorn.pid])) @@ -134,7 +155,7 @@ def _serve_mleap(): def _container_includes_mlflow_source(): - return os.path.isdir("/opt/mlflow") + return os.path.exists("/opt/mlflow/setup.py") def _train(): diff --git a/mlflow/models/container/scoring_server/__init__.py b/mlflow/models/container/scoring_server/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/sagemaker/container/scoring_server/nginx.conf b/mlflow/models/container/scoring_server/nginx.conf similarity index 94% rename from mlflow/sagemaker/container/scoring_server/nginx.conf rename to mlflow/models/container/scoring_server/nginx.conf index cb581acee1d23..143ea6c8e5633 100644 --- a/mlflow/sagemaker/container/scoring_server/nginx.conf +++ b/mlflow/models/container/scoring_server/nginx.conf @@ -15,7 +15,7 @@ http { access_log /var/log/nginx/access.log combined; upstream gunicorn { - server unix:/tmp/gunicorn.sock; + server 127.0.0.1:8000; } server { diff --git a/mlflow/sagemaker/container/scoring_server/wsgi.py b/mlflow/models/container/scoring_server/wsgi.py similarity index 100% rename from mlflow/sagemaker/container/scoring_server/wsgi.py rename to mlflow/models/container/scoring_server/wsgi.py diff --git a/mlflow/models/docker_utils.py b/mlflow/models/docker_utils.py new file mode 100644 index 0000000000000..68ac1af4e1a04 --- 
/dev/null +++ b/mlflow/models/docker_utils.py @@ -0,0 +1,109 @@ +import os +from subprocess import Popen, PIPE, STDOUT +import logging + +import mlflow +import mlflow.version +from mlflow.utils.file_utils import TempDir, _copy_project +from mlflow.utils.logging_utils import eprint + +_logger = logging.getLogger(__name__) + +DISABLE_ENV_CREATION = "MLFLOW_DISABLE_ENV_CREATION" + +_DOCKERFILE_TEMPLATE = """ +# Build an image that can serve mlflow models. +FROM ubuntu:16.04 + +RUN apt-get -y update && apt-get install -y --no-install-recommends \ wget \ curl \ nginx \ ca-certificates \ bzip2 \ build-essential \ cmake \ openjdk-8-jdk \ git-core \ maven \ && rm -rf /var/lib/apt/lists/* + +# Download and setup miniconda +RUN curl https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh >> miniconda.sh +RUN bash ./miniconda.sh -b -p /miniconda; rm ./miniconda.sh; +ENV PATH="/miniconda/bin:$PATH" +ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 +ENV GUNICORN_CMD_ARGS="--timeout 60 -k gevent" +# Set up the program in the image +WORKDIR /opt/mlflow + +{install_mlflow} + +{custom_setup_steps} +{entrypoint} +""" + + +def _get_mlflow_install_step(dockerfile_context_dir, mlflow_home): + """ + Get docker build commands for installing MLflow given a Docker context dir and optional source + directory. + """ + if mlflow_home: + mlflow_dir = _copy_project( + src_path=mlflow_home, dst_path=dockerfile_context_dir) + return ( + "COPY {mlflow_dir} /opt/mlflow\n" + "RUN pip install /opt/mlflow\n" + "RUN cd /opt/mlflow/mlflow/java/scoring && " + "mvn --batch-mode package -DskipTests && " + "mkdir -p /opt/java/jars && " + "mv /opt/mlflow/mlflow/java/scoring/target/" + "mlflow-scoring-*-with-dependencies.jar /opt/java/jars\n" + ).format(mlflow_dir=mlflow_dir) + else: + return ( + "RUN pip install mlflow=={version}\n" + "RUN mvn " + " --batch-mode dependency:copy" + " -Dartifact=org.mlflow:mlflow-scoring:{version}:pom" + " -DoutputDirectory=/opt/java\n" + "RUN mvn " + " --batch-mode dependency:copy" + " -Dartifact=org.mlflow:mlflow-scoring:{version}:jar" + " -DoutputDirectory=/opt/java/jars" + ).format(version=mlflow.version.VERSION) + + +def _build_image(image_name, entrypoint, mlflow_home=None, custom_setup_steps_hook=None): + """ + Build an MLflow Docker image that can be used to serve a model. + The image is built locally and it requires Docker to run. + + :param image_name: Docker image name. + :param entrypoint: String containing ENTRYPOINT directive for docker image + :param mlflow_home: (Optional) Path to a local copy of the MLflow GitHub repository. + If specified, the image will install MLflow from this directory. + If None, it will install MLflow from pip. + :param custom_setup_steps_hook: (Optional) Single-argument function that takes the string path + of a dockerfile context directory and returns a string containing Dockerfile commands to + run during the image build step.
+ """ + mlflow_home = os.path.abspath(mlflow_home) if mlflow_home else None + with TempDir() as tmp: + cwd = tmp.path() + install_mlflow = _get_mlflow_install_step(cwd, mlflow_home) + custom_setup_steps = custom_setup_steps_hook(cwd) if custom_setup_steps_hook else "" + with open(os.path.join(cwd, "Dockerfile"), "w") as f: + f.write(_DOCKERFILE_TEMPLATE.format( + install_mlflow=install_mlflow, custom_setup_steps=custom_setup_steps, + entrypoint=entrypoint)) + _logger.info("Building docker image with name %s", image_name) + os.system('find {cwd}/'.format(cwd=cwd)) + proc = Popen(["docker", "build", "-t", image_name, "-f", "Dockerfile", "."], + cwd=cwd, + stdout=PIPE, + stderr=STDOUT, + universal_newlines=True) + for x in iter(proc.stdout.readline, ""): + eprint(x, end='') diff --git a/mlflow/models/flavor_backend_registry.py b/mlflow/models/flavor_backend_registry.py new file mode 100644 index 0000000000000..dc29117a0f26b --- /dev/null +++ b/mlflow/models/flavor_backend_registry.py @@ -0,0 +1,26 @@ +""" +Registry of supported flavor backends. Contains a mapping of flavors to flavor backends. This +mapping is used to select suitable flavor when deploying generic MLflow models. + +Flavor backend can deploy particular flavor locally to generate predictions, deploy as a local +REST api endpoint, or build a docker image for serving the model locally or remotely. +Not all flavors have a flavor backend. +""" +import mlflow.pyfunc as pyfunc +from mlflow.pyfunc.backend import PyFuncBackend +from mlflow.rfunc.backend import RFuncBackend + + +_flavor_backends = { + pyfunc.FLAVOR_NAME: PyFuncBackend, + "crate": RFuncBackend +} + + +def get_flavor_backend(model, build_docker=True, **kwargs): + for flavor_name, flavor_config in model.flavors.items(): + if flavor_name in _flavor_backends: + backend = _flavor_backends[flavor_name](flavor_config, **kwargs) + if build_docker and backend.can_build_image() or backend.can_score_model(): + return flavor_name, backend + return None, None diff --git a/mlflow/onnx.py b/mlflow/onnx.py new file mode 100644 index 0000000000000..4e88b61f7d062 --- /dev/null +++ b/mlflow/onnx.py @@ -0,0 +1,237 @@ +""" +The ``mlflow.onnx`` module provides APIs for logging and loading ONNX models in the MLflow Model +format. This module exports MLflow Models with the following flavors: + +ONNX (native) format + This is the main flavor that can be loaded back as an ONNX model object. +:py:mod:`mlflow.pyfunc` + Produced for use by generic pyfunc-based deployment tools and batch inference. +""" + +from __future__ import absolute_import + +import os +import yaml +import numpy as np + +import pandas as pd + +from mlflow import pyfunc +from mlflow.models import Model +import mlflow.tracking +from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils import experimental +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration + +FLAVOR_NAME = "onnx" + + +@experimental +def get_default_conda_env(): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. + """ + import onnx + import onnxruntime + return _mlflow_conda_env( + additional_conda_deps=None, + additional_pip_deps=[ + "onnx=={}".format(onnx.__version__), + # The ONNX pyfunc representation requires the OnnxRuntime + # inference engine. 
Therefore, the conda environment must + # include OnnxRuntime + "onnxruntime=={}".format(onnxruntime.__version__), + ], + additional_conda_channels=None, + ) + + +@experimental +def save_model(onnx_model, path, conda_env=None, mlflow_model=Model()): + """ + Save an ONNX model to a path on the local file system. + + :param onnx_model: ONNX model to be saved. + :param path: Local path where the model is to be saved. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this describes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If `None`, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.6.0', + 'onnx=1.4.1', + 'onnxruntime=0.3.0' + ] + } + + :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to. + """ + import onnx + + path = os.path.abspath(path) + if os.path.exists(path): + raise MlflowException( + message="Path '{}' already exists".format(path), + error_code=RESOURCE_ALREADY_EXISTS) + os.makedirs(path) + model_data_subpath = "model.onnx" + model_data_path = os.path.join(path, model_data_subpath) + + # Save onnx-model + onnx.save_model(onnx_model, model_data_path) + + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env() + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + + pyfunc.add_to_model(mlflow_model, loader_module="mlflow.onnx", + data=model_data_subpath, env=conda_env_subpath) + mlflow_model.add_flavor(FLAVOR_NAME, onnx_version=onnx.__version__, data=model_data_subpath) + mlflow_model.save(os.path.join(path, "MLmodel")) + + +def _load_model(model_file): + import onnx + + onnx_model = onnx.load(model_file) + # Check that the loaded model is well-formed + onnx.checker.check_model(onnx_model) + return onnx_model + + +class _OnnxModelWrapper: + def __init__(self, path): + import onnxruntime + self.rt = onnxruntime.InferenceSession(path) + assert len(self.rt.get_inputs()) >= 1 + self.inputs = [ + (inp.name, inp.type) for inp in self.rt.get_inputs() + ] + self.output_names = [ + outp.name for outp in self.rt.get_outputs() + ] + + @staticmethod + def _cast_float64_to_float32(dataframe, column_names): + for input_name in column_names: + if dataframe[input_name].values.dtype == np.float64: + dataframe[input_name] = dataframe[input_name].values.astype(np.float32) + return dataframe + + @experimental + def predict(self, dataframe): + """ + :param dataframe: A Pandas DataFrame that is converted to a collection of ONNX Runtime + inputs. If the underlying ONNX model only defines a *single* input + tensor, the DataFrame's values are converted to a NumPy array + representation using the `DataFrame.values() + <https://pandas.pydata.org/pandas-docs/stable/reference/api/ + pandas.DataFrame.values.html>`_ method. If the + underlying ONNX model defines *multiple* input tensors, each column + of the DataFrame is converted to a NumPy array representation. + The corresponding NumPy array representation is then passed to the + ONNX Runtime. For more information about the ONNX Runtime, see + `<https://github.com/microsoft/onnxruntime>`_. + :return: A Pandas DataFrame output.
Each column of the DataFrame corresponds to an + output tensor produced by the underlying ONNX model. + """ + # ONNXRuntime throws the following exception for some operators when the input + # dataframe contains float64 values. Unfortunately, even if the original user-supplied + # dataframe did not contain float64 values, the serialization/deserialization between the + # client and the scoring server can introduce 64-bit floats. This is being tracked in + # https://github.com/mlflow/mlflow/issues/1286. Meanwhile, we explicitly cast the input to + # 32-bit floats when needed. TODO: Remove explicit casting when issue #1286 is fixed. + if len(self.inputs) > 1: + cols = [name for (name, type) in self.inputs if type == 'tensor(float)'] + else: + cols = dataframe.columns if self.inputs[0][1] == 'tensor(float)' else [] + + dataframe = _OnnxModelWrapper._cast_float64_to_float32(dataframe, cols) + if len(self.inputs) > 1: + feed_dict = { + name: dataframe[name].values + for (name, _) in self.inputs + } + else: + feed_dict = {self.inputs[0][0]: dataframe.values} + + predicted = self.rt.run(self.output_names, feed_dict) + return pd.DataFrame.from_dict( + {c: p.reshape(-1) for (c, p) in zip(self.output_names, predicted)}) + + +def _load_pyfunc(path): + """ + Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + """ + return _OnnxModelWrapper(path) + + +@experimental +def load_model(model_uri): + """ + Load an ONNX model from a local file or a run. + + :param model_uri: The location, in URI format, of the MLflow model, for example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see the + `Artifacts Documentation <https://www.mlflow.org/docs/latest/tracking.html#artifact-stores>`_. + + :return: An ONNX model instance. + + """ + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) + onnx_model_artifacts_path = os.path.join(local_model_path, flavor_conf["data"]) + return _load_model(model_file=onnx_model_artifacts_path) + + +@experimental +def log_model(onnx_model, artifact_path, conda_env=None): + """ + Log an ONNX model as an MLflow artifact for the current run. + + :param onnx_model: ONNX model to be saved. + :param artifact_path: Run-relative artifact path. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this describes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If `None`, the default + :func:`get_default_conda_env()` environment is added to the model.
+ + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.6.0', + 'onnx=1.4.1', + 'onnxruntime=0.3.0' + ] + } + """ + Model.log(artifact_path=artifact_path, flavor=mlflow.onnx, + onnx_model=onnx_model, conda_env=conda_env) diff --git a/mlflow/projects/__init__.py b/mlflow/projects/__init__.py index d6c7c2b04610b..905a1047ad8f4 100644 --- a/mlflow/projects/__init__.py +++ b/mlflow/projects/__init__.py @@ -7,92 +7,190 @@ from distutils import dir_util import hashlib import json +import yaml import os +import sys import re +import shutil +from six.moves import urllib import subprocess import tempfile +import logging +import posixpath +import docker +import mlflow.tracking as tracking +import mlflow.tracking.fluent as fluent from mlflow.projects.submitted_run import LocalSubmittedRun, SubmittedRun from mlflow.projects import _project_spec -from mlflow.exceptions import ExecutionException -from mlflow.entities import RunStatus, SourceType, Param -import mlflow.tracking as tracking -from mlflow.tracking.fluent import _get_experiment_id, _get_git_commit - - +from mlflow.exceptions import ExecutionException, MlflowException +from mlflow.entities import RunStatus, SourceType +from mlflow.tracking.fluent import _get_experiment_id +from mlflow.tracking.context.default_context import _get_user +from mlflow.tracking.context.git_context import _get_git_commit import mlflow.projects.databricks from mlflow.utils import process -from mlflow.utils.logging_utils import eprint -from mlflow.utils.mlflow_tags import MLFLOW_GIT_BRANCH_NAME +from mlflow.utils.file_utils import path_to_local_sqlite_uri, path_to_local_file_uri +from mlflow.utils.mlflow_tags import MLFLOW_PROJECT_ENV, MLFLOW_DOCKER_IMAGE_URI, \ + MLFLOW_DOCKER_IMAGE_ID, MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE, \ + MLFLOW_GIT_COMMIT, MLFLOW_GIT_REPO_URL, MLFLOW_GIT_BRANCH, LEGACY_MLFLOW_GIT_REPO_URL, \ + LEGACY_MLFLOW_GIT_BRANCH_NAME, MLFLOW_PROJECT_ENTRY_POINT, MLFLOW_PARENT_RUN_ID, \ + MLFLOW_PROJECT_BACKEND +from mlflow.utils import databricks_utils, file_utils # TODO: this should be restricted to just Git repos and not S3 and stuff like that _GIT_URI_REGEX = re.compile(r"^[^/]*:") +_FILE_URI_REGEX = re.compile(r"^file://.+") +_ZIP_URI_REGEX = re.compile(r".+\.zip$") # Environment variable indicating a path to a conda installation. MLflow will default to running # "conda" if unset MLFLOW_CONDA_HOME = "MLFLOW_CONDA_HOME" +_GENERATED_DOCKERFILE_NAME = "Dockerfile.mlflow-autogenerated" +_PROJECT_TAR_ARCHIVE_NAME = "mlflow-project-docker-build-context" +_MLFLOW_DOCKER_TRACKING_DIR_PATH = "/mlflow/tmp/mlruns" + +_logger = logging.getLogger(__name__) + + +def _resolve_experiment_id(experiment_name=None, experiment_id=None): + """ + Resolve experiment. + + Verifies that ``experiment_name`` and ``experiment_id`` are not both specified; at most one + of the two may be set. + + :param experiment_name: Name of experiment under which to launch the run. + :param experiment_id: ID of experiment under which to launch the run.
+ :return: int + """ + + if experiment_name and experiment_id: + raise MlflowException("Specify only one of 'experiment_name' or 'experiment_id'.") + + exp_id = experiment_id + if experiment_name: + client = tracking.MlflowClient() + exp_id = client.get_experiment_by_name(experiment_name).experiment_id + exp_id = exp_id or _get_experiment_id() + return exp_id -def _run(uri, entry_point="main", version=None, parameters=None, experiment_id=None, - mode=None, cluster_spec=None, git_username=None, git_password=None, use_conda=True, - storage_dir=None, block=True, run_id=None): +def _run(uri, experiment_id, entry_point="main", version=None, parameters=None, + backend=None, backend_config=None, use_conda=True, + storage_dir=None, synchronous=True, run_id=None): """ - Helper that delegates to the project-running method corresponding to the passed-in mode. + Helper that delegates to the project-running method corresponding to the passed-in backend. Returns a ``SubmittedRun`` corresponding to the project run. """ - if mode == "databricks": - mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), cluster_spec) - exp_id = experiment_id or _get_experiment_id() parameters = parameters or {} - work_dir = _fetch_project(uri=uri, force_tempdir=False, version=version, - git_username=git_username, git_password=git_password) + work_dir = _fetch_project(uri=uri, force_tempdir=False, version=version) project = _project_spec.load_project(work_dir) + _validate_execution_environment(project, backend) project.get_entry_point(entry_point)._validate_parameters(parameters) if run_id: active_run = tracking.MlflowClient().get_run(run_id) else: - active_run = _create_run(uri, exp_id, work_dir, entry_point) + active_run = _create_run(uri, experiment_id, work_dir, entry_point) # Consolidate parameters for logging. # `storage_dir` is `None` since we want to log actual path not downloaded local path entry_point_obj = project.get_entry_point(entry_point) final_params, extra_params = entry_point_obj.compute_parameters(parameters, storage_dir=None) for key, value in (list(final_params.items()) + list(extra_params.items())): - tracking.MlflowClient().log_param(active_run.info.run_uuid, key, value) + tracking.MlflowClient().log_param(active_run.info.run_id, key, value) + + repo_url = _get_git_repo_url(work_dir) + if repo_url is not None: + for tag in [MLFLOW_GIT_REPO_URL, LEGACY_MLFLOW_GIT_REPO_URL]: + tracking.MlflowClient().set_tag(active_run.info.run_id, tag, repo_url) # Add branch name tag if a branch is specified through -version if _is_valid_branch_name(work_dir, version): - tracking.MlflowClient().set_tag(active_run.info.run_uuid, MLFLOW_GIT_BRANCH_NAME, version) + for tag in [MLFLOW_GIT_BRANCH, LEGACY_MLFLOW_GIT_BRANCH_NAME]: + tracking.MlflowClient().set_tag(active_run.info.run_id, tag, version) - if mode == "databricks": + if backend == "databricks": + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND, + "databricks") from mlflow.projects.databricks import run_databricks return run_databricks( remote_run=active_run, uri=uri, entry_point=entry_point, work_dir=work_dir, parameters=parameters, - experiment_id=exp_id, cluster_spec=cluster_spec) - elif mode == "local" or mode is None: - # Synchronously create a conda environment (even though this may take some time) to avoid - # failures due to multiple concurrent attempts to create the same conda env. 
- conda_env_name = _get_or_create_conda_env(project.conda_env_path) if use_conda else None - # In blocking mode, run the entry point command in blocking fashion, sending status updates - # to the tracking server when finished. Note that the run state may not be persisted to the - # tracking server if interrupted - if block: - command = _get_entry_point_command( - project, entry_point, parameters, conda_env_name, storage_dir) - return _run_entry_point(command, work_dir, exp_id, run_id=active_run.info.run_uuid) + experiment_id=experiment_id, cluster_spec=backend_config) + + elif backend == "local" or backend is None: + command = [] + command_separator = " " + # If a docker_env attribute is defined in the MLproject file, the project is executed + # inside a Docker container (a project may not specify both a docker_env and a conda + # environment). + if project.docker_env: + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV, + "docker") + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND, + "local") + _validate_docker_env(project) + _validate_docker_installation() + image = _build_docker_image(work_dir=work_dir, + repository_uri=project.name, + base_image=project.docker_env.get('image'), + run_id=active_run.info.run_id) + command += _get_docker_command(image=image, active_run=active_run) + # Synchronously create a conda environment (even though this may take some time) + # to avoid failures due to multiple concurrent attempts to create the same conda env. + elif use_conda: + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV, "conda") + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND, "local") + command_separator = " && " + conda_env_name = _get_or_create_conda_env(project.conda_env_path) + command += _get_conda_command(conda_env_name) + # In synchronous mode, run the entry point command in a blocking fashion, sending status + updates to the tracking server when finished.
Note that the run state may not be + # persisted to the tracking server if interrupted + if synchronous: + command += _get_entry_point_command(project, entry_point, parameters, storage_dir) + command = command_separator.join(command) + return _run_entry_point(command, work_dir, experiment_id, + run_id=active_run.info.run_id) # Otherwise, invoke `mlflow run` in a subprocess return _invoke_mlflow_run_subprocess( - work_dir=work_dir, entry_point=entry_point, parameters=parameters, experiment_id=exp_id, - use_conda=use_conda, storage_dir=storage_dir, run_id=active_run.info.run_uuid) - supported_modes = ["local", "databricks"] + work_dir=work_dir, entry_point=entry_point, parameters=parameters, + experiment_id=experiment_id, + use_conda=use_conda, storage_dir=storage_dir, run_id=active_run.info.run_id) + elif backend == "kubernetes": + from mlflow.projects import kubernetes as kb + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_ENV, "docker") + tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND, + "kubernetes") + _validate_docker_env(project) + _validate_docker_installation() + kube_config = _parse_kubernetes_config(backend_config) + image = _build_docker_image(work_dir=work_dir, + repository_uri=kube_config["repository-uri"], + base_image=project.docker_env.get('image'), + run_id=active_run.info.run_id) + image_digest = kb.push_image_to_registry(image.tags[0]) + submitted_run = kb.run_kubernetes_job(project.name, + active_run, + image.tags[0], + image_digest, + _get_entry_point_command(project, entry_point, + parameters, storage_dir), + _get_run_env_vars( + run_id=active_run.info.run_id, + experiment_id=active_run.info.experiment_id), + kube_config['kube-context'], + kube_config['kube-job-template']) + return submitted_run + + supported_backends = ["local", "databricks", "kubernetes"] raise ExecutionException("Got unsupported execution mode %s. Supported " - "values: %s" % (mode, supported_modes)) + "values: %s" % (backend, supported_backends)) -def run(uri, entry_point="main", version=None, parameters=None, experiment_id=None, - mode=None, cluster_spec=None, git_username=None, git_password=None, use_conda=True, - storage_dir=None, block=True, run_id=None): +def run(uri, entry_point="main", version=None, parameters=None, + experiment_name=None, experiment_id=None, + backend=None, backend_config=None, use_conda=True, + storage_dir=None, synchronous=True, run_id=None): """ Run an MLflow project. The project can be local or stored at a Git URI. @@ -111,38 +209,61 @@ def run(uri, entry_point="main", version=None, parameters=None, experiment_id=No using "python" to run ``.py`` files and the default shell (specified by environment variable ``$SHELL``) to run ``.sh`` files. :param version: For Git-based projects, either a commit hash or a branch name. + :param experiment_name: Name of experiment under which to launch the run. :param experiment_id: ID of experiment under which to launch the run. - :param mode: Execution mode of the run: "local" or "databricks". - :param cluster_spec: When ``mode`` is "databricks", path to a JSON file containing a - `Databricks cluster specification - `_ - to use when launching a run. - :param git_username: Username for HTTP(S) authentication with Git. - :param git_password: Password for HTTP(S) authentication with Git. + :param backend: Execution backend for the run: "local", "databricks", or "kubernetes".
If running against + Databricks, will run against a Databricks workspace determined as follows: if + a Databricks tracking URI of the form ``databricks://profile`` has been set + (e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run + against the workspace specified by ``profile``. Otherwise, runs against the + workspace specified by the default Databricks CLI profile. + :param backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which will + be passed as config to the backend. For the Databricks backend, this + should be a cluster spec: see `Databricks Cluster Specs for Jobs + `_ + for more information. :param use_conda: If True (the default), create a new Conda environment for the run and install project dependencies within that environment. Otherwise, run the project in the current environment without installing any project dependencies. - :param storage_dir: Used only if ``mode`` is "local". MLflow downloads artifacts from + :param storage_dir: Used only if ``backend`` is "local". MLflow downloads artifacts from distributed URIs passed to parameters of type ``path`` to subdirectories of ``storage_dir``. - :param block: Whether to block while waiting for a run to complete. Defaults to True. - Note that if ``block`` is False and mode is "local", this method will return, but - the current process will block when exiting until the local run completes. - If the current process is interrupted, any asynchronous runs launched via this - method will be terminated. + :param synchronous: Whether to block while waiting for a run to complete. Defaults to True. + Note that if ``synchronous`` is False and ``backend`` is "local", this + method will return, but the current process will block when exiting until + the local run completes. If the current process is interrupted, any + asynchronous runs launched via this method will be terminated. :param run_id: Note: this argument is used internally by the MLflow project APIs and should not be specified. If specified, the run ID will be used instead of creating a new run. :return: :py:class:`mlflow.projects.SubmittedRun` exposing information (e.g. run ID) about the launched run.
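+
+ Example (illustrative only; the project path and parameter name below are hypothetical)::
+
+     import mlflow.projects
+
+     submitted_run = mlflow.projects.run(
+         uri="/path/to/project",        # directory containing an MLproject file
+         parameters={"alpha": "0.5"},
+         backend="local",
+         synchronous=True)
+     print(submitted_run.run_id)
+     print(submitted_run.get_status())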
""" + + cluster_spec_dict = backend_config + if (backend_config and type(backend_config) != dict + and os.path.splitext(backend_config)[-1] == ".json"): + with open(backend_config, 'r') as handle: + try: + cluster_spec_dict = json.load(handle) + except ValueError: + _logger.error( + "Error when attempting to load and parse JSON cluster spec from file %s", + backend_config) + raise + + if backend == "databricks": + mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), backend_config) + + experiment_id = _resolve_experiment_id(experiment_name=experiment_name, + experiment_id=experiment_id) + submitted_run_obj = _run( - uri=uri, entry_point=entry_point, version=version, parameters=parameters, - experiment_id=experiment_id, mode=mode, cluster_spec=cluster_spec, - git_username=git_username, git_password=git_password, use_conda=use_conda, - storage_dir=storage_dir, block=block, run_id=run_id) - if block: + uri=uri, experiment_id=experiment_id, entry_point=entry_point, version=version, + parameters=parameters, backend=backend, backend_config=cluster_spec_dict, + use_conda=use_conda, storage_dir=storage_dir, synchronous=synchronous, run_id=run_id) + if synchronous: _wait_for(submitted_run_obj) return submitted_run_obj @@ -156,13 +277,13 @@ def _wait_for(submitted_run_obj): try: active_run = tracking.MlflowClient().get_run(run_id) if run_id is not None else None if submitted_run_obj.wait(): - eprint("=== Run (ID '%s') succeeded ===" % run_id) + _logger.info("=== Run (ID '%s') succeeded ===", run_id) _maybe_set_run_terminated(active_run, "FINISHED") else: _maybe_set_run_terminated(active_run, "FAILED") raise ExecutionException("Run (ID '%s') failed" % run_id) except KeyboardInterrupt: - eprint("=== Run (ID '%s') interrupted, cancelling run ===" % run_id) + _logger.error("=== Run (ID '%s') interrupted, cancelling run ===", run_id) submitted_run_obj.cancel() _maybe_set_run_terminated(active_run, "FAILED") raise @@ -174,7 +295,7 @@ def _parse_subdirectory(uri): subdirectory = '' parsed_uri = uri if '#' in uri: - subdirectory = uri[uri.find('#')+1:] + subdirectory = uri[uri.find('#') + 1:] parsed_uri = uri[:uri.find('#')] if subdirectory and '.' in subdirectory: raise ExecutionException("'.' is not allowed in project subdirectory paths.") @@ -187,17 +308,42 @@ def _get_storage_dir(storage_dir): return tempfile.mkdtemp(dir=storage_dir) +def _get_git_repo_url(work_dir): + from git import Repo + from git.exc import GitCommandError, InvalidGitRepositoryError + try: + repo = Repo(work_dir, search_parent_directories=True) + remote_urls = [remote.url for remote in repo.remotes] + if len(remote_urls) == 0: + return None + except GitCommandError: + return None + except InvalidGitRepositoryError: + return None + return remote_urls[0] + + def _expand_uri(uri): if _is_local_uri(uri): return os.path.abspath(uri) return uri +def _is_file_uri(uri): + """Returns True if the passed-in URI is a file:// URI.""" + return _FILE_URI_REGEX.match(uri) + + def _is_local_uri(uri): """Returns True if the passed-in URI should be interpreted as a path on the local filesystem.""" return not _GIT_URI_REGEX.match(uri) +def _is_zip_uri(uri): + """Returns True if the passed-in URI points to a ZIP file.""" + return _ZIP_URI_REGEX.match(uri) + + def _is_valid_branch_name(work_dir, version): """ Returns True if the ``version`` is the name of a branch in a Git project. 
@@ -214,54 +360,73 @@ def _is_valid_branch_name(work_dir, version): return False -def _fetch_project(uri, force_tempdir, version=None, git_username=None, git_password=None): +def _fetch_project(uri, force_tempdir, version=None): """ Fetch a project into a local directory, returning the path to the local project directory. :param force_tempdir: If True, will fetch the project into a temporary directory. Otherwise, - will fetch Git projects into a temporary directory but simply return the - path of local projects (i.e. perform a no-op for local projects). + will fetch ZIP or Git projects into a temporary directory but simply + return the path of local projects (i.e. perform a no-op for local + projects). """ parsed_uri, subdirectory = _parse_subdirectory(uri) - use_temp_dst_dir = force_tempdir or not _is_local_uri(parsed_uri) + use_temp_dst_dir = force_tempdir or _is_zip_uri(parsed_uri) or not _is_local_uri(parsed_uri) dst_dir = tempfile.mkdtemp() if use_temp_dst_dir else parsed_uri if use_temp_dst_dir: - eprint("=== Fetching project from %s into %s ===" % (uri, dst_dir)) - if _is_local_uri(uri): + _logger.info("=== Fetching project from %s into %s ===", uri, dst_dir) + if _is_zip_uri(parsed_uri): + if _is_file_uri(parsed_uri): + parsed_file_uri = urllib.parse.urlparse(urllib.parse.unquote(parsed_uri)) + parsed_uri = os.path.join(parsed_file_uri.netloc, parsed_file_uri.path) + _unzip_repo(zip_file=( + parsed_uri if _is_local_uri(parsed_uri) else _fetch_zip_repo(parsed_uri)), + dst_dir=dst_dir) + elif _is_local_uri(uri): if version is not None: raise ExecutionException("Setting a version is only supported for Git project URIs") if use_temp_dst_dir: dir_util.copy_tree(src=parsed_uri, dst=dst_dir) else: assert _GIT_URI_REGEX.match(parsed_uri), "Non-local URI %s should be a Git URI" % parsed_uri - _fetch_git_repo(parsed_uri, version, dst_dir, git_username, git_password) + _fetch_git_repo(parsed_uri, version, dst_dir) res = os.path.abspath(os.path.join(dst_dir, subdirectory)) if not os.path.exists(res): raise ExecutionException("Could not find subdirectory %s of %s" % (subdirectory, dst_dir)) return res -def _fetch_git_repo(uri, version, dst_dir, git_username, git_password): +def _unzip_repo(zip_file, dst_dir): + import zipfile + with zipfile.ZipFile(zip_file) as zip_in: + zip_in.extractall(dst_dir) + + +def _fetch_zip_repo(uri): + import requests + from io import BytesIO + # TODO (dbczumar): Replace HTTP resolution via ``requests.get`` with an invocation of + # ``mlflow.data.download_uri()`` when the API supports the same set of available stores as + # the artifact repository (Azure, FTP, etc). See the following issue: + # https://github.com/mlflow/mlflow/issues/763. + response = requests.get(uri) + try: + response.raise_for_status() + except requests.HTTPError as error: + raise ExecutionException("Unable to retrieve ZIP file. Reason: %s" % str(error)) + return BytesIO(response.content) + + +def _fetch_git_repo(uri, version, dst_dir): """ Clone the git repo at ``uri`` into ``dst_dir``, checking out commit ``version`` (or defaulting to the head commit of the repository's master branch if version is unspecified). - If ``git_username`` and ``git_password`` are specified, uses them to authenticate while fetching - the repo. Otherwise, assumes authentication parameters are specified by the environment, - e.g. by a Git credential helper. + Assumes authentication parameters are specified by the environment, e.g. by a Git credential + helper.
""" # We defer importing git until the last moment, because the import requires that the git # executable is availble on the PATH, so we only want to fail if we actually need it. import git repo = git.Repo.init(dst_dir) origin = repo.create_remote("origin", uri) - git_args = [git_username, git_password] - if not (all(arg is not None for arg in git_args) or all(arg is None for arg in git_args)): - raise ExecutionException("Either both or neither of git_username and git_password must be " - "specified.") - if git_username: - git_credentials = "url=%s\nusername=%s\npassword=%s" % (uri, git_username, git_password) - repo.git.config("--local", "credential.helper", "cache") - process.exec_cmd(cmd=["git", "credential-cache", "store"], cwd=dst_dir, - cmd_stdin=git_credentials) origin.fetch() if version is not None: try: @@ -275,8 +440,10 @@ def _fetch_git_repo(uri, version, dst_dir, git_username, git_password): repo.heads.master.checkout() -def _get_conda_env_name(conda_env_path): +def _get_conda_env_name(conda_env_path, env_id=None): conda_env_contents = open(conda_env_path).read() if conda_env_path else "" + if env_id: + conda_env_contents += env_id return "mlflow-%s" % hashlib.sha1(conda_env_contents.encode("utf-8")).hexdigest() @@ -296,10 +463,16 @@ def _get_conda_bin_executable(executable_name): return executable_name -def _get_or_create_conda_env(conda_env_path): +def _get_or_create_conda_env(conda_env_path, env_id=None): """ Given a `Project`, creates a conda environment containing the project's dependencies if such a conda environment doesn't already exist. Returns the name of the conda environment. + :param conda_env_path: Path to a conda yaml file. + :param env_id: Optional string that is added to the contents of the yaml file before + calculating the hash. It can be used to distinguish environments that have the + same conda dependencies but are supposed to be different based on the context. + For example, when serving the model we may install additional dependencies to the + environment after the environment has been activated. """ conda_path = _get_conda_bin_executable("conda") try: @@ -313,9 +486,9 @@ def _get_or_create_conda_env(conda_env_path): "executable".format(conda_path, MLFLOW_CONDA_HOME)) (_, stdout, _) = process.exec_cmd([conda_path, "env", "list", "--json"]) env_names = [os.path.basename(env) for env in json.loads(stdout)['envs']] - project_env_name = _get_conda_env_name(conda_env_path) + project_env_name = _get_conda_env_name(conda_env_path, env_id) if project_env_name not in env_names: - eprint('=== Creating conda environment %s ===' % project_env_name) + _logger.info('=== Creating conda environment %s ===', project_env_name) if conda_env_path: process.exec_cmd([conda_path, "env", "create", "-n", project_env_name, "--file", conda_env_path], stream_output=True) @@ -332,34 +505,31 @@ def _maybe_set_run_terminated(active_run, status): """ if active_run is None: return - run_id = active_run.info.run_uuid + run_id = active_run.info.run_id cur_status = tracking.MlflowClient().get_run(run_id).info.status if RunStatus.is_terminated(cur_status): return tracking.MlflowClient().set_terminated(run_id, status) -def _get_entry_point_command(project, entry_point, parameters, conda_env_name, storage_dir): +def _get_entry_point_command(project, entry_point, parameters, storage_dir): """ Returns the shell command to execute in order to run the specified entry point. 
:param project: Project containing the target entry point :param entry_point: Entry point to run :param parameters: Parameters (dictionary) for the entry point command - :param conda_env_name: Name of conda environment to use for command execution, or None if no - conda environment should be used. :param storage_dir: Base local directory to use for downloading remote artifacts passed to arguments of type 'path'. If None, a temporary base directory is used. """ storage_dir_for_run = _get_storage_dir(storage_dir) - eprint("=== Created directory %s for downloading remote URIs passed to arguments of " - "type 'path' ===" % storage_dir_for_run) + _logger.info( + "=== Created directory %s for downloading remote URIs passed to arguments of" + " type 'path' ===", + storage_dir_for_run) commands = [] - if conda_env_name: - activate_path = _get_conda_bin_executable("activate") - commands.append("source %s %s" % (activate_path, conda_env_name)) commands.append( project.get_entry_point(entry_point).compute_command(parameters, storage_dir_for_run)) - return " && ".join(commands) + return commands def _run_entry_point(command, work_dir, experiment_id, run_id): @@ -372,8 +542,13 @@ def _run_entry_point(command, work_dir, experiment_id, run_id): """ env = os.environ.copy() env.update(_get_run_env_vars(run_id, experiment_id)) - eprint("=== Running command '%s' in run with ID '%s' === " % (command, run_id)) - process = subprocess.Popen(["bash", "-c", command], close_fds=True, cwd=work_dir, env=env) + _logger.info("=== Running command '%s' in run with ID '%s' === ", command, run_id) + # On non-Windows platforms (i.e. when ``os.name != "nt"``), run the command through + # bash; on Windows, invoke the command directly. + if os.name != "nt": + process = subprocess.Popen(["bash", "-c", command], close_fds=True, cwd=work_dir, env=env) + else: + process = subprocess.Popen(command, close_fds=True, cwd=work_dir, env=env) return LocalSubmittedRun(run_id, process) @@ -402,8 +577,13 @@ def _run_mlflow_run_cmd(mlflow_run_arr, env_map): final_env.update(env_map) # Launch `mlflow run` command as the leader of its own process group so that we can do a # best-effort cleanup of all its descendant processes if needed - return subprocess.Popen( - mlflow_run_arr, env=final_env, universal_newlines=True, preexec_fn=os.setsid) + if sys.platform == "win32": + return subprocess.Popen( + mlflow_run_arr, env=final_env, universal_newlines=True, + creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) + else: + return subprocess.Popen( + mlflow_run_arr, env=final_env, universal_newlines=True, preexec_fn=os.setsid) def _create_run(uri, experiment_id, work_dir, entry_point): @@ -416,12 +596,25 @@ def _create_run(uri, experiment_id, work_dir, entry_point): source_name = tracking.utils._get_git_url_if_present(_expand_uri(uri)) else: source_name = _expand_uri(uri) - active_run = tracking.MlflowClient().create_run( - experiment_id=experiment_id, - source_name=source_name, - source_version=_get_git_commit(work_dir), - entry_point_name=entry_point, - source_type=SourceType.PROJECT) + source_version = _get_git_commit(work_dir) + existing_run = fluent.active_run() + if existing_run: + parent_run_id = existing_run.info.run_id + else: + parent_run_id = None + + tags = { + MLFLOW_USER: _get_user(), + MLFLOW_SOURCE_NAME: source_name, + MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.PROJECT), + MLFLOW_PROJECT_ENTRY_POINT: entry_point + } + if source_version is not None: + tags[MLFLOW_GIT_COMMIT] = source_version + if parent_run_id is not None: + tags[MLFLOW_PARENT_RUN_ID] =
parent_run_id + + active_run = tracking.MlflowClient().create_run(experiment_id=experiment_id, tags=tags) return active_run @@ -443,7 +636,7 @@ def _invoke_mlflow_run_subprocess( Run an MLflow project asynchronously by invoking ``mlflow run`` in a subprocess, returning a SubmittedRun that can be used to query run status. """ - eprint("=== Asynchronously launching MLflow run with ID %s ===" % run_id) + _logger.info("=== Asynchronously launching MLflow run with ID %s ===", run_id) mlflow_run_arr = _build_mlflow_run_cmd( uri=work_dir, entry_point=entry_point, storage_dir=storage_dir, use_conda=use_conda, run_id=run_id, parameters=parameters) @@ -451,6 +644,185 @@ def _invoke_mlflow_run_subprocess( mlflow_run_arr, _get_run_env_vars(run_id, experiment_id)) return LocalSubmittedRun(run_id, mlflow_run_subprocess) + +def _get_conda_command(conda_env_name): + activate_path = _get_conda_bin_executable("activate") + # On non-Windows platforms, activate the conda environment with ``source activate``; + # on Windows, use ``conda activate``. + if os.name != "nt": + return ["source %s %s" % (activate_path, conda_env_name)] + else: + return ["conda %s %s" % (activate_path, conda_env_name)] + + +def _validate_execution_environment(project, backend): + if project.docker_env and backend == "databricks": + raise ExecutionException( + "Running docker-based projects on Databricks is not yet supported.") + + +def _get_local_uri_or_none(uri): + if uri == "databricks": + return None, None + parsed_uri = urllib.parse.urlparse(uri) + if not parsed_uri.netloc and parsed_uri.scheme in ("", "file", "sqlite"): + path = urllib.request.url2pathname(parsed_uri.path) + if parsed_uri.scheme == "sqlite": + uri = path_to_local_sqlite_uri(_MLFLOW_DOCKER_TRACKING_DIR_PATH) + else: + uri = path_to_local_file_uri(_MLFLOW_DOCKER_TRACKING_DIR_PATH) + return path, uri + else: + return None, None + + +def _get_docker_command(image, active_run): + docker_path = "docker" + cmd = [docker_path, "run", "--rm"] + env_vars = _get_run_env_vars(run_id=active_run.info.run_id, + experiment_id=active_run.info.experiment_id) + tracking_uri = tracking.get_tracking_uri() + local_path, container_tracking_uri = _get_local_uri_or_none(tracking_uri) + if local_path is not None: + cmd += ["-v", "%s:%s" % (local_path, _MLFLOW_DOCKER_TRACKING_DIR_PATH)] + env_vars[tracking._TRACKING_URI_ENV_VAR] = container_tracking_uri + if tracking.utils._is_databricks_uri(tracking_uri): + db_profile = mlflow.tracking.utils.get_db_profile_from_uri(tracking_uri) + config = databricks_utils.get_databricks_host_creds(db_profile) + # We set these via environment variables so that only the current profile is exposed, rather + # than all profiles in ~/.databrickscfg; maybe better would be to mount the necessary + # part of ~/.databrickscfg into the container + env_vars[tracking._TRACKING_URI_ENV_VAR] = 'databricks' + env_vars['DATABRICKS_HOST'] = config.host + if config.username: + env_vars['DATABRICKS_USERNAME'] = config.username + if config.password: + env_vars['DATABRICKS_PASSWORD'] = config.password + if config.token: + env_vars['DATABRICKS_TOKEN'] = config.token + if config.ignore_tls_verification: + env_vars['DATABRICKS_INSECURE'] = config.ignore_tls_verification + + for key, value in env_vars.items(): + cmd += ["-e", "{key}={value}".format(key=key, value=value)] + cmd += [image.tags[0]] + return cmd + + +def _validate_docker_installation(): + """ + Verify that Docker is installed on the host machine.
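+ Raises an ``ExecutionException`` if the ``docker`` executable cannot be found on the PATH.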
+ """ + try: + docker_path = "docker" + process.exec_cmd([docker_path, "--help"], throw_on_error=False) + except EnvironmentError: + raise ExecutionException("Could not find Docker executable. " + "Ensure Docker is installed as per the instructions " + "at https://docs.docker.com/install/overview/.") + + +def _validate_docker_env(project): + if not project.name: + raise ExecutionException("Project name in MLProject must be specified when using docker " + "for image tagging.") + if not project.docker_env.get('image'): + raise ExecutionException("Project with docker environment must specify the docker image " + "to use via an 'image' field under the 'docker_env' field.") + + +def _parse_kubernetes_config(backend_config): + """ + Creates build context tarfile containing Dockerfile and project code, returning path to tarfile + """ + if not backend_config: + raise ExecutionException("Backend_config file not found.") + kube_config = backend_config.copy() + if 'kube-job-template-path' not in backend_config.keys(): + raise ExecutionException("'kube-job-template-path' attribute must be specified in " + "backend_config.") + kube_job_template = backend_config['kube-job-template-path'] + if os.path.exists(kube_job_template): + with open(kube_job_template, 'r') as job_template: + yaml_obj = yaml.safe_load(job_template.read()) + kube_job_template = yaml_obj + kube_config['kube-job-template'] = kube_job_template + else: + raise ExecutionException("Could not find 'kube-job-template-path': {}".format( + kube_job_template)) + if 'kube-context' not in backend_config.keys(): + raise ExecutionException("Could not find kube-context in backend_config.") + if 'repository-uri' not in backend_config.keys(): + raise ExecutionException("Could not find 'repository-uri' in backend_config.") + return kube_config + + +def _create_docker_build_ctx(work_dir, dockerfile_contents): + """ + Creates build context tarfile containing Dockerfile and project code, returning path to tarfile + """ + directory = tempfile.mkdtemp() + try: + dst_path = os.path.join(directory, "mlflow-project-contents") + shutil.copytree(src=work_dir, dst=dst_path) + with open(os.path.join(dst_path, _GENERATED_DOCKERFILE_NAME), "w") as handle: + handle.write(dockerfile_contents) + _, result_path = tempfile.mkstemp() + file_utils.make_tarfile( + output_filename=result_path, + source_dir=dst_path, archive_name=_PROJECT_TAR_ARCHIVE_NAME) + finally: + shutil.rmtree(directory) + return result_path + + +def _build_docker_image(work_dir, repository_uri, base_image, run_id): + """ + Build a docker image containing the project in `work_dir`, using the base image. 
+ """ + image_uri = _get_docker_image_uri(repository_uri=repository_uri, work_dir=work_dir) + dockerfile = ( + "FROM {imagename}\n" + "COPY {build_context_path}/ /mlflow/projects/code/\n" + "WORKDIR /mlflow/projects/code/\n" + ).format(imagename=base_image, build_context_path=_PROJECT_TAR_ARCHIVE_NAME) + build_ctx_path = _create_docker_build_ctx(work_dir, dockerfile) + with open(build_ctx_path, 'rb') as docker_build_ctx: + _logger.info("=== Building docker image %s ===", image_uri) + client = docker.from_env() + image, _ = client.images.build( + tag=image_uri, forcerm=True, + dockerfile=posixpath.join(_PROJECT_TAR_ARCHIVE_NAME, _GENERATED_DOCKERFILE_NAME), + fileobj=docker_build_ctx, custom_context=True, encoding="gzip") + try: + os.remove(build_ctx_path) + except Exception: # pylint: disable=broad-except + _logger.info("Temporary docker context file %s was not deleted.", build_ctx_path) + tracking.MlflowClient().set_tag(run_id, + MLFLOW_DOCKER_IMAGE_URI, + image_uri) + tracking.MlflowClient().set_tag(run_id, + MLFLOW_DOCKER_IMAGE_ID, + image.id) + return image + + +def _get_docker_image_uri(repository_uri, work_dir): + """ + Returns an appropriate Docker image URI for a project based on the git hash of the specified + working directory. + + :param repository_uri: The URI of the Docker repository with which to tag the image. The + repository URI is used as the prefix of the image URI. + :param work_dir: Path to the working directory in which to search for a git commit hash + """ + repository_uri = repository_uri if repository_uri else "docker-project" + # Optionally include first 7 digits of git SHA in tag name, if available. + git_commit = _get_git_commit(work_dir) + version_string = ":" + git_commit[:7] if git_commit else "" + return repository_uri + version_string + + __all__ = [ "run", "SubmittedRun" diff --git a/mlflow/projects/_project_spec.py b/mlflow/projects/_project_spec.py index 9748f6de74c26..cfe1f54ee58f9 100644 --- a/mlflow/projects/_project_spec.py +++ b/mlflow/projects/_project_spec.py @@ -8,6 +8,7 @@ from mlflow import data from mlflow.exceptions import ExecutionException +from mlflow.utils.file_utils import get_local_path_or_none MLPROJECT_FILE_NAME = "MLproject" @@ -22,29 +23,43 @@ def load_project(directory): yaml_obj = yaml.safe_load(mlproject_file.read()) else: yaml_obj = {} + project_name = yaml_obj.get("name") + if not project_name: + project_name = None + conda_path = yaml_obj.get("conda_env") + docker_env = yaml_obj.get("docker_env") + if docker_env and not docker_env.get("image"): + raise ExecutionException("Docker environment specified but no image " + "attribute found.") + if conda_path and docker_env: + raise ExecutionException("Project cannot contain both a docker and conda environment.") entry_points = {} for name, entry_point_yaml in yaml_obj.get("entry_points", {}).items(): parameters = entry_point_yaml.get("parameters", {}) command = entry_point_yaml.get("command") entry_points[name] = EntryPoint(name, parameters, command) - conda_path = yaml_obj.get("conda_env") if conda_path: conda_env_path = os.path.join(directory, conda_path) if not os.path.exists(conda_env_path): raise ExecutionException("Project specified conda environment file %s, but no such " "file was found." 
% conda_env_path) - return Project(conda_env_path=conda_env_path, entry_points=entry_points) + return Project(conda_env_path=conda_env_path, entry_points=entry_points, + docker_env=docker_env, name=project_name,) default_conda_path = os.path.join(directory, DEFAULT_CONDA_FILE_NAME) if os.path.exists(default_conda_path): - return Project(conda_env_path=default_conda_path, entry_points=entry_points) - return Project(conda_env_path=None, entry_points=entry_points) + return Project(conda_env_path=default_conda_path, entry_points=entry_points, + docker_env=docker_env, name=project_name) + return Project(conda_env_path=None, entry_points=entry_points, + docker_env=docker_env, name=project_name) class Project(object): """A project specification loaded from an MLproject file in the passed-in directory.""" - def __init__(self, conda_env_path, entry_points): + def __init__(self, conda_env_path, entry_points, docker_env, name): self.conda_env_path = conda_env_path self._entry_points = entry_points + self.docker_env = docker_env + self.name = name def get_entry_point(self, entry_point): if entry_point in self._entry_points: @@ -144,11 +159,12 @@ def _compute_uri_value(self, user_param_value): return user_param_value def _compute_path_value(self, user_param_value, storage_dir): - if not data.is_uri(user_param_value): - if not os.path.exists(user_param_value): + local_path = get_local_path_or_none(user_param_value) + if local_path: + if not os.path.exists(local_path): raise ExecutionException("Got value %s for parameter %s, but no such file or " "directory was found." % (user_param_value, self.name)) - return os.path.abspath(user_param_value) + return os.path.abspath(local_path) basename = os.path.basename(user_param_value) dest_path = os.path.join(storage_dir, basename) if dest_path != user_param_value: diff --git a/mlflow/projects/databricks.py b/mlflow/projects/databricks.py index e469624da0268..378e7b6762ef4 100644 --- a/mlflow/projects/databricks.py +++ b/mlflow/projects/databricks.py @@ -5,6 +5,7 @@ import tempfile import textwrap import time +import logging from six.moves import shlex_quote @@ -13,7 +14,6 @@ from mlflow.projects.submitted_run import SubmittedRun from mlflow.utils import rest_utils, file_utils, databricks_utils from mlflow.exceptions import ExecutionException -from mlflow.utils.logging_utils import eprint from mlflow import tracking from mlflow.utils.mlflow_tags import MLFLOW_DATABRICKS_RUN_URL, MLFLOW_DATABRICKS_SHELL_JOB_ID, \ MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID, MLFLOW_DATABRICKS_WEBAPP_URL @@ -32,12 +32,16 @@ DBFS_EXPERIMENT_DIR_BASE = "mlflow-experiments" -def before_run_validations(tracking_uri, cluster_spec): +_logger = logging.getLogger(__name__) + + +def before_run_validations(tracking_uri, backend_config): """Validations to perform before running a project on Databricks.""" - if cluster_spec is None: - raise ExecutionException("Cluster spec must be provided when launching MLflow project " + if backend_config is None: + raise ExecutionException("Backend spec must be provided when launching MLflow project " "runs on Databricks.") - if tracking.utils._is_local_uri(tracking_uri): + if not tracking.utils._is_databricks_uri(tracking_uri) and \ + not tracking.utils._is_http_uri(tracking_uri): raise ExecutionException( "When running on Databricks, the MLflow tracking URI must be of the form " "'databricks' or 'databricks://profile', or a remote HTTP URI accessible to both the " @@ -70,7 +74,7 @@ def _upload_to_dbfs(self, src_path, dbfs_fuse_uri): Upload the file at `src_path` to 
the specified DBFS URI within the Databricks workspace corresponding to the default Databricks CLI profile. """ - eprint("=== Uploading project to DBFS path %s ===" % dbfs_fuse_uri) + _logger.info("=== Uploading project to DBFS path %s ===", dbfs_fuse_uri) http_endpoint = dbfs_fuse_uri with open(src_path, 'rb') as f: self._databricks_api_request(endpoint=http_endpoint, method='POST', data=f) @@ -109,7 +113,7 @@ def _upload_project_to_dbfs(self, project_dir, experiment_id): a directory containing an MLproject file). """ temp_tarfile_dir = tempfile.mkdtemp() - temp_tar_filename = file_utils.build_path(temp_tarfile_dir, "project.tar.gz") + temp_tar_filename = os.path.join(temp_tarfile_dir, "project.tar.gz") def custom_filter(x): return None if os.path.basename(x.name) == "mlruns" else x @@ -125,9 +129,9 @@ def custom_filter(x): dbfs_fuse_uri = os.path.join("/dbfs", dbfs_path) if not self._dbfs_path_exists(dbfs_path): self._upload_to_dbfs(temp_tar_filename, dbfs_fuse_uri) - eprint("=== Finished uploading project to %s ===" % dbfs_fuse_uri) + _logger.info("=== Finished uploading project to %s ===", dbfs_fuse_uri) else: - eprint("=== Project already exists in DBFS ===") + _logger.info("=== Project already exists in DBFS ===") finally: shutil.rmtree(temp_tarfile_dir) return dbfs_fuse_uri @@ -139,8 +143,7 @@ def _run_shell_command_job(self, project_uri, command, env_vars, cluster_spec): :param project_uri: URI of the project from which the shell command originates. :param command: Shell command to run. :param env_vars: Environment variables to set in the process running ``command``. - :param cluster_spec: Path to a JSON file containing a - `Databricks cluster specification + :param cluster_spec: Dictionary containing a `Databricks cluster specification `_ to use when launching a run. :return: ID of the Databricks job run. Can be used to query the run's status via the @@ -172,15 +175,8 @@ def run_databricks(self, uri, entry_point, work_dir, parameters, experiment_id, tracking._TRACKING_URI_ENV_VAR: tracking_uri, tracking._EXPERIMENT_ID_ENV_VAR: experiment_id, } - eprint("=== Running entry point %s of project %s on Databricks ===" % (entry_point, uri)) + _logger.info("=== Running entry point %s of project %s on Databricks ===", entry_point, uri) # Launch run on Databricks - with open(cluster_spec, 'r') as handle: - try: - cluster_spec = json.load(handle) - except ValueError: - eprint("Error when attempting to load and parse JSON cluster spec from file " - "%s. " % cluster_spec) - raise command = _get_databricks_run_cmd(dbfs_fuse_uri, run_id, entry_point, parameters) return self._run_shell_command_job(uri, command, env_vars, cluster_spec) @@ -269,7 +265,7 @@ def run_databricks(remote_run, uri, entry_point, work_dir, parameters, experimen used to query the run's status or wait for the resulting Databricks Job run to terminate. """ profile = tracking.utils.get_db_profile_from_uri(tracking.get_tracking_uri()) - run_id = remote_run.info.run_uuid + run_id = remote_run.info.run_id db_job_runner = DatabricksJobRunner(databricks_profile=profile) db_run_id = db_job_runner.run_databricks( uri, entry_point, work_dir, parameters, experiment_id, cluster_spec, run_id) @@ -298,11 +294,13 @@ def __init__(self, databricks_run_id, mlflow_run_id, databricks_job_runner): self._job_runner = databricks_job_runner def _print_description_and_log_tags(self): - eprint("=== Launched MLflow run as Databricks job run with ID %s. Getting run status " - "page URL... 
===" % self._databricks_run_id) + _logger.info( + "=== Launched MLflow run as Databricks job run with ID %s." + " Getting run status page URL... ===", + self._databricks_run_id) run_info = self._job_runner.jobs_runs_get(self._databricks_run_id) jobs_page_url = run_info["run_page_url"] - eprint("=== Check the run's status at %s ===" % jobs_page_url) + _logger.info("=== Check the run's status at %s ===", jobs_page_url) host_creds = databricks_utils.get_databricks_host_creds(self._job_runner.databricks_profile) tracking.MlflowClient().set_tag(self._mlflow_run_id, MLFLOW_DATABRICKS_RUN_URL, jobs_page_url) diff --git a/mlflow/projects/kubernetes.py b/mlflow/projects/kubernetes.py new file mode 100644 index 0000000000000..24c7ab8444557 --- /dev/null +++ b/mlflow/projects/kubernetes.py @@ -0,0 +1,137 @@ +from __future__ import absolute_import +import logging +import docker +import time +from threading import RLock +import kubernetes +from datetime import datetime + +from mlflow.exceptions import ExecutionException +from mlflow.projects.submitted_run import SubmittedRun +from mlflow.entities import RunStatus + +_logger = logging.getLogger(__name__) + + +def push_image_to_registry(image_tag): + client = docker.from_env() + _logger.info("=== Pushing docker image %s ===", image_tag) + for line in client.images.push(repository=image_tag, stream=True, decode=True): + if 'error' in line and line['error']: + raise ExecutionException("Error while pushing to docker registry: " + "{error}".format(error=line['error'])) + return client.images.get_registry_data(image_tag).id + + +def _get_kubernetes_job_definition(project_name, image_tag, image_digest, + command, env_vars, job_template): + container_image = image_tag + '@' + image_digest + timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f') + job_name = "{}-{}".format(project_name, timestamp) + _logger.info("=== Creating Job %s ===", job_name) + environment_variables = [{'name': k, 'value': v} for k, v in env_vars.items()] + job_template['metadata']['name'] = job_name + job_template['spec']['template']['spec']['containers'][0]['name'] = project_name + job_template['spec']['template']['spec']['containers'][0]['image'] = container_image + job_template['spec']['template']['spec']['containers'][0]['command'] = command + if 'env' not in job_template['spec']['template']['spec']['containers'][0].keys(): + job_template['spec']['template']['spec']['containers'][0]['env'] = [] + job_template['spec']['template']['spec']['containers'][0]['env'] += environment_variables + return job_template + + +def _get_run_command(entrypoint_command): + formatted_command = [] + for cmd in entrypoint_command: + formatted_command = cmd.split(" ") + return formatted_command + + +def run_kubernetes_job(project_name, active_run, image_tag, image_digest, command, env_vars, + kube_context, job_template=None): + job_template = _get_kubernetes_job_definition(project_name, + image_tag, + image_digest, + _get_run_command(command), + env_vars, + job_template) + job_name = job_template['metadata']['name'] + job_namespace = job_template['metadata']['namespace'] + kubernetes.config.load_kube_config(context=kube_context) + api_instance = kubernetes.client.BatchV1Api() + api_instance.create_namespaced_job(namespace=job_namespace, + body=job_template, pretty=True) + return KubernetesSubmittedRun(active_run.info.run_id, job_name, job_namespace) + + +class KubernetesSubmittedRun(SubmittedRun): + """ + Instance of SubmittedRun corresponding to a Kubernetes Job run launched to run an MLflow + 
project. + :param mlflow_run_id: ID of the MLflow project run. + :param job_name: Kubernetes job name. + :param job_namespace: Kubernetes job namespace. + """ + # How often to poll run status when waiting on a run + POLL_STATUS_INTERVAL = 5 + + def __init__(self, mlflow_run_id, job_name, job_namespace): + super(KubernetesSubmittedRun, self).__init__() + self._mlflow_run_id = mlflow_run_id + self._job_name = job_name + self._job_namespace = job_namespace + self._status = RunStatus.SCHEDULED + self._status_lock = RLock() + + @property + def run_id(self): + return self._mlflow_run_id + + def wait(self): + kube_api = kubernetes.client.BatchV1Api() + while not RunStatus.is_terminated(self._update_status(kube_api)): + time.sleep(self.POLL_STATUS_INTERVAL) + + return self._status == RunStatus.FINISHED + + def _update_status(self, kube_api=None): + # Avoid instantiating a BatchV1Api client at class-definition time via a default + # argument; construct one lazily when the caller does not supply a client. + if kube_api is None: + kube_api = kubernetes.client.BatchV1Api() + api_response = kube_api.read_namespaced_job_status(name=self._job_name, + namespace=self._job_namespace, + pretty=True) + status = api_response.status + with self._status_lock: + if RunStatus.is_terminated(self._status): + return self._status + if self._status == RunStatus.SCHEDULED: + if api_response.status.start_time is None: + _logger.info("Waiting for Job to start") + else: + _logger.info("Job started.") + self._status = RunStatus.RUNNING + if status.conditions is not None: + for condition in status.conditions: + if condition.status == "True": + _logger.info(condition.message) + if condition.type == "Failed": + self._status = RunStatus.FAILED + elif condition.type == "Complete": + self._status = RunStatus.FINISHED + return self._status + + def get_status(self): + status = self._status + return status if RunStatus.is_terminated(status) else self._update_status() + + def cancel(self): + with self._status_lock: + if not RunStatus.is_terminated(self._status): + _logger.info("Cancelling job.") + kube_api = kubernetes.client.BatchV1Api() + kube_api.delete_namespaced_job(name=self._job_name, + namespace=self._job_namespace, + body=kubernetes.client.V1DeleteOptions(), + pretty=True) + self._status = RunStatus.KILLED + _logger.info("Job cancelled.") + else: + _logger.info("Attempting to cancel a job that is already terminated.") diff --git a/mlflow/projects/submitted_run.py b/mlflow/projects/submitted_run.py index 8d3f4d76a7d8f..3c9f2855fa517 100644 --- a/mlflow/projects/submitted_run.py +++ b/mlflow/projects/submitted_run.py @@ -2,9 +2,11 @@ import os import signal +import logging from mlflow.entities import RunStatus -from mlflow.utils.logging_utils import eprint + +_logger = logging.getLogger(__name__) class SubmittedRun(object): @@ -84,9 +86,10 @@ def cancel(self): except OSError: # The child process may have exited before we attempted to terminate it, so we # ignore OSErrors raised during child process termination - eprint("Failed to terminate child process (PID %s) corresponding to MLflow " - "run with ID %s. The process may have already " - "exited." % (self.command_proc.pid, self._run_id)) + _logger.info( + "Failed to terminate child process (PID %s) corresponding to MLflow " + "run with ID %s.
The process may have already exited.", + self.command_proc.pid, self._run_id) self.command_proc.wait() def _get_status(self): diff --git a/mlflow/protos/databricks.proto b/mlflow/protos/databricks.proto index 332c3b5698eca..849a77424ef9a 100644 --- a/mlflow/protos/databricks.proto +++ b/mlflow/protos/databricks.proto @@ -1,15 +1,17 @@ import "google/protobuf/descriptor.proto"; import "scalapb/scalapb.proto"; +package mlflow; + option java_package = "com.databricks.api.proto.databricks"; option (scalapb.options).flat_package = true; -// Note: 50000 is the beginning of the range of proto extension values for use by applications. +// Note: 51310 is the beginning of the range of proto extension values for use by applications. extend google.protobuf.FieldOptions { // Indicates an overriding visibility for this field. This can only reduce the visibility; // a public field in an internal API will not have an effect. - optional Visibility visibility = 50000; + optional Visibility visibility = 51310; // This annotation indicates that certain fields must be supplied for the request to be carried // out successfully. @@ -17,7 +19,7 @@ extend google.protobuf.FieldOptions { // go from being optional to required, for backwards compatibility reasons. // Request RPCs are validated automatically prior to processing for required fields, but // returned values are not validated in any way. - optional bool validate_required = 50001; + optional bool validate_required = 51311; // Causes the fields within the tagged Message to be inlined into this Message, for the purposes // of our JSON API. @@ -32,7 +34,7 @@ extend google.protobuf.FieldOptions { // "cluster_name" : "Foo" // } // Note that this is only applicable to singular Message fields. - optional bool json_inline = 50002; + optional bool json_inline = 51312; // Causes a field which conceptually represents a Map to be serialized as a JSON Map. // The given field must be a Message with exactly 2 fields called "key" and "value", where key @@ -41,14 +43,14 @@ extend google.protobuf.FieldOptions { // [ { "key" : "spark.speculation", "value" : "false" } ] // If this field were marked json_map, we would serialize it as // { "spark.speculation" : "false" } - optional bool json_map = 50003; + optional bool json_map = 51313; // The documentation meta data for this field. This gets added automatically when the proto is // parsed. // There are as many doc blocks as visibility levels. // This is not meant to be crafted by hand; this will be automatically generated when parsing // the proto file. - repeated DocumentationMetadata field_doc = 50004; + repeated DocumentationMetadata field_doc = 51314; } // Defines the set of options declared for every service RPC which are used to @@ -71,49 +73,49 @@ message DatabricksRpcOptions { optional string rpc_doc_title = 5; } -// Note: 50000 is the beginning of the range of proto extension values for use by applications. +// Note: 51310 is the beginning of the range of proto extension values for use by applications. extend google.protobuf.MethodOptions { - optional DatabricksRpcOptions rpc = 50000; + optional DatabricksRpcOptions rpc = 51310; // The documentation metadata. // This is not meant to be crafted by hand; this will be automatically generated when parsing // the proto file. - repeated DocumentationMetadata method_doc = 50004; // Same id everywhere + repeated DocumentationMetadata method_doc = 51314; // Same id everywhere } -// Note: 50000 is the beginning of the range of proto extension values for use by applications.
+// Note: 51310 is the beginning of the range of proto extension values for use by applications. extend google.protobuf.MessageOptions { // The documentation metadata. // This is not meant to be crafted by hand; this will be automatically generated when parsing // the proto file. - repeated DocumentationMetadata message_doc = 50004; // Same id everywhere + repeated DocumentationMetadata message_doc = 51314; // Same id everywhere } -// Note: 50000 is the beginning of the range of proto extension values for use by applications. +// Note: 51310 is the beginning of the range of proto extension values for use by applications. extend google.protobuf.ServiceOptions { // The documentation metadata. // This is not meant to be crafted by hand; this will be automatically generated when parsing // the proto file. - repeated DocumentationMetadata service_doc = 50004; // Same id everywhere + repeated DocumentationMetadata service_doc = 51314; // Same id everywhere } -// Note: 50000 is the beginning of the range of proto extension values for use by applications. +// Note: 51310 is the beginning of the range of proto extension values for use by applications. extend google.protobuf.EnumOptions { // The documentation metadata. // This is not meant to be crafted by hand; this will be automatically generated when parsing // the proto file. - repeated DocumentationMetadata enum_doc = 50004; // Same id everywhere + repeated DocumentationMetadata enum_doc = 51314; // Same id everywhere } -// Note: 50000 is the beginning of the range of proto extension values for use by applications. +// Note: 51310 is the beginning of the range of proto extension values for use by applications. extend google.protobuf.EnumValueOptions { // Indicates an overriding visibility for this field. This can only reduce the visibility; // a public field in an internal API will not have an effect. - optional Visibility enum_value_visibility = 50000; + optional Visibility enum_value_visibility = 51310; // The documentation metadata. // This is not meant to be crafted by hand; this will be automatically generated when parsing // the proto file. 
- repeated DocumentationMetadata enum_value_doc = 50004; // Same id everywhere + repeated DocumentationMetadata enum_value_doc = 51314; // Same id everywhere } message HttpEndpoint { diff --git a/mlflow/protos/databricks_pb2.py b/mlflow/protos/databricks_pb2.py index c5ce94f30981e..0747b325c7d3d 100644 --- a/mlflow/protos/databricks_pb2.py +++ b/mlflow/protos/databricks_pb2.py @@ -19,16 +19,16 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='databricks.proto', - package='', + package='mlflow', syntax='proto2', serialized_options=_b('\n#com.databricks.api.proto.databricks\342?\002\020\001'), - serialized_pb=_b('\n\x10\x64\x61tabricks.proto\x1a google/protobuf/descriptor.proto\x1a\x15scalapb/scalapb.proto\"\xb1\x01\n\x14\x44\x61tabricksRpcOptions\x12 \n\tendpoints\x18\x01 \x03(\x0b\x32\r.HttpEndpoint\x12\x1f\n\nvisibility\x18\x02 \x01(\x0e\x32\x0b.Visibility\x12\x1f\n\x0b\x65rror_codes\x18\x03 \x03(\x0e\x32\n.ErrorCode\x12\x1e\n\nrate_limit\x18\x04 \x01(\x0b\x32\n.RateLimit\x12\x15\n\rrpc_doc_title\x18\x05 \x01(\t\"N\n\x0cHttpEndpoint\x12\x14\n\x06method\x18\x01 \x01(\t:\x04POST\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\x1a\n\x05since\x18\x03 \x01(\x0b\x32\x0b.ApiVersion\"*\n\nApiVersion\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\"@\n\tRateLimit\x12\x11\n\tmax_burst\x18\x01 \x01(\x03\x12 \n\x18max_sustained_per_second\x18\x02 \x01(\x03\"\x8c\x01\n\x15\x44ocumentationMetadata\x12\x11\n\tdocstring\x18\x01 \x01(\t\x12\x10\n\x08lead_doc\x18\x02 \x01(\t\x12\x1f\n\nvisibility\x18\x03 \x01(\x0e\x32\x0b.Visibility\x12\x1b\n\x13original_proto_path\x18\x04 \x03(\t\x12\x10\n\x08position\x18\x05 \x01(\x05\"g\n\x1f\x44\x61tabricksServiceExceptionProto\x12\x1e\n\nerror_code\x18\x01 \x01(\x0e\x32\n.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x13\n\x0bstack_trace\x18\x03 \x01(\t*?\n\nVisibility\x12\n\n\x06PUBLIC\x10\x01\x12\x0c\n\x08INTERNAL\x10\x02\x12\x17\n\x13PUBLIC_UNDOCUMENTED\x10\x03*\xf6\x04\n\tErrorCode\x12\x12\n\x0eINTERNAL_ERROR\x10\x01\x12\x1b\n\x17TEMPORARILY_UNAVAILABLE\x10\x02\x12\x0c\n\x08IO_ERROR\x10\x03\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x04\x12\x1c\n\x17INVALID_PARAMETER_VALUE\x10\xe8\x07\x12\x17\n\x12\x45NDPOINT_NOT_FOUND\x10\xe9\x07\x12\x16\n\x11MALFORMED_REQUEST\x10\xea\x07\x12\x12\n\rINVALID_STATE\x10\xeb\x07\x12\x16\n\x11PERMISSION_DENIED\x10\xec\x07\x12\x15\n\x10\x46\x45\x41TURE_DISABLED\x10\xed\x07\x12\x1a\n\x15\x43USTOMER_UNAUTHORIZED\x10\xee\x07\x12\x1b\n\x16REQUEST_LIMIT_EXCEEDED\x10\xef\x07\x12\x1d\n\x18INVALID_STATE_TRANSITION\x10\xd1\x0f\x12\x1b\n\x16\x43OULD_NOT_ACQUIRE_LOCK\x10\xd2\x0f\x12\x1c\n\x17RESOURCE_ALREADY_EXISTS\x10\xb9\x17\x12\x1c\n\x17RESOURCE_DOES_NOT_EXIST\x10\xba\x17\x12\x13\n\x0eQUOTA_EXCEEDED\x10\xa1\x1f\x12\x1c\n\x17MAX_BLOCK_SIZE_EXCEEDED\x10\xa2\x1f\x12\x1b\n\x16MAX_READ_SIZE_EXCEEDED\x10\xa3\x1f\x12\x13\n\x0e\x44RY_RUN_FAILED\x10\x89\'\x12\x1c\n\x17RESOURCE_LIMIT_EXCEEDED\x10\x8a\'\x12\x18\n\x13\x44IRECTORY_NOT_EMPTY\x10\xf1.\x12\x18\n\x13\x44IRECTORY_PROTECTED\x10\xf2.\x12\x1f\n\x1aMAX_NOTEBOOK_SIZE_EXCEEDED\x10\xf3.:@\n\nvisibility\x12\x1d.google.protobuf.FieldOptions\x18\xd0\x86\x03 \x01(\x0e\x32\x0b.Visibility::\n\x11validate_required\x12\x1d.google.protobuf.FieldOptions\x18\xd1\x86\x03 \x01(\x08:4\n\x0bjson_inline\x12\x1d.google.protobuf.FieldOptions\x18\xd2\x86\x03 \x01(\x08:1\n\x08json_map\x12\x1d.google.protobuf.FieldOptions\x18\xd3\x86\x03 \x01(\x08:J\n\tfield_doc\x12\x1d.google.protobuf.FieldOptions\x18\xd4\x86\x03 
\x03(\x0b\x32\x16.DocumentationMetadata:D\n\x03rpc\x12\x1e.google.protobuf.MethodOptions\x18\xd0\x86\x03 \x01(\x0b\x32\x15.DatabricksRpcOptions:L\n\nmethod_doc\x12\x1e.google.protobuf.MethodOptions\x18\xd4\x86\x03 \x03(\x0b\x32\x16.DocumentationMetadata:N\n\x0bmessage_doc\x12\x1f.google.protobuf.MessageOptions\x18\xd4\x86\x03 \x03(\x0b\x32\x16.DocumentationMetadata:N\n\x0bservice_doc\x12\x1f.google.protobuf.ServiceOptions\x18\xd4\x86\x03 \x03(\x0b\x32\x16.DocumentationMetadata:H\n\x08\x65num_doc\x12\x1c.google.protobuf.EnumOptions\x18\xd4\x86\x03 \x03(\x0b\x32\x16.DocumentationMetadata:O\n\x15\x65num_value_visibility\x12!.google.protobuf.EnumValueOptions\x18\xd0\x86\x03 \x01(\x0e\x32\x0b.Visibility:S\n\x0e\x65num_value_doc\x12!.google.protobuf.EnumValueOptions\x18\xd4\x86\x03 \x03(\x0b\x32\x16.DocumentationMetadataB*\n#com.databricks.api.proto.databricks\xe2?\x02\x10\x01') + serialized_pb=_b('\n\x10\x64\x61tabricks.proto\x12\x06mlflow\x1a google/protobuf/descriptor.proto\x1a\x15scalapb/scalapb.proto\"\xcd\x01\n\x14\x44\x61tabricksRpcOptions\x12\'\n\tendpoints\x18\x01 \x03(\x0b\x32\x14.mlflow.HttpEndpoint\x12&\n\nvisibility\x18\x02 \x01(\x0e\x32\x12.mlflow.Visibility\x12&\n\x0b\x65rror_codes\x18\x03 \x03(\x0e\x32\x11.mlflow.ErrorCode\x12%\n\nrate_limit\x18\x04 \x01(\x0b\x32\x11.mlflow.RateLimit\x12\x15\n\rrpc_doc_title\x18\x05 \x01(\t\"U\n\x0cHttpEndpoint\x12\x14\n\x06method\x18\x01 \x01(\t:\x04POST\x12\x0c\n\x04path\x18\x02 \x01(\t\x12!\n\x05since\x18\x03 \x01(\x0b\x32\x12.mlflow.ApiVersion\"*\n\nApiVersion\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\"@\n\tRateLimit\x12\x11\n\tmax_burst\x18\x01 \x01(\x03\x12 \n\x18max_sustained_per_second\x18\x02 \x01(\x03\"\x93\x01\n\x15\x44ocumentationMetadata\x12\x11\n\tdocstring\x18\x01 \x01(\t\x12\x10\n\x08lead_doc\x18\x02 \x01(\t\x12&\n\nvisibility\x18\x03 \x01(\x0e\x32\x12.mlflow.Visibility\x12\x1b\n\x13original_proto_path\x18\x04 \x03(\t\x12\x10\n\x08position\x18\x05 \x01(\x05\"n\n\x1f\x44\x61tabricksServiceExceptionProto\x12%\n\nerror_code\x18\x01 \x01(\x0e\x32\x11.mlflow.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x13\n\x0bstack_trace\x18\x03 \x01(\t*?\n\nVisibility\x12\n\n\x06PUBLIC\x10\x01\x12\x0c\n\x08INTERNAL\x10\x02\x12\x17\n\x13PUBLIC_UNDOCUMENTED\x10\x03*\xf6\x04\n\tErrorCode\x12\x12\n\x0eINTERNAL_ERROR\x10\x01\x12\x1b\n\x17TEMPORARILY_UNAVAILABLE\x10\x02\x12\x0c\n\x08IO_ERROR\x10\x03\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x04\x12\x1c\n\x17INVALID_PARAMETER_VALUE\x10\xe8\x07\x12\x17\n\x12\x45NDPOINT_NOT_FOUND\x10\xe9\x07\x12\x16\n\x11MALFORMED_REQUEST\x10\xea\x07\x12\x12\n\rINVALID_STATE\x10\xeb\x07\x12\x16\n\x11PERMISSION_DENIED\x10\xec\x07\x12\x15\n\x10\x46\x45\x41TURE_DISABLED\x10\xed\x07\x12\x1a\n\x15\x43USTOMER_UNAUTHORIZED\x10\xee\x07\x12\x1b\n\x16REQUEST_LIMIT_EXCEEDED\x10\xef\x07\x12\x1d\n\x18INVALID_STATE_TRANSITION\x10\xd1\x0f\x12\x1b\n\x16\x43OULD_NOT_ACQUIRE_LOCK\x10\xd2\x0f\x12\x1c\n\x17RESOURCE_ALREADY_EXISTS\x10\xb9\x17\x12\x1c\n\x17RESOURCE_DOES_NOT_EXIST\x10\xba\x17\x12\x13\n\x0eQUOTA_EXCEEDED\x10\xa1\x1f\x12\x1c\n\x17MAX_BLOCK_SIZE_EXCEEDED\x10\xa2\x1f\x12\x1b\n\x16MAX_READ_SIZE_EXCEEDED\x10\xa3\x1f\x12\x13\n\x0e\x44RY_RUN_FAILED\x10\x89\'\x12\x1c\n\x17RESOURCE_LIMIT_EXCEEDED\x10\x8a\'\x12\x18\n\x13\x44IRECTORY_NOT_EMPTY\x10\xf1.\x12\x18\n\x13\x44IRECTORY_PROTECTED\x10\xf2.\x12\x1f\n\x1aMAX_NOTEBOOK_SIZE_EXCEEDED\x10\xf3.:G\n\nvisibility\x12\x1d.google.protobuf.FieldOptions\x18\xee\x90\x03 
\x01(\x0e\x32\x12.mlflow.Visibility::\n\x11validate_required\x12\x1d.google.protobuf.FieldOptions\x18\xef\x90\x03 \x01(\x08:4\n\x0bjson_inline\x12\x1d.google.protobuf.FieldOptions\x18\xf0\x90\x03 \x01(\x08:1\n\x08json_map\x12\x1d.google.protobuf.FieldOptions\x18\xf1\x90\x03 \x01(\x08:Q\n\tfield_doc\x12\x1d.google.protobuf.FieldOptions\x18\xf2\x90\x03 \x03(\x0b\x32\x1d.mlflow.DocumentationMetadata:K\n\x03rpc\x12\x1e.google.protobuf.MethodOptions\x18\xee\x90\x03 \x01(\x0b\x32\x1c.mlflow.DatabricksRpcOptions:S\n\nmethod_doc\x12\x1e.google.protobuf.MethodOptions\x18\xf2\x90\x03 \x03(\x0b\x32\x1d.mlflow.DocumentationMetadata:U\n\x0bmessage_doc\x12\x1f.google.protobuf.MessageOptions\x18\xf2\x90\x03 \x03(\x0b\x32\x1d.mlflow.DocumentationMetadata:U\n\x0bservice_doc\x12\x1f.google.protobuf.ServiceOptions\x18\xf2\x90\x03 \x03(\x0b\x32\x1d.mlflow.DocumentationMetadata:O\n\x08\x65num_doc\x12\x1c.google.protobuf.EnumOptions\x18\xf2\x90\x03 \x03(\x0b\x32\x1d.mlflow.DocumentationMetadata:V\n\x15\x65num_value_visibility\x12!.google.protobuf.EnumValueOptions\x18\xee\x90\x03 \x01(\x0e\x32\x12.mlflow.Visibility:Z\n\x0e\x65num_value_doc\x12!.google.protobuf.EnumValueOptions\x18\xf2\x90\x03 \x03(\x0b\x32\x1d.mlflow.DocumentationMetadataB*\n#com.databricks.api.proto.databricks\xe2?\x02\x10\x01') , dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,scalapb_dot_scalapb__pb2.DESCRIPTOR,]) _VISIBILITY = _descriptor.EnumDescriptor( name='Visibility', - full_name='Visibility', + full_name='mlflow.Visibility', filename=None, file=DESCRIPTOR, values=[ @@ -47,15 +47,15 @@ ], containing_type=None, serialized_options=None, - serialized_start=695, - serialized_end=758, + serialized_start=752, + serialized_end=815, ) _sym_db.RegisterEnumDescriptor(_VISIBILITY) Visibility = enum_type_wrapper.EnumTypeWrapper(_VISIBILITY) _ERRORCODE = _descriptor.EnumDescriptor( name='ErrorCode', - full_name='ErrorCode', + full_name='mlflow.ErrorCode', filename=None, file=DESCRIPTOR, values=[ @@ -158,8 +158,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=761, - serialized_end=1391, + serialized_start=818, + serialized_end=1448, ) _sym_db.RegisterEnumDescriptor(_ERRORCODE) @@ -192,98 +192,98 @@ DIRECTORY_PROTECTED = 6002 MAX_NOTEBOOK_SIZE_EXCEEDED = 6003 -VISIBILITY_FIELD_NUMBER = 50000 +VISIBILITY_FIELD_NUMBER = 51310 visibility = _descriptor.FieldDescriptor( - name='visibility', full_name='visibility', index=0, - number=50000, type=14, cpp_type=8, label=1, + name='visibility', full_name='mlflow.visibility', index=0, + number=51310, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -VALIDATE_REQUIRED_FIELD_NUMBER = 50001 +VALIDATE_REQUIRED_FIELD_NUMBER = 51311 validate_required = _descriptor.FieldDescriptor( - name='validate_required', full_name='validate_required', index=1, - number=50001, type=8, cpp_type=7, label=1, + name='validate_required', full_name='mlflow.validate_required', index=1, + number=51311, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -JSON_INLINE_FIELD_NUMBER = 50002 +JSON_INLINE_FIELD_NUMBER = 51312 json_inline = _descriptor.FieldDescriptor( - name='json_inline', full_name='json_inline', index=2, - number=50002, type=8, cpp_type=7, label=1, + 
name='json_inline', full_name='mlflow.json_inline', index=2, + number=51312, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -JSON_MAP_FIELD_NUMBER = 50003 +JSON_MAP_FIELD_NUMBER = 51313 json_map = _descriptor.FieldDescriptor( - name='json_map', full_name='json_map', index=3, - number=50003, type=8, cpp_type=7, label=1, + name='json_map', full_name='mlflow.json_map', index=3, + number=51313, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -FIELD_DOC_FIELD_NUMBER = 50004 +FIELD_DOC_FIELD_NUMBER = 51314 field_doc = _descriptor.FieldDescriptor( - name='field_doc', full_name='field_doc', index=4, - number=50004, type=11, cpp_type=10, label=3, + name='field_doc', full_name='mlflow.field_doc', index=4, + number=51314, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -RPC_FIELD_NUMBER = 50000 +RPC_FIELD_NUMBER = 51310 rpc = _descriptor.FieldDescriptor( - name='rpc', full_name='rpc', index=5, - number=50000, type=11, cpp_type=10, label=1, + name='rpc', full_name='mlflow.rpc', index=5, + number=51310, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -METHOD_DOC_FIELD_NUMBER = 50004 +METHOD_DOC_FIELD_NUMBER = 51314 method_doc = _descriptor.FieldDescriptor( - name='method_doc', full_name='method_doc', index=6, - number=50004, type=11, cpp_type=10, label=3, + name='method_doc', full_name='mlflow.method_doc', index=6, + number=51314, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -MESSAGE_DOC_FIELD_NUMBER = 50004 +MESSAGE_DOC_FIELD_NUMBER = 51314 message_doc = _descriptor.FieldDescriptor( - name='message_doc', full_name='message_doc', index=7, - number=50004, type=11, cpp_type=10, label=3, + name='message_doc', full_name='mlflow.message_doc', index=7, + number=51314, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -SERVICE_DOC_FIELD_NUMBER = 50004 +SERVICE_DOC_FIELD_NUMBER = 51314 service_doc = _descriptor.FieldDescriptor( - name='service_doc', full_name='service_doc', index=8, - number=50004, type=11, cpp_type=10, label=3, + name='service_doc', full_name='mlflow.service_doc', index=8, + number=51314, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -ENUM_DOC_FIELD_NUMBER = 50004 +ENUM_DOC_FIELD_NUMBER = 51314 enum_doc = _descriptor.FieldDescriptor( - name='enum_doc', full_name='enum_doc', index=9, - number=50004, type=11, cpp_type=10, label=3, + name='enum_doc', full_name='mlflow.enum_doc', index=9, + number=51314, type=11, cpp_type=10, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -ENUM_VALUE_VISIBILITY_FIELD_NUMBER = 50000 +ENUM_VALUE_VISIBILITY_FIELD_NUMBER = 51310 enum_value_visibility = _descriptor.FieldDescriptor( - name='enum_value_visibility', full_name='enum_value_visibility', index=10, - number=50000, type=14, cpp_type=8, label=1, + name='enum_value_visibility', full_name='mlflow.enum_value_visibility', index=10, + number=51310, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, serialized_options=None, file=DESCRIPTOR) -ENUM_VALUE_DOC_FIELD_NUMBER = 50004 +ENUM_VALUE_DOC_FIELD_NUMBER = 51314 enum_value_doc = _descriptor.FieldDescriptor( - name='enum_value_doc', full_name='enum_value_doc', index=11, - number=50004, type=11, cpp_type=10, label=3, + name='enum_value_doc', full_name='mlflow.enum_value_doc', index=11, + number=51314, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=True, extension_scope=None, @@ -292,41 +292,41 @@ _DATABRICKSRPCOPTIONS = _descriptor.Descriptor( name='DatabricksRpcOptions', - full_name='DatabricksRpcOptions', + full_name='mlflow.DatabricksRpcOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='endpoints', full_name='DatabricksRpcOptions.endpoints', index=0, + name='endpoints', full_name='mlflow.DatabricksRpcOptions.endpoints', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='visibility', full_name='DatabricksRpcOptions.visibility', index=1, + name='visibility', full_name='mlflow.DatabricksRpcOptions.visibility', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='error_codes', full_name='DatabricksRpcOptions.error_codes', index=2, + name='error_codes', full_name='mlflow.DatabricksRpcOptions.error_codes', index=2, number=3, type=14, cpp_type=8, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='rate_limit', full_name='DatabricksRpcOptions.rate_limit', index=3, + name='rate_limit', full_name='mlflow.DatabricksRpcOptions.rate_limit', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='rpc_doc_title', full_name='DatabricksRpcOptions.rpc_doc_title', index=4, + name='rpc_doc_title', full_name='mlflow.DatabricksRpcOptions.rpc_doc_title', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -344,34 +344,34 @@ extension_ranges=[], oneofs=[ ], - 
serialized_start=78, - serialized_end=255, + serialized_start=86, + serialized_end=291, ) _HTTPENDPOINT = _descriptor.Descriptor( name='HttpEndpoint', - full_name='HttpEndpoint', + full_name='mlflow.HttpEndpoint', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='method', full_name='HttpEndpoint.method', index=0, + name='method', full_name='mlflow.HttpEndpoint.method', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=True, default_value=_b("POST").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='path', full_name='HttpEndpoint.path', index=1, + name='path', full_name='mlflow.HttpEndpoint.path', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='since', full_name='HttpEndpoint.since', index=2, + name='since', full_name='mlflow.HttpEndpoint.since', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -389,27 +389,27 @@ extension_ranges=[], oneofs=[ ], - serialized_start=257, - serialized_end=335, + serialized_start=293, + serialized_end=378, ) _APIVERSION = _descriptor.Descriptor( name='ApiVersion', - full_name='ApiVersion', + full_name='mlflow.ApiVersion', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='major', full_name='ApiVersion.major', index=0, + name='major', full_name='mlflow.ApiVersion.major', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='minor', full_name='ApiVersion.minor', index=1, + name='minor', full_name='mlflow.ApiVersion.minor', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -427,27 +427,27 @@ extension_ranges=[], oneofs=[ ], - serialized_start=337, - serialized_end=379, + serialized_start=380, + serialized_end=422, ) _RATELIMIT = _descriptor.Descriptor( name='RateLimit', - full_name='RateLimit', + full_name='mlflow.RateLimit', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='max_burst', full_name='RateLimit.max_burst', index=0, + name='max_burst', full_name='mlflow.RateLimit.max_burst', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='max_sustained_per_second', full_name='RateLimit.max_sustained_per_second', index=1, + name='max_sustained_per_second', full_name='mlflow.RateLimit.max_sustained_per_second', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -465,48 +465,48 @@ extension_ranges=[], oneofs=[ ], - serialized_start=381, - serialized_end=445, + serialized_start=424, + serialized_end=488, 
) _DOCUMENTATIONMETADATA = _descriptor.Descriptor( name='DocumentationMetadata', - full_name='DocumentationMetadata', + full_name='mlflow.DocumentationMetadata', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='docstring', full_name='DocumentationMetadata.docstring', index=0, + name='docstring', full_name='mlflow.DocumentationMetadata.docstring', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='lead_doc', full_name='DocumentationMetadata.lead_doc', index=1, + name='lead_doc', full_name='mlflow.DocumentationMetadata.lead_doc', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='visibility', full_name='DocumentationMetadata.visibility', index=2, + name='visibility', full_name='mlflow.DocumentationMetadata.visibility', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='original_proto_path', full_name='DocumentationMetadata.original_proto_path', index=3, + name='original_proto_path', full_name='mlflow.DocumentationMetadata.original_proto_path', index=3, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='position', full_name='DocumentationMetadata.position', index=4, + name='position', full_name='mlflow.DocumentationMetadata.position', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -524,34 +524,34 @@ extension_ranges=[], oneofs=[ ], - serialized_start=448, - serialized_end=588, + serialized_start=491, + serialized_end=638, ) _DATABRICKSSERVICEEXCEPTIONPROTO = _descriptor.Descriptor( name='DatabricksServiceExceptionProto', - full_name='DatabricksServiceExceptionProto', + full_name='mlflow.DatabricksServiceExceptionProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='error_code', full_name='DatabricksServiceExceptionProto.error_code', index=0, + name='error_code', full_name='mlflow.DatabricksServiceExceptionProto.error_code', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='message', full_name='DatabricksServiceExceptionProto.message', index=1, + name='message', full_name='mlflow.DatabricksServiceExceptionProto.message', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( - name='stack_trace', full_name='DatabricksServiceExceptionProto.stack_trace', index=2, + name='stack_trace', full_name='mlflow.DatabricksServiceExceptionProto.stack_trace', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -569,8 +569,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=590, - serialized_end=693, + serialized_start=640, + serialized_end=750, ) _DATABRICKSRPCOPTIONS.fields_by_name['endpoints'].message_type = _HTTPENDPOINT @@ -605,42 +605,42 @@ DatabricksRpcOptions = _reflection.GeneratedProtocolMessageType('DatabricksRpcOptions', (_message.Message,), dict( DESCRIPTOR = _DATABRICKSRPCOPTIONS, __module__ = 'databricks_pb2' - # @@protoc_insertion_point(class_scope:DatabricksRpcOptions) + # @@protoc_insertion_point(class_scope:mlflow.DatabricksRpcOptions) )) _sym_db.RegisterMessage(DatabricksRpcOptions) HttpEndpoint = _reflection.GeneratedProtocolMessageType('HttpEndpoint', (_message.Message,), dict( DESCRIPTOR = _HTTPENDPOINT, __module__ = 'databricks_pb2' - # @@protoc_insertion_point(class_scope:HttpEndpoint) + # @@protoc_insertion_point(class_scope:mlflow.HttpEndpoint) )) _sym_db.RegisterMessage(HttpEndpoint) ApiVersion = _reflection.GeneratedProtocolMessageType('ApiVersion', (_message.Message,), dict( DESCRIPTOR = _APIVERSION, __module__ = 'databricks_pb2' - # @@protoc_insertion_point(class_scope:ApiVersion) + # @@protoc_insertion_point(class_scope:mlflow.ApiVersion) )) _sym_db.RegisterMessage(ApiVersion) RateLimit = _reflection.GeneratedProtocolMessageType('RateLimit', (_message.Message,), dict( DESCRIPTOR = _RATELIMIT, __module__ = 'databricks_pb2' - # @@protoc_insertion_point(class_scope:RateLimit) + # @@protoc_insertion_point(class_scope:mlflow.RateLimit) )) _sym_db.RegisterMessage(RateLimit) DocumentationMetadata = _reflection.GeneratedProtocolMessageType('DocumentationMetadata', (_message.Message,), dict( DESCRIPTOR = _DOCUMENTATIONMETADATA, __module__ = 'databricks_pb2' - # @@protoc_insertion_point(class_scope:DocumentationMetadata) + # @@protoc_insertion_point(class_scope:mlflow.DocumentationMetadata) )) _sym_db.RegisterMessage(DocumentationMetadata) DatabricksServiceExceptionProto = _reflection.GeneratedProtocolMessageType('DatabricksServiceExceptionProto', (_message.Message,), dict( DESCRIPTOR = _DATABRICKSSERVICEEXCEPTIONPROTO, __module__ = 'databricks_pb2' - # @@protoc_insertion_point(class_scope:DatabricksServiceExceptionProto) + # @@protoc_insertion_point(class_scope:mlflow.DatabricksServiceExceptionProto) )) _sym_db.RegisterMessage(DatabricksServiceExceptionProto) diff --git a/mlflow/protos/service.proto b/mlflow/protos/service.proto index 88e4ab4d7ed08..8cbff2fb52ad5 100644 --- a/mlflow/protos/service.proto +++ b/mlflow/protos/service.proto @@ -22,6 +22,10 @@ service MlflowService { rpc createExperiment (CreateExperiment) returns (CreateExperiment.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/experiments/create" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/experiments/create" since { major: 2, minor: 0 }, @@ -36,6 +40,10 @@ service MlflowService { rpc listExperiments (ListExperiments) returns (ListExperiments.Response) { option (rpc) = { endpoints: [{ + method: "GET", + path: "/mlflow/experiments/list" + since { major: 2, minor: 0 }, + }, { method: "GET", path: "/preview/mlflow/experiments/list" since { major: 2, minor: 0 }, @@ -46,10 +54,14 
@@ service MlflowService { } // Get metadata for an experiment and a list of runs for the experiment. - // This RPC will work on deleted experiments. + // This method works on deleted experiments. rpc getExperiment (GetExperiment) returns (GetExperiment.Response) { option (rpc) = { endpoints: [{ + method: "GET", + path: "/mlflow/experiments/get" + since { major: 2, minor: 0 }, + }, { method: "GET", path: "/preview/mlflow/experiments/get" since { major: 2, minor: 0 }, @@ -59,12 +71,16 @@ service MlflowService { }; } - // Mark an experiment and associated runs, params, metrics, ... etc for deletion. + // Mark an experiment and associated metadata, runs, metrics, params, and tags for deletion. // If the experiment uses FileStore, artifacts associated with experiment are also deleted. // rpc deleteExperiment (DeleteExperiment) returns (DeleteExperiment.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/experiments/delete" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/experiments/delete" since { major: 2, minor: 0 }, @@ -75,7 +91,7 @@ service MlflowService { } // Restore an experiment marked for deletion. This also restores - // associated metadata, runs, metrics, and params. If experiment uses FileStore, underlying + // associated metadata, runs, metrics, params, and tags. If the experiment uses FileStore, underlying // artifacts associated with experiment are also restored. // // Throws ``RESOURCE_DOES_NOT_EXIST`` if experiment was never created or was permanently deleted. @@ -83,6 +99,10 @@ service MlflowService { rpc restoreExperiment (RestoreExperiment) returns (RestoreExperiment.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/experiments/restore" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/experiments/restore" since { major: 2, minor: 0 }, @@ -92,11 +112,15 @@ service MlflowService { }; } - // Updates an experiment metadata. + // Update experiment metadata. // rpc updateExperiment (UpdateExperiment) returns (UpdateExperiment.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/experiments/update" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/experiments/update" since { major: 2, minor: 0 }, @@ -113,6 +137,10 @@ service MlflowService { rpc createRun(CreateRun) returns (CreateRun.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/create" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/create" since { major: 2, minor: 0 }, @@ -128,6 +156,10 @@ service MlflowService { rpc updateRun(UpdateRun) returns (UpdateRun.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/update" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/update" since { major: 2, minor: 0 }, @@ -138,15 +170,20 @@ service MlflowService { }; } - // This operation will mark the run for deletion. + // Mark a run for deletion.
rpc deleteRun(DeleteRun) returns (DeleteRun.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/delete" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/delete" since { major: 2, minor: 0 }, }], visibility: PUBLIC, + rpc_doc_title: "Delete Run", }; } @@ -154,15 +191,19 @@ service MlflowService { rpc restoreRun(RestoreRun) returns (RestoreRun.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/restore" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/restore" since { major: 2, minor: 0 }, }], visibility: PUBLIC, + rpc_doc_title: "Restore Run", }; } - // Log a metric for a run. A metric is a key-value pair (string key, float value) with an // associated timestamp. Examples include the various metrics that represent ML model accuracy. // A metric can be logged multiple times. @@ -170,6 +211,10 @@ service MlflowService { rpc logMetric(LogMetric) returns (LogMetric.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/log-metric" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/log-metric" since { major: 2, minor: 0 }, @@ -187,6 +232,10 @@ service MlflowService { rpc logParam(LogParam) returns (LogParam.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/log-parameter" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/log-parameter" since { major: 2, minor: 0 }, @@ -203,6 +252,10 @@ service MlflowService { rpc setTag(SetTag) returns (SetTag.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/set-tag" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/set-tag" since { major: 2, minor: 0 }, @@ -213,50 +266,43 @@ service MlflowService { }; } - // Get metadata, params, tags, and metrics for a run. Only the last logged value for each metric - // is returned. + // Delete a tag on a run. Tags are run metadata that can be updated during a run and after + // a run completes. // - rpc getRun (GetRun) returns (GetRun.Response) { + rpc deleteTag(DeleteTag) returns (DeleteTag.Response) { option (rpc) = { endpoints: [{ - method: "GET", - path: "/preview/mlflow/runs/get" + method: "POST", + path: "/mlflow/runs/delete-tag" + since { major: 2, minor: 0 }, + }, { + method: "POST", + path: "/preview/mlflow/runs/delete-tag" since { major: 2, minor: 0 }, }], visibility: PUBLIC, - rpc_doc_title: "Get Run", + rpc_doc_title: "Delete Tag", }; } - // Get the value for a metric logged during a run. If the metric is logged more - // than once, returns the last logged value. - // - rpc getMetric (GetMetric) returns (GetMetric.Response) { + // Get metadata, metrics, params, and tags for a run. In the case where multiple metrics + // with the same key are logged for a run, return only the value with the latest timestamp. + // If there are multiple values with the latest timestamp, return the maximum of these values. + rpc getRun (GetRun) returns (GetRun.Response) { option (rpc) = { endpoints: [{ method: "GET", - path: "/preview/mlflow/metrics/get" + path: "/mlflow/runs/get" since { major: 2, minor: 0 }, - }], - - visibility: PUBLIC, - rpc_doc_title: "Get Metric", - }; - } - - // Get a param value. 
- // - rpc getParam (GetParam) returns (GetParam.Response) { - option (rpc) = { - endpoints: [{ + }, { method: "GET", - path: "/preview/mlflow/params/get" + path: "/preview/mlflow/runs/get" since { major: 2, minor: 0 }, }], visibility: PUBLIC, - rpc_doc_title: "Get Param", + rpc_doc_title: "Get Run", }; } @@ -266,6 +312,10 @@ service MlflowService { rpc searchRuns (SearchRuns) returns (SearchRuns.Response) { option (rpc) = { endpoints: [{ + method: "POST", + path: "/mlflow/runs/search" + since { major: 2, minor: 0 }, + }, { method: "POST", path: "/preview/mlflow/runs/search" since { major: 2, minor: 0 }, @@ -285,6 +335,10 @@ service MlflowService { rpc listArtifacts (ListArtifacts) returns (ListArtifacts.Response) { option (rpc) = { endpoints: [{ + method: "GET", + path: "/mlflow/artifacts/list" + since { major: 2, minor: 0 }, + }, { method: "GET", path: "/preview/mlflow/artifacts/list" since { major: 2, minor: 0 }, @@ -299,6 +353,10 @@ service MlflowService { rpc getMetricHistory (GetMetricHistory) returns (GetMetricHistory.Response) { option (rpc) = { endpoints: [{ + method: "GET", + path: "/mlflow/metrics/get-history" + since { major: 2, minor: 0 }, + }, { method: "GET", path: "/preview/mlflow/metrics/get-history" since { major: 2, minor: 0 }, @@ -307,6 +365,72 @@ service MlflowService { rpc_doc_title: "Get Metric History", }; } + + + // Log a batch of metrics, params, and tags for a run. + // If any data fails to be persisted, the server will respond with an error (non-200 status code). + // In case of error (due to internal server error or an invalid request), partial data may + // be written. + // + // You can write metrics, params, and tags in interleaving fashion, but writes within a given entity + // type are guaranteed to follow the order specified in the request body. That is, for an API + // request like + // + // .. code-block:: json + // + // { + // "run_id": "2a14ed5c6a87499199e0106c3501eab8", + // "metrics": [ + // {"key": "mae", "value": 2.5, "timestamp": 1552550804}, + // {"key": "rmse", "value": 2.7, "timestamp": 1552550804}, + // ], + // "params": [ + // {"key": "model_class", "value": "LogisticRegression"}, + // ] + // } + // + // the server is guaranteed to write metric "rmse" after "mae", though it may write param + // "model_class" before both metrics, after "mae", or after both metrics. + // + // The overwrite behavior for metrics, params, and tags is as follows: + // + // - Metrics: metric values are never overwritten. Logging a metric (key, value, timestamp) appends to the set of values for the metric with the provided key. + // + // - Tags: tag values can be overwritten by successive writes to the same tag key. That is, if multiple tag values with the same key are provided in the same API request, the last-provided tag value is written. Logging the same tag (key, value) is permitted - that is, logging a tag is idempotent. + // + // - Params: once written, param values cannot be changed (attempting to overwrite a param value will result in an error). However, logging the same param (key, value) is permitted - that is, logging a param is idempotent. + // + // Request Limits + // -------------- + // A single JSON-serialized API request may be up to 1 MB in size and contain: + // + // - No more than 1000 metrics, params, and tags in total + // - Up to 1000 metrics + // - Up to 100 params + // - Up to 100 tags + // + // For example, a valid request might contain 900 metrics, 50 params, and 50 tags, but logging + // 900 metrics, 50 params, and 51 tags is invalid.
The following limits also apply + // to metric, param, and tag keys and values: + // + // - Metric, param, and tag keys can be up to 250 characters in length + // - Param and tag values can be up to 250 characters in length + // + rpc logBatch (LogBatch) returns (LogBatch.Response) { + option (rpc) = { + endpoints: [{ + method: "POST", + path: "/mlflow/runs/log-batch" + since { major: 2, minor: 0 }, + }, { + method: "POST", + path: "/preview/mlflow/runs/log-batch" + since { major: 2, minor: 0 }, + }], + visibility: PUBLIC, + rpc_doc_title: "Log Batch", + }; + } } // View type for ListExperiments query. @@ -367,6 +491,9 @@ message Metric { // The timestamp at which this metric was recorded. optional int64 timestamp = 3; + + // Step at which to log the metric. + optional int64 step = 4 [default = 0]; } // Param associated with a run. @@ -386,7 +513,7 @@ message Run { optional RunData data = 2; } -// Run data (metrics, params, etc). +// Run data (metrics, params, and tags). message RunData { // Run metrics. repeated Metric metrics = 1; @@ -408,21 +535,18 @@ message RunTag { // Metadata of a single run. message RunInfo { // Unique identifier for the run. + optional string run_id = 15; + + // [Deprecated, use run_id instead] Unique identifier for the run. This field will + // be removed in a future MLflow version. optional string run_uuid = 1; // The experiment ID. - optional int64 experiment_id = 2; - - // Human readable name that identifies this run. - optional string name = 3; - - // Source type. - optional SourceType source_type = 4; - - // Source identifier: GitHub URL, name of notebook, name of job, etc. - optional string source_name = 5; + optional string experiment_id = 2; // User who initiated the run. + // This field is deprecated as of MLflow 1.0, and will be removed in a future + // MLflow release. Use 'mlflow.user' tag instead. optional string user_id = 6; // Current status of the run. @@ -434,12 +558,6 @@ message RunInfo { // Unix timestamp of when the run ended in milliseconds. optional int64 end_time = 9; - // Git commit hash of the code used for the run. - optional string source_version = 10; - - // Name of the entry point for the run. - optional string entry_point_name = 11; - // URI of the directory where artifacts should be uploaded. // This can be a local path (starting with "/"), or a distributed file system (DFS) // path, like ``s3://bucket/directory`` or ``dbfs:/my/directory``. @@ -453,7 +571,7 @@ message RunInfo { // Experiment message Experiment { // Unique identifier for the experiment. - optional int64 experiment_id = 1; + optional string experiment_id = 1; // Human readable name that identifies the experiment. optional string name = 2; @@ -484,7 +602,7 @@ message CreateExperiment { message Response { // Unique identifier for the experiment. - optional int64 experiment_id = 1; + optional string experiment_id = 1; } } @@ -496,7 +614,7 @@ message ListExperiments { optional ViewType view_type = 1; message Response { - // All experiments + // All experiments. repeated Experiment experiments = 1; } } @@ -505,10 +623,10 @@ message GetExperiment { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; // ID of the associated experiment. - optional int64 experiment_id = 1 [(validate_required) = true]; + optional string experiment_id = 1 [(validate_required) = true]; message Response { - // Returns experiment details. + // Experiment details. optional Experiment experiment = 1; // All (max limit to be imposed) active runs associated with this experiment. 
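The batch-logging contract spelled out above (per-entity-type write ordering, idempotent params and tags, and the 1 MB / 1000-entity request limits) is easiest to see against a concrete request. A minimal sketch of exercising the new stable path; the server URL, the /api/2.0 prefix, and the run ID are illustrative assumptions, not values taken from this patch:

.. code-block:: python

    import requests

    BASE = "http://localhost:5000/api/2.0"  # assumed local tracking server

    payload = {
        "run_id": "2a14ed5c6a87499199e0106c3501eab8",  # placeholder run ID
        "metrics": [
            # Within one entity type, writes follow request order:
            # "rmse" is persisted after "mae".
            {"key": "mae", "value": 2.5, "timestamp": 1552550804},
            {"key": "rmse", "value": 2.7, "timestamp": 1552550804},
        ],
        # Params may be interleaved anywhere relative to the metrics;
        # re-logging an identical (key, value) param is idempotent.
        "params": [{"key": "model_class", "value": "LogisticRegression"}],
    }

    resp = requests.post(BASE + "/mlflow/runs/log-batch", json=payload)
    # A non-200 response signals failure, but part of the batch may
    # already have been persisted.
    resp.raise_for_status()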
@@ -520,7 +638,7 @@ message DeleteExperiment { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; // ID of the associated experiment. - optional int64 experiment_id = 1 [(validate_required) = true]; + optional string experiment_id = 1 [(validate_required) = true]; message Response { } @@ -529,8 +647,8 @@ message DeleteExperiment { message RestoreExperiment { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // Identifier to get an experiment - optional int64 experiment_id = 1 [(validate_required) = true]; + // ID of the associated experiment. + optional string experiment_id = 1 [(validate_required) = true]; message Response { } @@ -540,9 +658,9 @@ message UpdateExperiment { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; // ID of the associated experiment. - optional int64 experiment_id = 1 [(validate_required) = true]; + optional string experiment_id = 1 [(validate_required) = true]; - // If provided, the experiment's name will be changed to this. The new name must be unique. + // If provided, the experiment's name is changed to the new name. The new name must be unique. optional string new_name = 2; message Response { @@ -553,36 +671,19 @@ message CreateRun { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; // ID of the associated experiment. - optional int64 experiment_id = 1; + optional string experiment_id = 1; // ID of the user executing the run. + // This field is deprecated as of MLflow 1.0, and will be removed in a future + // MLflow release. Use 'mlflow.user' tag instead. optional string user_id = 2; - // Human readable name for the run. - optional string run_name = 3; - - // Originating source for the run. - optional SourceType source_type = 4; - - // String descriptor for the run's source. For example, name or description of a notebook, or the - // URL or path to a project. - optional string source_name = 5; - - // Name of the project entry point associated with the current run, if any. - optional string entry_point_name = 6; - - // Unix timestamp of when the run started in milliseconds. + // Unix timestamp in milliseconds of when the run started. optional int64 start_time = 7; - // Git commit hash of the source code used to create run. - optional string source_version = 8; - // Additional metadata for run. repeated RunTag tags = 9; - // ID of the parent run which started this run. - optional string parent_run_id = 10; - message Response { // The newly created run. optional Run run = 1; @@ -592,13 +693,17 @@ message CreateRun { message UpdateRun { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run to update. - optional string run_uuid = 1 [(validate_required) = true]; + // ID of the run to update. Must be provided. + optional string run_id = 4; + + // [Deprecated, use run_id instead] ID of the run to update. This field will + // be removed in a future MLflow version. + optional string run_uuid = 1; // Updated status of the run. optional RunStatus status = 2; - //Unix timestamp of when the run ended in milliseconds. + // Unix timestamp in milliseconds of when the run ended. optional int64 end_time = 3; message Response { @@ -610,6 +715,7 @@ message UpdateRun { message DeleteRun { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; + // ID of the run to delete.
optional string run_id = 1 [(validate_required) = true]; message Response {} @@ -618,6 +724,7 @@ message RestoreRun { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; + // ID of the run to restore. optional string run_id = 1 [(validate_required) = true]; message Response {} @@ -627,8 +734,12 @@ message RestoreRun { message LogMetric { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run under which to log the metric. - optional string run_uuid = 1 [(validate_required) = true]; + // ID of the run under which to log the metric. Must be provided. + optional string run_id = 6; + + // [Deprecated, use run_id instead] ID of the run under which to log the metric. This field will + // be removed in a future MLflow version. + optional string run_uuid = 1; // Name of the metric. optional string key = 2 [(validate_required) = true]; @@ -639,15 +750,21 @@ message LogMetric { // Unix timestamp in milliseconds at the time metric was logged. optional int64 timestamp = 4 [(validate_required) = true]; - message Response { - } + // Step at which to log the metric. + optional int64 step = 5 [default = 0]; + + message Response {} } message LogParam { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run under which to log the param. - optional string run_uuid = 1 [(validate_required) = true]; + // ID of the run under which to log the param. Must be provided. + optional string run_id = 4; + + // [Deprecated, use run_id instead] ID of the run under which to log the param. This field will + // be removed in a future MLflow version. + optional string run_uuid = 1; // Name of the param. Maximum size is 255 bytes. optional string key = 2 [(validate_required) = true]; @@ -662,8 +779,12 @@ message LogParam { message SetTag { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run under which to set the tag. - optional string run_uuid = 1 [(validate_required) = true]; + // ID of the run under which to log the tag. Must be provided. + optional string run_id = 4; + + // [Deprecated, use run_id instead] ID of the run under which to log the tag. This field will + // be removed in a future MLflow version. + optional string run_uuid = 1; // Name of the tag. Maximum size is 255 bytes. optional string key = 2 [(validate_required) = true]; @@ -675,127 +796,84 @@ message SetTag { } } -message GetRun { +message DeleteTag { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run to fetch. - optional string run_uuid = 1 [(validate_required) = true]; - - message Response { - // Run metadata (name, start time, etc) and data (metrics, params, etc). - optional Run run = 1; - } -} - -message GetMetric { - option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - - // ID of the run from which to retrieve the metric value. - optional string run_uuid = 1 [(validate_required) = true]; + // ID of the run that the tag was logged under. Must be provided. + optional string run_id = 1 [(validate_required) = true]; - // Name of the metric. - optional string metric_key = 2 [(validate_required) = true]; + // Name of the tag. Maximum size is 255 bytes. Must be provided. + optional string key = 2 [(validate_required) = true]; message Response { - // Latest reported value of the specified metric.
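The run-identifier migration in the messages above follows a single pattern: the previously required run_uuid becomes an optional, deprecated alias, and a new optional run_id field at a fresh field number takes its place. A sketch of what this looks like through the generated Python classes, with a placeholder run ID:

.. code-block:: python

    from mlflow.protos.service_pb2 import LogMetric

    RUN_ID = "2a14ed5c6a87499199e0106c3501eab8"  # placeholder

    # New-style request: populates run_id (field 6) and the new step field.
    req = LogMetric(run_id=RUN_ID, key="rmse", value=2.7,
                    timestamp=1552550804, step=0)

    # Old-style request: the deprecated run_uuid (field 1) still parses,
    # so payloads from older clients remain readable during the transition.
    legacy = LogMetric(run_uuid=RUN_ID, key="rmse", value=2.7,
                       timestamp=1552550804)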
- optional Metric metric = 1; } } -message GetParam { +message GetRun { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run from which to retrieve the param value. - optional string run_uuid = 1 [(validate_required) = true]; + // ID of the run to fetch. Must be provided. + optional string run_id = 2; - // Name of the param. - optional string param_name = 2 [(validate_required) = true]; + // [Deprecated, use run_id instead] ID of the run to fetch. This field will + // be removed in a future MLflow version. + optional string run_uuid = 1; message Response { - // Param key-value pair. - optional Param parameter = 1; - } -} - -message SearchExpression { - // A single search filter. - oneof expression { - // A metric search expression. - MetricSearchExpression metric = 1; - // A parameter search expression. - ParameterSearchExpression parameter = 2; - } -} - -message MetricSearchExpression { - // :ref:`mlflowMetric` key for search. - optional string key = 1; - - oneof clause { - // [Deprecated in 0.7.0, to be removed in future version] - // Float clause for comparison. Use 'double' instead. - FloatClause float = 2; - - // Double clause of comparison - DoubleClause double = 3; - } -} - -message ParameterSearchExpression { - // :ref:`mlflowParam` key for search. - optional string key = 1; - - oneof clause { - // String clause for comparison. - StringClause string = 2; + // Run metadata (name, start time, etc) and data (metrics, params, and tags). + optional Run run = 1; } } -message StringClause { - // OneOf ("==", "!=", "~") - optional string comparator = 1; - - // String value for comparison. - optional string value = 2; -} - -message FloatClause { - // OneOf (">", ">=", "==", "!=", "<=", "<") - optional string comparator = 1; - - // Float value for comparison. - optional float value = 2; -} - -message DoubleClause { - // OneOf (">", ">=", "==", "!=", "<=", "<") - optional string comparator = 1; - - // Float value for comparison. - optional double value = 2; -} - message SearchRuns { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; // List of experiment IDs to search over. - repeated int64 experiment_ids = 1; + repeated string experiment_ids = 1; - // Expressions describing runs (AND-ed together when filtering runs). - repeated SearchExpression anded_expressions = 2; + // A filter expression over params, metrics, and tags that allows returning a subset of + // runs. The syntax is a subset of SQL that supports ANDing together binary operations + // between a param, metric, or tag and a constant. + // + // Example: ``metrics.rmse < 1 and params.model_class = 'LogisticRegression'`` + // + // You can select columns with special characters (hyphen, space, period, etc.) by using double quotes: + // ``metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'`` + // + // Supported operators are ``=``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``. + optional string filter = 4; + // Whether to display only active, only deleted, or all runs. + // Defaults to only active runs. optional ViewType run_view_type = 3 [default = ACTIVE_ONLY]; + // Maximum number of runs desired. Max threshold is 50000. + optional int32 max_results = 5 [default = 1000]; + + // List of columns to be ordered by, including attributes, params, metrics, and tags with an + // optional "DESC" or "ASC" annotation, where "ASC" is the default.
+ // Example: ["params.input DESC", "metrics.alpha ASC", "metrics.rmse"] + // Tiebreaks are done by start_time DESC followed by run_id for runs with the same start time + // (and this is the default ordering criterion if order_by is not provided). + repeated string order_by = 6; + + optional string page_token = 7; + message Response { // Runs that match the search criteria. repeated Run runs = 1; + optional string next_page_token = 2; } } message ListArtifacts { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run whose artifacts to list. + // ID of the run whose artifacts to list. Must be provided. + optional string run_id = 3; + + // [Deprecated, use run_id instead] ID of the run whose artifacts to list. This field will + // be removed in a future MLflow version. optional string run_uuid = 1; // Filter artifacts matching this path (a relative path from the root artifact directory). @@ -822,32 +900,38 @@ message FileInfo { optional int64 file_size = 3; } -message GetArtifact { +message GetMetricHistory { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - // ID of the run from which to fetch the artifact. + // ID of the run from which to fetch metric values. Must be provided. + optional string run_id = 3; + + // [Deprecated, use run_id instead] ID of the run from which to fetch metric values. This field + // will be removed in a future MLflow version. optional string run_uuid = 1; - // Path of the artifact to fetch (relative to the root artifact directory for the run). - optional string path = 2; + // Name of the metric. + optional string metric_key = 2 [(validate_required) = true]; message Response { - // Empty because the data of the file will be streamed back in the HTTP response. - // The response will have an HTTP status code of 404 if the artifact path is not found. + // All logged values for this metric. + repeated Metric metrics = 1; } } -message GetMetricHistory { +message LogBatch { option (scalapb.message).extends = "com.databricks.rpc.RPC[$this.Response]"; - - // ID of the run from which to fetch metric values. - optional string run_uuid = 1 [(validate_required) = true]; - - // Name of the metric. - optional string metric_key = 2 [(validate_required) = true]; - + // ID of the run to log under + optional string run_id = 1; + // Metrics to log. A single request can contain up to 1000 metrics, and up to 1000 + // metrics, params, and tags in total. + repeated Metric metrics = 2; + // Params to log. A single request can contain up to 100 params, and up to 1000 + // metrics, params, and tags in total. + repeated Param params = 3; + // Tags to log. A single request can contain up to 100 tags, and up to 1000 + // metrics, params, and tags in total. + repeated RunTag tags = 4; message Response { - // All logged values for this metric. 
- repeated Metric metrics = 1; } } diff --git a/mlflow/protos/service_pb2.py b/mlflow/protos/service_pb2.py index b110c4375bde8..a3be81d7f96e2 100644 --- a/mlflow/protos/service_pb2.py +++ b/mlflow/protos/service_pb2.py @@ -24,7 +24,7 @@ package='mlflow', syntax='proto2', serialized_options=_b('\n\024org.mlflow.api.proto\220\001\001\342?\002\020\001'), - serialized_pb=_b('\n\rservice.proto\x12\x06mlflow\x1a\x15scalapb/scalapb.proto\x1a\x10\x64\x61tabricks.proto\"7\n\x06Metric\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\"#\n\x05Param\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"C\n\x03Run\x12\x1d\n\x04info\x18\x01 \x01(\x0b\x32\x0f.mlflow.RunInfo\x12\x1d\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x0f.mlflow.RunData\"g\n\x07RunData\x12\x1f\n\x07metrics\x18\x01 \x03(\x0b\x32\x0e.mlflow.Metric\x12\x1d\n\x06params\x18\x02 \x03(\x0b\x32\r.mlflow.Param\x12\x1c\n\x04tags\x18\x03 \x03(\x0b\x32\x0e.mlflow.RunTag\"$\n\x06RunTag\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb9\x02\n\x07RunInfo\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x15\n\rexperiment_id\x18\x02 \x01(\x03\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\'\n\x0bsource_type\x18\x04 \x01(\x0e\x32\x12.mlflow.SourceType\x12\x13\n\x0bsource_name\x18\x05 \x01(\t\x12\x0f\n\x07user_id\x18\x06 \x01(\t\x12!\n\x06status\x18\x07 \x01(\x0e\x32\x11.mlflow.RunStatus\x12\x12\n\nstart_time\x18\x08 \x01(\x03\x12\x10\n\x08\x65nd_time\x18\t \x01(\x03\x12\x16\n\x0esource_version\x18\n \x01(\t\x12\x18\n\x10\x65ntry_point_name\x18\x0b \x01(\t\x12\x14\n\x0c\x61rtifact_uri\x18\r \x01(\t\x12\x17\n\x0flifecycle_stage\x18\x0e \x01(\t\"\x96\x01\n\nExperiment\x12\x15\n\rexperiment_id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x19\n\x11\x61rtifact_location\x18\x03 \x01(\t\x12\x17\n\x0flifecycle_stage\x18\x04 \x01(\t\x12\x18\n\x10last_update_time\x18\x05 \x01(\x03\x12\x15\n\rcreation_time\x18\x06 \x01(\x03\"\x91\x01\n\x10\x43reateExperiment\x12\x12\n\x04name\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x19\n\x11\x61rtifact_location\x18\x02 \x01(\t\x1a!\n\x08Response\x12\x15\n\rexperiment_id\x18\x01 \x01(\x03:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x98\x01\n\x0fListExperiments\x12#\n\tview_type\x18\x01 \x01(\x0e\x32\x10.mlflow.ViewType\x1a\x33\n\x08Response\x12\'\n\x0b\x65xperiments\x18\x01 \x03(\x0b\x32\x12.mlflow.Experiment:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xac\x01\n\rGetExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\x03\x42\x04\x88\xb5\x18\x01\x1aQ\n\x08Response\x12&\n\nexperiment\x18\x01 \x01(\x0b\x32\x12.mlflow.Experiment\x12\x1d\n\x04runs\x18\x02 \x03(\x0b\x32\x0f.mlflow.RunInfo:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"h\n\x10\x44\x65leteExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\x03\x42\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"i\n\x11RestoreExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\x03\x42\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"z\n\x10UpdateExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\x03\x42\x04\x88\xb5\x18\x01\x12\x10\n\x08new_name\x18\x02 \x01(\t\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xd1\x02\n\tCreateRun\x12\x15\n\rexperiment_id\x18\x01 \x01(\x03\x12\x0f\n\x07user_id\x18\x02 \x01(\t\x12\x10\n\x08run_name\x18\x03 \x01(\t\x12\'\n\x0bsource_type\x18\x04 \x01(\x0e\x32\x12.mlflow.SourceType\x12\x13\n\x0bsource_name\x18\x05 \x01(\t\x12\x18\n\x10\x65ntry_point_name\x18\x06 
\x01(\t\x12\x12\n\nstart_time\x18\x07 \x01(\x03\x12\x16\n\x0esource_version\x18\x08 \x01(\t\x12\x1c\n\x04tags\x18\t \x03(\x0b\x32\x0e.mlflow.RunTag\x12\x15\n\rparent_run_id\x18\n \x01(\t\x1a$\n\x08Response\x12\x18\n\x03run\x18\x01 \x01(\x0b\x32\x0b.mlflow.Run:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xb4\x01\n\tUpdateRun\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12!\n\x06status\x18\x02 \x01(\x0e\x32\x11.mlflow.RunStatus\x12\x10\n\x08\x65nd_time\x18\x03 \x01(\x03\x1a-\n\x08Response\x12!\n\x08run_info\x18\x01 \x01(\x0b\x32\x0f.mlflow.RunInfo:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"Z\n\tDeleteRun\x12\x14\n\x06run_id\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"[\n\nRestoreRun\x12\x14\n\x06run_id\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x9d\x01\n\tLogMetric\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\x88\xb5\x18\x01\x12\x13\n\x05value\x18\x03 \x01(\x01\x42\x04\x88\xb5\x18\x01\x12\x17\n\ttimestamp\x18\x04 \x01(\x03\x42\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x83\x01\n\x08LogParam\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\x88\xb5\x18\x01\x12\x13\n\x05value\x18\x03 \x01(\tB\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x81\x01\n\x06SetTag\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\x88\xb5\x18\x01\x12\x13\n\x05value\x18\x03 \x01(\tB\x04\x88\xb5\x18\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"s\n\x06GetRun\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x1a$\n\x08Response\x12\x18\n\x03run\x18\x01 \x01(\x0b\x32\x0b.mlflow.Run:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x96\x01\n\tGetMetric\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x18\n\nmetric_key\x18\x02 \x01(\tB\x04\x88\xb5\x18\x01\x1a*\n\x08Response\x12\x1e\n\x06metric\x18\x01 \x01(\x0b\x32\x0e.mlflow.Metric:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x97\x01\n\x08GetParam\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x18\n\nparam_name\x18\x02 \x01(\tB\x04\x88\xb5\x18\x01\x1a,\n\x08Response\x12 \n\tparameter\x18\x01 \x01(\x0b\x32\r.mlflow.Param:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x8a\x01\n\x10SearchExpression\x12\x30\n\x06metric\x18\x01 \x01(\x0b\x32\x1e.mlflow.MetricSearchExpressionH\x00\x12\x36\n\tparameter\x18\x02 \x01(\x0b\x32!.mlflow.ParameterSearchExpressionH\x00\x42\x0c\n\nexpression\"}\n\x16MetricSearchExpression\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05\x66loat\x18\x02 \x01(\x0b\x32\x13.mlflow.FloatClauseH\x00\x12&\n\x06\x64ouble\x18\x03 \x01(\x0b\x32\x14.mlflow.DoubleClauseH\x00\x42\x08\n\x06\x63lause\"Z\n\x19ParameterSearchExpression\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x06string\x18\x02 \x01(\x0b\x32\x14.mlflow.StringClauseH\x00\x42\x08\n\x06\x63lause\"1\n\x0cStringClause\x12\x12\n\ncomparator\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"0\n\x0b\x46loatClause\x12\x12\n\ncomparator\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02\"1\n\x0c\x44oubleClause\x12\x12\n\ncomparator\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01\"\xe3\x01\n\nSearchRuns\x12\x16\n\x0e\x65xperiment_ids\x18\x01 \x03(\x03\x12\x33\n\x11\x61nded_expressions\x18\x02 
\x03(\x0b\x32\x18.mlflow.SearchExpression\x12\x34\n\rrun_view_type\x18\x03 \x01(\x0e\x32\x10.mlflow.ViewType:\x0b\x41\x43TIVE_ONLY\x1a%\n\x08Response\x12\x19\n\x04runs\x18\x01 \x03(\x0b\x32\x0b.mlflow.Run:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x9b\x01\n\rListArtifacts\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x1a=\n\x08Response\x12\x10\n\x08root_uri\x18\x01 \x01(\t\x12\x1f\n\x05\x66iles\x18\x02 \x03(\x0b\x32\x10.mlflow.FileInfo:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\";\n\x08\x46ileInfo\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0e\n\x06is_dir\x18\x02 \x01(\x08\x12\x11\n\tfile_size\x18\x03 \x01(\x03\"f\n\x0bGetArtifact\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x9e\x01\n\x10GetMetricHistory\x12\x16\n\x08run_uuid\x18\x01 \x01(\tB\x04\x88\xb5\x18\x01\x12\x18\n\nmetric_key\x18\x02 \x01(\tB\x04\x88\xb5\x18\x01\x1a+\n\x08Response\x12\x1f\n\x07metrics\x18\x01 \x03(\x0b\x32\x0e.mlflow.Metric:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]*6\n\x08ViewType\x12\x0f\n\x0b\x41\x43TIVE_ONLY\x10\x01\x12\x10\n\x0c\x44\x45LETED_ONLY\x10\x02\x12\x07\n\x03\x41LL\x10\x03*I\n\nSourceType\x12\x0c\n\x08NOTEBOOK\x10\x01\x12\x07\n\x03JOB\x10\x02\x12\x0b\n\x07PROJECT\x10\x03\x12\t\n\x05LOCAL\x10\x04\x12\x0c\n\x07UNKNOWN\x10\xe8\x07*M\n\tRunStatus\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0c\n\x08\x46INISHED\x10\x03\x12\n\n\x06\x46\x41ILED\x10\x04\x12\n\n\x06KILLED\x10\x05\x32\xae\x14\n\rMlflowService\x12\x9c\x01\n\x10\x63reateExperiment\x12\x18.mlflow.CreateExperiment\x1a!.mlflow.CreateExperiment.Response\"K\x82\xb5\x18G\n0\n\x04POST\x12\"/preview/mlflow/experiments/create\x1a\x04\x08\x02\x10\x00\x10\x01*\x11\x43reate Experiment\x12\x95\x01\n\x0flistExperiments\x12\x17.mlflow.ListExperiments\x1a .mlflow.ListExperiments.Response\"G\x82\xb5\x18\x43\n-\n\x03GET\x12 /preview/mlflow/experiments/list\x1a\x04\x08\x02\x10\x00\x10\x01*\x10List Experiments\x12\x8c\x01\n\rgetExperiment\x12\x15.mlflow.GetExperiment\x1a\x1e.mlflow.GetExperiment.Response\"D\x82\xb5\x18@\n,\n\x03GET\x12\x1f/preview/mlflow/experiments/get\x1a\x04\x08\x02\x10\x00\x10\x01*\x0eGet Experiment\x12\x9c\x01\n\x10\x64\x65leteExperiment\x12\x18.mlflow.DeleteExperiment\x1a!.mlflow.DeleteExperiment.Response\"K\x82\xb5\x18G\n0\n\x04POST\x12\"/preview/mlflow/experiments/delete\x1a\x04\x08\x02\x10\x00\x10\x01*\x11\x44\x65lete Experiment\x12\xa1\x01\n\x11restoreExperiment\x12\x19.mlflow.RestoreExperiment\x1a\".mlflow.RestoreExperiment.Response\"M\x82\xb5\x18I\n1\n\x04POST\x12#/preview/mlflow/experiments/restore\x1a\x04\x08\x02\x10\x00\x10\x01*\x12Restore Experiment\x12\x9c\x01\n\x10updateExperiment\x12\x18.mlflow.UpdateExperiment\x1a!.mlflow.UpdateExperiment.Response\"K\x82\xb5\x18G\n0\n\x04POST\x12\"/preview/mlflow/experiments/update\x1a\x04\x08\x02\x10\x00\x10\x01*\x11Update Experiment\x12y\n\tcreateRun\x12\x11.mlflow.CreateRun\x1a\x1a.mlflow.CreateRun.Response\"=\x82\xb5\x18\x39\n)\n\x04POST\x12\x1b/preview/mlflow/runs/create\x1a\x04\x08\x02\x10\x00\x10\x01*\nCreate Run\x12y\n\tupdateRun\x12\x11.mlflow.UpdateRun\x1a\x1a.mlflow.UpdateRun.Response\"=\x82\xb5\x18\x39\n)\n\x04POST\x12\x1b/preview/mlflow/runs/update\x1a\x04\x08\x02\x10\x00\x10\x01*\nUpdate 
Run\x12m\n\tdeleteRun\x12\x11.mlflow.DeleteRun\x1a\x1a.mlflow.DeleteRun.Response\"1\x82\xb5\x18-\n)\n\x04POST\x12\x1b/preview/mlflow/runs/delete\x1a\x04\x08\x02\x10\x00\x10\x01\x12q\n\nrestoreRun\x12\x12.mlflow.RestoreRun\x1a\x1b.mlflow.RestoreRun.Response\"2\x82\xb5\x18.\n*\n\x04POST\x12\x1c/preview/mlflow/runs/restore\x1a\x04\x08\x02\x10\x00\x10\x01\x12}\n\tlogMetric\x12\x11.mlflow.LogMetric\x1a\x1a.mlflow.LogMetric.Response\"A\x82\xb5\x18=\n-\n\x04POST\x12\x1f/preview/mlflow/runs/log-metric\x1a\x04\x08\x02\x10\x00\x10\x01*\nLog Metric\x12|\n\x08logParam\x12\x10.mlflow.LogParam\x1a\x19.mlflow.LogParam.Response\"C\x82\xb5\x18?\n0\n\x04POST\x12\"/preview/mlflow/runs/log-parameter\x1a\x04\x08\x02\x10\x00\x10\x01*\tLog Param\x12n\n\x06setTag\x12\x0e.mlflow.SetTag\x1a\x17.mlflow.SetTag.Response\";\x82\xb5\x18\x37\n*\n\x04POST\x12\x1c/preview/mlflow/runs/set-tag\x1a\x04\x08\x02\x10\x00\x10\x01*\x07Set Tag\x12i\n\x06getRun\x12\x0e.mlflow.GetRun\x1a\x17.mlflow.GetRun.Response\"6\x82\xb5\x18\x32\n%\n\x03GET\x12\x18/preview/mlflow/runs/get\x1a\x04\x08\x02\x10\x00\x10\x01*\x07Get Run\x12x\n\tgetMetric\x12\x11.mlflow.GetMetric\x1a\x1a.mlflow.GetMetric.Response\"<\x82\xb5\x18\x38\n(\n\x03GET\x12\x1b/preview/mlflow/metrics/get\x1a\x04\x08\x02\x10\x00\x10\x01*\nGet Metric\x12s\n\x08getParam\x12\x10.mlflow.GetParam\x1a\x19.mlflow.GetParam.Response\":\x82\xb5\x18\x36\n\'\n\x03GET\x12\x1a/preview/mlflow/params/get\x1a\x04\x08\x02\x10\x00\x10\x01*\tGet Param\x12\xa7\x01\n\nsearchRuns\x12\x12.mlflow.SearchRuns\x1a\x1b.mlflow.SearchRuns.Response\"h\x82\xb5\x18\x64\n)\n\x04POST\x12\x1b/preview/mlflow/runs/search\x1a\x04\x08\x02\x10\x00\n(\n\x03GET\x12\x1b/preview/mlflow/runs/search\x1a\x04\x08\x02\x10\x00\x10\x01*\x0bSearch Runs\x12\x8b\x01\n\rlistArtifacts\x12\x15.mlflow.ListArtifacts\x1a\x1e.mlflow.ListArtifacts.Response\"C\x82\xb5\x18?\n+\n\x03GET\x12\x1e/preview/mlflow/artifacts/list\x1a\x04\x08\x02\x10\x00\x10\x01*\x0eList Artifacts\x12\x9d\x01\n\x10getMetricHistory\x12\x18.mlflow.GetMetricHistory\x1a!.mlflow.GetMetricHistory.Response\"L\x82\xb5\x18H\n0\n\x03GET\x12#/preview/mlflow/metrics/get-history\x1a\x04\x08\x02\x10\x00\x10\x01*\x12Get Metric HistoryB\x1e\n\x14org.mlflow.api.proto\x90\x01\x01\xe2?\x02\x10\x01') + serialized_pb=_b('\n\rservice.proto\x12\x06mlflow\x1a\x15scalapb/scalapb.proto\x1a\x10\x64\x61tabricks.proto\"H\n\x06Metric\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x11\n\ttimestamp\x18\x03 \x01(\x03\x12\x0f\n\x04step\x18\x04 \x01(\x03:\x01\x30\"#\n\x05Param\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"C\n\x03Run\x12\x1d\n\x04info\x18\x01 \x01(\x0b\x32\x0f.mlflow.RunInfo\x12\x1d\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x0f.mlflow.RunData\"g\n\x07RunData\x12\x1f\n\x07metrics\x18\x01 \x03(\x0b\x32\x0e.mlflow.Metric\x12\x1d\n\x06params\x18\x02 \x03(\x0b\x32\r.mlflow.Param\x12\x1c\n\x04tags\x18\x03 \x03(\x0b\x32\x0e.mlflow.RunTag\"$\n\x06RunTag\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xcb\x01\n\x07RunInfo\x12\x0e\n\x06run_id\x18\x0f \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x15\n\rexperiment_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x06 \x01(\t\x12!\n\x06status\x18\x07 \x01(\x0e\x32\x11.mlflow.RunStatus\x12\x12\n\nstart_time\x18\x08 \x01(\x03\x12\x10\n\x08\x65nd_time\x18\t \x01(\x03\x12\x14\n\x0c\x61rtifact_uri\x18\r \x01(\t\x12\x17\n\x0flifecycle_stage\x18\x0e \x01(\t\"\x96\x01\n\nExperiment\x12\x15\n\rexperiment_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x19\n\x11\x61rtifact_location\x18\x03 
\x01(\t\x12\x17\n\x0flifecycle_stage\x18\x04 \x01(\t\x12\x18\n\x10last_update_time\x18\x05 \x01(\x03\x12\x15\n\rcreation_time\x18\x06 \x01(\x03\"\x91\x01\n\x10\x43reateExperiment\x12\x12\n\x04name\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x12\x19\n\x11\x61rtifact_location\x18\x02 \x01(\t\x1a!\n\x08Response\x12\x15\n\rexperiment_id\x18\x01 \x01(\t:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x98\x01\n\x0fListExperiments\x12#\n\tview_type\x18\x01 \x01(\x0e\x32\x10.mlflow.ViewType\x1a\x33\n\x08Response\x12\'\n\x0b\x65xperiments\x18\x01 \x03(\x0b\x32\x12.mlflow.Experiment:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xac\x01\n\rGetExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x1aQ\n\x08Response\x12&\n\nexperiment\x18\x01 \x01(\x0b\x32\x12.mlflow.Experiment\x12\x1d\n\x04runs\x18\x02 \x03(\x0b\x32\x0f.mlflow.RunInfo:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"h\n\x10\x44\x65leteExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"i\n\x11RestoreExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"z\n\x10UpdateExperiment\x12\x1b\n\rexperiment_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x12\x10\n\x08new_name\x18\x02 \x01(\t\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xb8\x01\n\tCreateRun\x12\x15\n\rexperiment_id\x18\x01 \x01(\t\x12\x0f\n\x07user_id\x18\x02 \x01(\t\x12\x12\n\nstart_time\x18\x07 \x01(\x03\x12\x1c\n\x04tags\x18\t \x03(\x0b\x32\x0e.mlflow.RunTag\x1a$\n\x08Response\x12\x18\n\x03run\x18\x01 \x01(\x0b\x32\x0b.mlflow.Run:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xbe\x01\n\tUpdateRun\x12\x0e\n\x06run_id\x18\x04 \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0e\x32\x11.mlflow.RunStatus\x12\x10\n\x08\x65nd_time\x18\x03 \x01(\x03\x1a-\n\x08Response\x12!\n\x08run_info\x18\x01 \x01(\x0b\x32\x0f.mlflow.RunInfo:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"Z\n\tDeleteRun\x12\x14\n\x06run_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"[\n\nRestoreRun\x12\x14\n\x06run_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xb8\x01\n\tLogMetric\x12\x0e\n\x06run_id\x18\x06 \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\xf8\x86\x19\x01\x12\x13\n\x05value\x18\x03 \x01(\x01\x42\x04\xf8\x86\x19\x01\x12\x17\n\ttimestamp\x18\x04 \x01(\x03\x42\x04\xf8\x86\x19\x01\x12\x0f\n\x04step\x18\x05 \x01(\x03:\x01\x30\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x8d\x01\n\x08LogParam\x12\x0e\n\x06run_id\x18\x04 \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\xf8\x86\x19\x01\x12\x13\n\x05value\x18\x03 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x8b\x01\n\x06SetTag\x12\x0e\n\x06run_id\x18\x04 \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\xf8\x86\x19\x01\x12\x13\n\x05value\x18\x03 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"m\n\tDeleteTag\x12\x14\n\x06run_id\x18\x01 \x01(\tB\x04\xf8\x86\x19\x01\x12\x11\n\x03key\x18\x02 \x01(\tB\x04\xf8\x86\x19\x01\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"}\n\x06GetRun\x12\x0e\n\x06run_id\x18\x02 
\x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x1a$\n\x08Response\x12\x18\n\x03run\x18\x01 \x01(\x0b\x32\x0b.mlflow.Run:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\x98\x02\n\nSearchRuns\x12\x16\n\x0e\x65xperiment_ids\x18\x01 \x03(\t\x12\x0e\n\x06\x66ilter\x18\x04 \x01(\t\x12\x34\n\rrun_view_type\x18\x03 \x01(\x0e\x32\x10.mlflow.ViewType:\x0b\x41\x43TIVE_ONLY\x12\x19\n\x0bmax_results\x18\x05 \x01(\x05:\x04\x31\x30\x30\x30\x12\x10\n\x08order_by\x18\x06 \x03(\t\x12\x12\n\npage_token\x18\x07 \x01(\t\x1a>\n\x08Response\x12\x19\n\x04runs\x18\x01 \x03(\x0b\x32\x0b.mlflow.Run\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xab\x01\n\rListArtifacts\x12\x0e\n\x06run_id\x18\x03 \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x1a=\n\x08Response\x12\x10\n\x08root_uri\x18\x01 \x01(\t\x12\x1f\n\x05\x66iles\x18\x02 \x03(\x0b\x32\x10.mlflow.FileInfo:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\";\n\x08\x46ileInfo\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0e\n\x06is_dir\x18\x02 \x01(\x08\x12\x11\n\tfile_size\x18\x03 \x01(\x03\"\xa8\x01\n\x10GetMetricHistory\x12\x0e\n\x06run_id\x18\x03 \x01(\t\x12\x10\n\x08run_uuid\x18\x01 \x01(\t\x12\x18\n\nmetric_key\x18\x02 \x01(\tB\x04\xf8\x86\x19\x01\x1a+\n\x08Response\x12\x1f\n\x07metrics\x18\x01 \x03(\x0b\x32\x0e.mlflow.Metric:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]\"\xb1\x01\n\x08LogBatch\x12\x0e\n\x06run_id\x18\x01 \x01(\t\x12\x1f\n\x07metrics\x18\x02 \x03(\x0b\x32\x0e.mlflow.Metric\x12\x1d\n\x06params\x18\x03 \x03(\x0b\x32\r.mlflow.Param\x12\x1c\n\x04tags\x18\x04 \x03(\x0b\x32\x0e.mlflow.RunTag\x1a\n\n\x08Response:+\xe2?(\n&com.databricks.rpc.RPC[$this.Response]*6\n\x08ViewType\x12\x0f\n\x0b\x41\x43TIVE_ONLY\x10\x01\x12\x10\n\x0c\x44\x45LETED_ONLY\x10\x02\x12\x07\n\x03\x41LL\x10\x03*I\n\nSourceType\x12\x0c\n\x08NOTEBOOK\x10\x01\x12\x07\n\x03JOB\x10\x02\x12\x0b\n\x07PROJECT\x10\x03\x12\t\n\x05LOCAL\x10\x04\x12\x0c\n\x07UNKNOWN\x10\xe8\x07*M\n\tRunStatus\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tSCHEDULED\x10\x02\x12\x0c\n\x08\x46INISHED\x10\x03\x12\n\n\x06\x46\x41ILED\x10\x04\x12\n\n\x06KILLED\x10\x05\x32\xb3\x1a\n\rMlflowService\x12\xc6\x01\n\x10\x63reateExperiment\x12\x18.mlflow.CreateExperiment\x1a!.mlflow.CreateExperiment.Response\"u\xf2\x86\x19q\n(\n\x04POST\x12\x1a/mlflow/experiments/create\x1a\x04\x08\x02\x10\x00\n0\n\x04POST\x12\"/preview/mlflow/experiments/create\x1a\x04\x08\x02\x10\x00\x10\x01*\x11\x43reate Experiment\x12\xbc\x01\n\x0flistExperiments\x12\x17.mlflow.ListExperiments\x1a .mlflow.ListExperiments.Response\"n\xf2\x86\x19j\n%\n\x03GET\x12\x18/mlflow/experiments/list\x1a\x04\x08\x02\x10\x00\n-\n\x03GET\x12 /preview/mlflow/experiments/list\x1a\x04\x08\x02\x10\x00\x10\x01*\x10List Experiments\x12\xb2\x01\n\rgetExperiment\x12\x15.mlflow.GetExperiment\x1a\x1e.mlflow.GetExperiment.Response\"j\xf2\x86\x19\x66\n$\n\x03GET\x12\x17/mlflow/experiments/get\x1a\x04\x08\x02\x10\x00\n,\n\x03GET\x12\x1f/preview/mlflow/experiments/get\x1a\x04\x08\x02\x10\x00\x10\x01*\x0eGet Experiment\x12\xc6\x01\n\x10\x64\x65leteExperiment\x12\x18.mlflow.DeleteExperiment\x1a!.mlflow.DeleteExperiment.Response\"u\xf2\x86\x19q\n(\n\x04POST\x12\x1a/mlflow/experiments/delete\x1a\x04\x08\x02\x10\x00\n0\n\x04POST\x12\"/preview/mlflow/experiments/delete\x1a\x04\x08\x02\x10\x00\x10\x01*\x11\x44\x65lete 
Experiment\x12\xcc\x01\n\x11restoreExperiment\x12\x19.mlflow.RestoreExperiment\x1a\".mlflow.RestoreExperiment.Response\"x\xf2\x86\x19t\n)\n\x04POST\x12\x1b/mlflow/experiments/restore\x1a\x04\x08\x02\x10\x00\n1\n\x04POST\x12#/preview/mlflow/experiments/restore\x1a\x04\x08\x02\x10\x00\x10\x01*\x12Restore Experiment\x12\xc6\x01\n\x10updateExperiment\x12\x18.mlflow.UpdateExperiment\x1a!.mlflow.UpdateExperiment.Response\"u\xf2\x86\x19q\n(\n\x04POST\x12\x1a/mlflow/experiments/update\x1a\x04\x08\x02\x10\x00\n0\n\x04POST\x12\"/preview/mlflow/experiments/update\x1a\x04\x08\x02\x10\x00\x10\x01*\x11Update Experiment\x12\x9c\x01\n\tcreateRun\x12\x11.mlflow.CreateRun\x1a\x1a.mlflow.CreateRun.Response\"`\xf2\x86\x19\\\n!\n\x04POST\x12\x13/mlflow/runs/create\x1a\x04\x08\x02\x10\x00\n)\n\x04POST\x12\x1b/preview/mlflow/runs/create\x1a\x04\x08\x02\x10\x00\x10\x01*\nCreate Run\x12\x9c\x01\n\tupdateRun\x12\x11.mlflow.UpdateRun\x1a\x1a.mlflow.UpdateRun.Response\"`\xf2\x86\x19\\\n!\n\x04POST\x12\x13/mlflow/runs/update\x1a\x04\x08\x02\x10\x00\n)\n\x04POST\x12\x1b/preview/mlflow/runs/update\x1a\x04\x08\x02\x10\x00\x10\x01*\nUpdate Run\x12\x9c\x01\n\tdeleteRun\x12\x11.mlflow.DeleteRun\x1a\x1a.mlflow.DeleteRun.Response\"`\xf2\x86\x19\\\n!\n\x04POST\x12\x13/mlflow/runs/delete\x1a\x04\x08\x02\x10\x00\n)\n\x04POST\x12\x1b/preview/mlflow/runs/delete\x1a\x04\x08\x02\x10\x00\x10\x01*\nDelete Run\x12\xa2\x01\n\nrestoreRun\x12\x12.mlflow.RestoreRun\x1a\x1b.mlflow.RestoreRun.Response\"c\xf2\x86\x19_\n\"\n\x04POST\x12\x14/mlflow/runs/restore\x1a\x04\x08\x02\x10\x00\n*\n\x04POST\x12\x1c/preview/mlflow/runs/restore\x1a\x04\x08\x02\x10\x00\x10\x01*\x0bRestore Run\x12\xa4\x01\n\tlogMetric\x12\x11.mlflow.LogMetric\x1a\x1a.mlflow.LogMetric.Response\"h\xf2\x86\x19\x64\n%\n\x04POST\x12\x17/mlflow/runs/log-metric\x1a\x04\x08\x02\x10\x00\n-\n\x04POST\x12\x1f/preview/mlflow/runs/log-metric\x1a\x04\x08\x02\x10\x00\x10\x01*\nLog Metric\x12\xa6\x01\n\x08logParam\x12\x10.mlflow.LogParam\x1a\x19.mlflow.LogParam.Response\"m\xf2\x86\x19i\n(\n\x04POST\x12\x1a/mlflow/runs/log-parameter\x1a\x04\x08\x02\x10\x00\n0\n\x04POST\x12\"/preview/mlflow/runs/log-parameter\x1a\x04\x08\x02\x10\x00\x10\x01*\tLog Param\x12\x92\x01\n\x06setTag\x12\x0e.mlflow.SetTag\x1a\x17.mlflow.SetTag.Response\"_\xf2\x86\x19[\n\"\n\x04POST\x12\x14/mlflow/runs/set-tag\x1a\x04\x08\x02\x10\x00\n*\n\x04POST\x12\x1c/preview/mlflow/runs/set-tag\x1a\x04\x08\x02\x10\x00\x10\x01*\x07Set Tag\x12\xa4\x01\n\tdeleteTag\x12\x11.mlflow.DeleteTag\x1a\x1a.mlflow.DeleteTag.Response\"h\xf2\x86\x19\x64\n%\n\x04POST\x12\x17/mlflow/runs/delete-tag\x1a\x04\x08\x02\x10\x00\n-\n\x04POST\x12\x1f/preview/mlflow/runs/delete-tag\x1a\x04\x08\x02\x10\x00\x10\x01*\nDelete Tag\x12\x88\x01\n\x06getRun\x12\x0e.mlflow.GetRun\x1a\x17.mlflow.GetRun.Response\"U\xf2\x86\x19Q\n\x1d\n\x03GET\x12\x10/mlflow/runs/get\x1a\x04\x08\x02\x10\x00\n%\n\x03GET\x12\x18/preview/mlflow/runs/get\x1a\x04\x08\x02\x10\x00\x10\x01*\x07Get Run\x12\xcc\x01\n\nsearchRuns\x12\x12.mlflow.SearchRuns\x1a\x1b.mlflow.SearchRuns.Response\"\x8c\x01\xf2\x86\x19\x87\x01\n!\n\x04POST\x12\x13/mlflow/runs/search\x1a\x04\x08\x02\x10\x00\n)\n\x04POST\x12\x1b/preview/mlflow/runs/search\x1a\x04\x08\x02\x10\x00\n(\n\x03GET\x12\x1b/preview/mlflow/runs/search\x1a\x04\x08\x02\x10\x00\x10\x01*\x0bSearch 
Runs\x12\xb0\x01\n\rlistArtifacts\x12\x15.mlflow.ListArtifacts\x1a\x1e.mlflow.ListArtifacts.Response\"h\xf2\x86\x19\x64\n#\n\x03GET\x12\x16/mlflow/artifacts/list\x1a\x04\x08\x02\x10\x00\n+\n\x03GET\x12\x1e/preview/mlflow/artifacts/list\x1a\x04\x08\x02\x10\x00\x10\x01*\x0eList Artifacts\x12\xc7\x01\n\x10getMetricHistory\x12\x18.mlflow.GetMetricHistory\x1a!.mlflow.GetMetricHistory.Response\"v\xf2\x86\x19r\n(\n\x03GET\x12\x1b/mlflow/metrics/get-history\x1a\x04\x08\x02\x10\x00\n0\n\x03GET\x12#/preview/mlflow/metrics/get-history\x1a\x04\x08\x02\x10\x00\x10\x01*\x12Get Metric History\x12\x9e\x01\n\x08logBatch\x12\x10.mlflow.LogBatch\x1a\x19.mlflow.LogBatch.Response\"e\xf2\x86\x19\x61\n$\n\x04POST\x12\x16/mlflow/runs/log-batch\x1a\x04\x08\x02\x10\x00\n,\n\x04POST\x12\x1e/preview/mlflow/runs/log-batch\x1a\x04\x08\x02\x10\x00\x10\x01*\tLog BatchB\x1e\n\x14org.mlflow.api.proto\x90\x01\x01\xe2?\x02\x10\x01') , dependencies=[scalapb_dot_scalapb__pb2.DESCRIPTOR,databricks__pb2.DESCRIPTOR,]) @@ -49,8 +49,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4440, - serialized_end=4494, + serialized_start=3708, + serialized_end=3762, ) _sym_db.RegisterEnumDescriptor(_VIEWTYPE) @@ -84,8 +84,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4496, - serialized_end=4569, + serialized_start=3764, + serialized_end=3837, ) _sym_db.RegisterEnumDescriptor(_SOURCETYPE) @@ -119,8 +119,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4571, - serialized_end=4648, + serialized_start=3839, + serialized_end=3916, ) _sym_db.RegisterEnumDescriptor(_RUNSTATUS) @@ -169,6 +169,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='step', full_name='mlflow.Metric.step', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -182,7 +189,7 @@ oneofs=[ ], serialized_start=66, - serialized_end=121, + serialized_end=138, ) @@ -219,8 +226,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=123, - serialized_end=158, + serialized_start=140, + serialized_end=175, ) @@ -257,8 +264,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=160, - serialized_end=227, + serialized_start=177, + serialized_end=244, ) @@ -302,8 +309,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=229, - serialized_end=332, + serialized_start=246, + serialized_end=349, ) @@ -340,8 +347,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=334, - serialized_end=370, + serialized_start=351, + serialized_end=387, ) @@ -353,91 +360,63 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.RunInfo.run_uuid', index=0, - number=1, type=9, cpp_type=9, label=1, + name='run_id', full_name='mlflow.RunInfo.run_id', index=0, + number=15, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='experiment_id', full_name='mlflow.RunInfo.experiment_id', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, 
- is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='name', full_name='mlflow.RunInfo.name', index=2, - number=3, type=9, cpp_type=9, label=1, + name='run_uuid', full_name='mlflow.RunInfo.run_uuid', index=1, + number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='source_type', full_name='mlflow.RunInfo.source_type', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='source_name', full_name='mlflow.RunInfo.source_name', index=4, - number=5, type=9, cpp_type=9, label=1, + name='experiment_id', full_name='mlflow.RunInfo.experiment_id', index=2, + number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='user_id', full_name='mlflow.RunInfo.user_id', index=5, + name='user_id', full_name='mlflow.RunInfo.user_id', index=3, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='status', full_name='mlflow.RunInfo.status', index=6, + name='status', full_name='mlflow.RunInfo.status', index=4, number=7, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='start_time', full_name='mlflow.RunInfo.start_time', index=7, + name='start_time', full_name='mlflow.RunInfo.start_time', index=5, number=8, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='end_time', full_name='mlflow.RunInfo.end_time', index=8, + name='end_time', full_name='mlflow.RunInfo.end_time', index=6, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='source_version', full_name='mlflow.RunInfo.source_version', index=9, - number=10, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='entry_point_name', full_name='mlflow.RunInfo.entry_point_name', index=10, - number=11, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='artifact_uri', full_name='mlflow.RunInfo.artifact_uri', index=11, + name='artifact_uri', full_name='mlflow.RunInfo.artifact_uri', index=7, number=13, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='lifecycle_stage', full_name='mlflow.RunInfo.lifecycle_stage', index=12, + name='lifecycle_stage', full_name='mlflow.RunInfo.lifecycle_stage', index=8, number=14, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -455,8 +434,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=373, - serialized_end=686, + serialized_start=390, + serialized_end=593, ) @@ -469,8 +448,8 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.Experiment.experiment_id', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -521,8 +500,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=689, - serialized_end=839, + serialized_start=596, + serialized_end=746, ) @@ -535,8 +514,8 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.CreateExperiment.Response.experiment_id', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -552,8 +531,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=942, + serialized_start=816, + serialized_end=849, ) _CREATEEXPERIMENT = _descriptor.Descriptor( @@ -569,7 +548,7 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='artifact_location', full_name='mlflow.CreateExperiment.artifact_location', index=1, number=2, type=9, cpp_type=9, label=1, @@ -589,8 +568,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=842, - serialized_end=987, + serialized_start=749, + serialized_end=894, ) @@ -620,8 +599,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1046, - serialized_end=1097, + serialized_start=953, + serialized_end=1004, ) _LISTEXPERIMENTS = _descriptor.Descriptor( @@ -650,8 +629,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=990, - serialized_end=1142, + serialized_start=897, + serialized_end=1049, ) @@ -688,8 +667,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1191, - serialized_end=1272, + serialized_start=1098, + serialized_end=1179, ) _GETEXPERIMENT = _descriptor.Descriptor( @@ -701,11 +680,11 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.GetExperiment.experiment_id', 
index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -718,8 +697,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1145, - serialized_end=1317, + serialized_start=1052, + serialized_end=1224, ) @@ -742,8 +721,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _DELETEEXPERIMENT = _descriptor.Descriptor( @@ -755,11 +734,11 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.DeleteExperiment.experiment_id', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -772,8 +751,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1319, - serialized_end=1423, + serialized_start=1226, + serialized_end=1330, ) @@ -796,8 +775,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _RESTOREEXPERIMENT = _descriptor.Descriptor( @@ -809,11 +788,11 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.RestoreExperiment.experiment_id', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -826,8 +805,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1425, - serialized_end=1530, + serialized_start=1332, + serialized_end=1437, ) @@ -850,8 +829,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _UPDATEEXPERIMENT = _descriptor.Descriptor( @@ -863,11 +842,11 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.UpdateExperiment.experiment_id', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_name', full_name='mlflow.UpdateExperiment.new_name', index=1, number=2, type=9, cpp_type=9, label=1, @@ -887,8 +866,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1532, - serialized_end=1654, + serialized_start=1439, + serialized_end=1561, ) @@ -918,8 +897,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1913, - 
serialized_end=1949, + serialized_start=1667, + serialized_end=1703, ) _CREATERUN = _descriptor.Descriptor( @@ -931,8 +910,8 @@ fields=[ _descriptor.FieldDescriptor( name='experiment_id', full_name='mlflow.CreateRun.experiment_id', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -944,61 +923,19 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='run_name', full_name='mlflow.CreateRun.run_name', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='source_type', full_name='mlflow.CreateRun.source_type', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='source_name', full_name='mlflow.CreateRun.source_name', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='entry_point_name', full_name='mlflow.CreateRun.entry_point_name', index=5, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='start_time', full_name='mlflow.CreateRun.start_time', index=6, + name='start_time', full_name='mlflow.CreateRun.start_time', index=2, number=7, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='source_version', full_name='mlflow.CreateRun.source_version', index=7, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='tags', full_name='mlflow.CreateRun.tags', index=8, + name='tags', full_name='mlflow.CreateRun.tags', index=3, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='parent_run_id', full_name='mlflow.CreateRun.parent_run_id', index=9, - number=10, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -1011,8 +948,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1657, - serialized_end=1994, + serialized_start=1564, + serialized_end=1748, ) @@ -1042,8 +979,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2087, - serialized_end=2132, + serialized_start=1851, + serialized_end=1896, ) _UPDATERUN = _descriptor.Descriptor( @@ -1054,21 +991,28 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.UpdateRun.run_uuid', index=0, + name='run_id', full_name='mlflow.UpdateRun.run_id', index=0, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='run_uuid', full_name='mlflow.UpdateRun.run_uuid', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='status', full_name='mlflow.UpdateRun.status', index=1, + name='status', full_name='mlflow.UpdateRun.status', index=2, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='end_time', full_name='mlflow.UpdateRun.end_time', index=2, + name='end_time', full_name='mlflow.UpdateRun.end_time', index=3, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -1086,8 +1030,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1997, - serialized_end=2177, + serialized_start=1751, + serialized_end=1941, ) @@ -1110,8 +1054,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _DELETERUN = _descriptor.Descriptor( @@ -1127,7 +1071,7 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -1140,8 +1084,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2179, - serialized_end=2269, + serialized_start=1943, + serialized_end=2033, ) @@ -1164,8 +1108,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _RESTORERUN = _descriptor.Descriptor( @@ -1181,7 +1125,7 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -1194,8 +1138,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2271, - serialized_end=2362, + serialized_start=2035, + serialized_end=2126, ) @@ -1218,8 +1162,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + 
serialized_start=816, + serialized_end=826, ) _LOGMETRIC = _descriptor.Descriptor( @@ -1230,33 +1174,47 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.LogMetric.run_uuid', index=0, + name='run_id', full_name='mlflow.LogMetric.run_id', index=0, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='run_uuid', full_name='mlflow.LogMetric.run_uuid', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='key', full_name='mlflow.LogMetric.key', index=1, + name='key', full_name='mlflow.LogMetric.key', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='value', full_name='mlflow.LogMetric.value', index=2, + name='value', full_name='mlflow.LogMetric.value', index=3, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='timestamp', full_name='mlflow.LogMetric.timestamp', index=3, + name='timestamp', full_name='mlflow.LogMetric.timestamp', index=4, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='step', full_name='mlflow.LogMetric.step', index=5, + number=5, type=3, cpp_type=2, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -1269,8 +1227,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2365, - serialized_end=2522, + serialized_start=2129, + serialized_end=2313, ) @@ -1293,8 +1251,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _LOGPARAM = _descriptor.Descriptor( @@ -1305,26 +1263,33 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.LogParam.run_uuid', index=0, + name='run_id', full_name='mlflow.LogParam.run_id', index=0, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + 
name='run_uuid', full_name='mlflow.LogParam.run_uuid', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='key', full_name='mlflow.LogParam.key', index=1, + name='key', full_name='mlflow.LogParam.key', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='value', full_name='mlflow.LogParam.value', index=2, + name='value', full_name='mlflow.LogParam.value', index=3, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -1337,8 +1302,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2525, - serialized_end=2656, + serialized_start=2316, + serialized_end=2457, ) @@ -1361,8 +1326,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) _SETTAG = _descriptor.Descriptor( @@ -1373,26 +1338,33 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.SetTag.run_uuid', index=0, + name='run_id', full_name='mlflow.SetTag.run_id', index=0, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='run_uuid', full_name='mlflow.SetTag.run_uuid', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='key', full_name='mlflow.SetTag.key', index=1, + name='key', full_name='mlflow.SetTag.key', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='value', full_name='mlflow.SetTag.value', index=2, + name='value', full_name='mlflow.SetTag.value', index=3, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], @@ -1405,25 +1377,18 @@ extension_ranges=[], oneofs=[ ], - 
serialized_start=2659, - serialized_end=2788, + serialized_start=2460, + serialized_end=2599, ) -_GETRUN_RESPONSE = _descriptor.Descriptor( +_DELETETAG_RESPONSE = _descriptor.Descriptor( name='Response', - full_name='mlflow.GetRun.Response', + full_name='mlflow.DeleteTag.Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor( - name='run', full_name='mlflow.GetRun.Response.run', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -1436,28 +1401,35 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1913, - serialized_end=1949, + serialized_start=816, + serialized_end=826, ) -_GETRUN = _descriptor.Descriptor( - name='GetRun', - full_name='mlflow.GetRun', +_DELETETAG = _descriptor.Descriptor( + name='DeleteTag', + full_name='mlflow.DeleteTag', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.GetRun.run_uuid', index=0, + name='run_id', full_name='mlflow.DeleteTag.run_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='key', full_name='mlflow.DeleteTag.key', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], - nested_types=[_GETRUN_RESPONSE, ], + nested_types=[_DELETETAG_RESPONSE, ], enum_types=[ ], serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), @@ -1466,20 +1438,20 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2790, - serialized_end=2905, + serialized_start=2601, + serialized_end=2710, ) -_GETMETRIC_RESPONSE = _descriptor.Descriptor( +_GETRUN_RESPONSE = _descriptor.Descriptor( name='Response', - full_name='mlflow.GetMetric.Response', + full_name='mlflow.GetRun.Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='metric', full_name='mlflow.GetMetric.Response.metric', index=0, + name='run', full_name='mlflow.GetRun.Response.run', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -1497,35 +1469,35 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2971, - serialized_end=3013, + serialized_start=1667, + serialized_end=1703, ) -_GETMETRIC = _descriptor.Descriptor( - name='GetMetric', - full_name='mlflow.GetMetric', +_GETRUN = _descriptor.Descriptor( + name='GetRun', + full_name='mlflow.GetRun', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.GetMetric.run_uuid', index=0, - number=1, type=9, cpp_type=9, label=1, + name='run_id', full_name='mlflow.GetRun.run_id', index=0, + number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='metric_key', full_name='mlflow.GetMetric.metric_key', index=1, - number=2, type=9, cpp_type=9, label=1, + name='run_uuid', full_name='mlflow.GetRun.run_uuid', index=1, + number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], - nested_types=[_GETMETRIC_RESPONSE, ], + nested_types=[_GETRUN_RESPONSE, ], enum_types=[ ], serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), @@ -1534,22 +1506,29 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2908, - serialized_end=3058, + serialized_start=2712, + serialized_end=2837, ) -_GETPARAM_RESPONSE = _descriptor.Descriptor( +_SEARCHRUNS_RESPONSE = _descriptor.Descriptor( name='Response', - full_name='mlflow.GetParam.Response', + full_name='mlflow.SearchRuns.Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='parameter', full_name='mlflow.GetParam.Response.parameter', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='runs', full_name='mlflow.SearchRuns.Response.runs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='mlflow.SearchRuns.Response.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -1565,35 +1544,63 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3123, - serialized_end=3167, + serialized_start=3013, + serialized_end=3075, ) -_GETPARAM = _descriptor.Descriptor( - name='GetParam', - full_name='mlflow.GetParam', +_SEARCHRUNS = _descriptor.Descriptor( + name='SearchRuns', + full_name='mlflow.SearchRuns', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.GetParam.run_uuid', index=0, - number=1, type=9, cpp_type=9, label=1, + name='experiment_ids', full_name='mlflow.SearchRuns.experiment_ids', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='filter', full_name='mlflow.SearchRuns.filter', index=1, + number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='param_name', full_name='mlflow.GetParam.param_name', index=1, - number=2, type=9, cpp_type=9, label=1, + 
name='run_view_type', full_name='mlflow.SearchRuns.run_view_type', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='max_results', full_name='mlflow.SearchRuns.max_results', index=3, + number=5, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=1000, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='order_by', full_name='mlflow.SearchRuns.order_by', index=4, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='mlflow.SearchRuns.page_token', index=5, + number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], - nested_types=[_GETPARAM_RESPONSE, ], + nested_types=[_SEARCHRUNS_RESPONSE, ], enum_types=[ ], serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), @@ -1602,29 +1609,29 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3061, - serialized_end=3212, + serialized_start=2840, + serialized_end=3120, ) -_SEARCHEXPRESSION = _descriptor.Descriptor( - name='SearchExpression', - full_name='mlflow.SearchExpression', +_LISTARTIFACTS_RESPONSE = _descriptor.Descriptor( + name='Response', + full_name='mlflow.ListArtifacts.Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='metric', full_name='mlflow.SearchExpression.metric', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='root_uri', full_name='mlflow.ListArtifacts.Response.root_uri', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='parameter', full_name='mlflow.SearchExpression.parameter', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='files', full_name='mlflow.ListArtifacts.Response.files', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -1639,81 +1646,81 @@ syntax='proto2', extension_ranges=[], oneofs=[ - _descriptor.OneofDescriptor( - name='expression', full_name='mlflow.SearchExpression.expression', - index=0, containing_type=None, fields=[]), ], - serialized_start=3215, - serialized_end=3353, + serialized_start=3188, + serialized_end=3249, ) - -_METRICSEARCHEXPRESSION = _descriptor.Descriptor( - name='MetricSearchExpression', - full_name='mlflow.MetricSearchExpression', +_LISTARTIFACTS = _descriptor.Descriptor( + 
name='ListArtifacts', + full_name='mlflow.ListArtifacts', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mlflow.MetricSearchExpression.key', index=0, - number=1, type=9, cpp_type=9, label=1, + name='run_id', full_name='mlflow.ListArtifacts.run_id', index=0, + number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='float', full_name='mlflow.MetricSearchExpression.float', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='run_uuid', full_name='mlflow.ListArtifacts.run_uuid', index=1, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='double', full_name='mlflow.MetricSearchExpression.double', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='path', full_name='mlflow.ListArtifacts.path', index=2, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], - nested_types=[], + nested_types=[_LISTARTIFACTS_RESPONSE, ], enum_types=[ ], - serialized_options=None, + serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ - _descriptor.OneofDescriptor( - name='clause', full_name='mlflow.MetricSearchExpression.clause', - index=0, containing_type=None, fields=[]), ], - serialized_start=3355, - serialized_end=3480, + serialized_start=3123, + serialized_end=3294, ) -_PARAMETERSEARCHEXPRESSION = _descriptor.Descriptor( - name='ParameterSearchExpression', - full_name='mlflow.ParameterSearchExpression', +_FILEINFO = _descriptor.Descriptor( + name='FileInfo', + full_name='mlflow.FileInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mlflow.ParameterSearchExpression.key', index=0, + name='path', full_name='mlflow.FileInfo.path', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='string', full_name='mlflow.ParameterSearchExpression.string', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='is_dir', full_name='mlflow.FileInfo.is_dir', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='file_size', full_name='mlflow.FileInfo.file_size', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -1728,33 +1735,23 @@ syntax='proto2', extension_ranges=[], oneofs=[ - _descriptor.OneofDescriptor( - name='clause', full_name='mlflow.ParameterSearchExpression.clause', - index=0, containing_type=None, fields=[]), ], - serialized_start=3482, - serialized_end=3572, + serialized_start=3296, + serialized_end=3355, ) -_STRINGCLAUSE = _descriptor.Descriptor( - name='StringClause', - full_name='mlflow.StringClause', +_GETMETRICHISTORY_RESPONSE = _descriptor.Descriptor( + name='Response', + full_name='mlflow.GetMetricHistory.Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='comparator', full_name='mlflow.StringClause.comparator', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='mlflow.StringClause.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + name='metrics', full_name='mlflow.GetMetricHistory.Response.metrics', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -1770,224 +1767,42 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3574, - serialized_end=3623, + serialized_start=3438, + serialized_end=3481, ) - -_FLOATCLAUSE = _descriptor.Descriptor( - name='FloatClause', - full_name='mlflow.FloatClause', +_GETMETRICHISTORY = _descriptor.Descriptor( + name='GetMetricHistory', + full_name='mlflow.GetMetricHistory', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='comparator', full_name='mlflow.FloatClause.comparator', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='mlflow.FloatClause.value', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3625, - serialized_end=3673, -) - - -_DOUBLECLAUSE = _descriptor.Descriptor( - name='DoubleClause', - full_name='mlflow.DoubleClause', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='comparator', full_name='mlflow.DoubleClause.comparator', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='mlflow.DoubleClause.value', index=1, - number=2, type=1, cpp_type=5, label=1, - 
has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3675, - serialized_end=3724, -) - - -_SEARCHRUNS_RESPONSE = _descriptor.Descriptor( - name='Response', - full_name='mlflow.SearchRuns.Response', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='runs', full_name='mlflow.SearchRuns.Response.runs', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3872, - serialized_end=3909, -) - -_SEARCHRUNS = _descriptor.Descriptor( - name='SearchRuns', - full_name='mlflow.SearchRuns', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='experiment_ids', full_name='mlflow.SearchRuns.experiment_ids', index=0, - number=1, type=3, cpp_type=2, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='anded_expressions', full_name='mlflow.SearchRuns.anded_expressions', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='run_view_type', full_name='mlflow.SearchRuns.run_view_type', index=2, - number=3, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_SEARCHRUNS_RESPONSE, ], - enum_types=[ - ], - serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3727, - serialized_end=3954, -) - - -_LISTARTIFACTS_RESPONSE = _descriptor.Descriptor( - name='Response', - full_name='mlflow.ListArtifacts.Response', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='root_uri', full_name='mlflow.ListArtifacts.Response.root_uri', index=0, - number=1, type=9, cpp_type=9, label=1, + name='run_id', full_name='mlflow.GetMetricHistory.run_id', index=0, + number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='files', full_name='mlflow.ListArtifacts.Response.files', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=4006, - serialized_end=4067, -) - -_LISTARTIFACTS = _descriptor.Descriptor( - name='ListArtifacts', - full_name='mlflow.ListArtifacts', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.ListArtifacts.run_uuid', index=0, + name='run_uuid', full_name='mlflow.GetMetricHistory.run_uuid', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='path', full_name='mlflow.ListArtifacts.path', index=1, + name='metric_key', full_name='mlflow.GetMetricHistory.metric_key', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), + serialized_options=_b('\370\206\031\001'), file=DESCRIPTOR), ], extensions=[ ], - nested_types=[_LISTARTIFACTS_RESPONSE, ], + nested_types=[_GETMETRICHISTORY_RESPONSE, ], enum_types=[ ], serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), @@ -1996,59 +1811,14 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3957, - serialized_end=4112, + serialized_start=3358, + serialized_end=3526, ) -_FILEINFO = _descriptor.Descriptor( - name='FileInfo', - full_name='mlflow.FileInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='path', full_name='mlflow.FileInfo.path', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='is_dir', full_name='mlflow.FileInfo.is_dir', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='file_size', full_name='mlflow.FileInfo.file_size', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=4114, - serialized_end=4173, -) - - -_GETARTIFACT_RESPONSE = _descriptor.Descriptor( +_LOGBATCH_RESPONSE = _descriptor.Descriptor( name='Response', - full_name='mlflow.GetArtifact.Response', + full_name='mlflow.LogBatch.Response', filename=None, file=DESCRIPTOR, containing_type=None, @@ -2065,103 +1835,49 @@ extension_ranges=[], oneofs=[ ], - serialized_start=909, - serialized_end=919, + serialized_start=816, + serialized_end=826, ) 
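The regenerated descriptors above, together with the ``LogBatch`` descriptor that follows, replace the removed ``GetArtifact`` messages with a batched logging RPC. As a hedged sketch only (this assumes the regenerated module is importable as ``mlflow.protos.service_pb2``; the run ID and logged values are illustrative, not part of this patch), the new request message can be built with the generated classes::

    from mlflow.protos.service_pb2 import LogBatch, Metric, Param, RunTag

    # Build a batched logging request for the new /mlflow/runs/log-batch endpoint.
    batch = LogBatch(
        run_id="0123456789abcdef",  # illustrative run ID
        metrics=[Metric(key="loss", value=0.25, timestamp=1557000000000)],
        params=[Param(key="alpha", value="0.5")],
        tags=[RunTag(key="stage", value="dev")],
    )
    payload = batch.SerializeToString()  # wire bytes for the RPC body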
-_GETARTIFACT = _descriptor.Descriptor( - name='GetArtifact', - full_name='mlflow.GetArtifact', +_LOGBATCH = _descriptor.Descriptor( + name='LogBatch', + full_name='mlflow.LogBatch', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.GetArtifact.run_uuid', index=0, + name='run_id', full_name='mlflow.LogBatch.run_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='path', full_name='mlflow.GetArtifact.path', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + name='metrics', full_name='mlflow.LogBatch.metrics', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_GETARTIFACT_RESPONSE, ], - enum_types=[ - ], - serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=4175, - serialized_end=4277, -) - - -_GETMETRICHISTORY_RESPONSE = _descriptor.Descriptor( - name='Response', - full_name='mlflow.GetMetricHistory.Response', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ _descriptor.FieldDescriptor( - name='metrics', full_name='mlflow.GetMetricHistory.Response.metrics', index=0, - number=1, type=11, cpp_type=10, label=3, + name='params', full_name='mlflow.LogBatch.params', index=2, + number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=4350, - serialized_end=4393, -) - -_GETMETRICHISTORY = _descriptor.Descriptor( - name='GetMetricHistory', - full_name='mlflow.GetMetricHistory', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ _descriptor.FieldDescriptor( - name='run_uuid', full_name='mlflow.GetMetricHistory.run_uuid', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='metric_key', full_name='mlflow.GetMetricHistory.metric_key', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + name='tags', full_name='mlflow.LogBatch.tags', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b('\210\265\030\001'), file=DESCRIPTOR), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], - nested_types=[_GETMETRICHISTORY_RESPONSE, ], + nested_types=[_LOGBATCH_RESPONSE, ], enum_types=[ ], 
serialized_options=_b('\342?(\n&com.databricks.rpc.RPC[$this.Response]'), @@ -2170,8 +1886,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=4280, - serialized_end=4438, + serialized_start=3529, + serialized_end=3706, ) _RUN.fields_by_name['info'].message_type = _RUNINFO @@ -2179,7 +1895,6 @@ _RUNDATA.fields_by_name['metrics'].message_type = _METRIC _RUNDATA.fields_by_name['params'].message_type = _PARAM _RUNDATA.fields_by_name['tags'].message_type = _RUNTAG -_RUNINFO.fields_by_name['source_type'].enum_type = _SOURCETYPE _RUNINFO.fields_by_name['status'].enum_type = _RUNSTATUS _CREATEEXPERIMENT_RESPONSE.containing_type = _CREATEEXPERIMENT _LISTEXPERIMENTS_RESPONSE.fields_by_name['experiments'].message_type = _EXPERIMENT @@ -2193,7 +1908,6 @@ _UPDATEEXPERIMENT_RESPONSE.containing_type = _UPDATEEXPERIMENT _CREATERUN_RESPONSE.fields_by_name['run'].message_type = _RUN _CREATERUN_RESPONSE.containing_type = _CREATERUN -_CREATERUN.fields_by_name['source_type'].enum_type = _SOURCETYPE _CREATERUN.fields_by_name['tags'].message_type = _RUNTAG _UPDATERUN_RESPONSE.fields_by_name['run_info'].message_type = _RUNINFO _UPDATERUN_RESPONSE.containing_type = _UPDATERUN @@ -2203,41 +1917,20 @@ _LOGMETRIC_RESPONSE.containing_type = _LOGMETRIC _LOGPARAM_RESPONSE.containing_type = _LOGPARAM _SETTAG_RESPONSE.containing_type = _SETTAG +_DELETETAG_RESPONSE.containing_type = _DELETETAG _GETRUN_RESPONSE.fields_by_name['run'].message_type = _RUN _GETRUN_RESPONSE.containing_type = _GETRUN -_GETMETRIC_RESPONSE.fields_by_name['metric'].message_type = _METRIC -_GETMETRIC_RESPONSE.containing_type = _GETMETRIC -_GETPARAM_RESPONSE.fields_by_name['parameter'].message_type = _PARAM -_GETPARAM_RESPONSE.containing_type = _GETPARAM -_SEARCHEXPRESSION.fields_by_name['metric'].message_type = _METRICSEARCHEXPRESSION -_SEARCHEXPRESSION.fields_by_name['parameter'].message_type = _PARAMETERSEARCHEXPRESSION -_SEARCHEXPRESSION.oneofs_by_name['expression'].fields.append( - _SEARCHEXPRESSION.fields_by_name['metric']) -_SEARCHEXPRESSION.fields_by_name['metric'].containing_oneof = _SEARCHEXPRESSION.oneofs_by_name['expression'] -_SEARCHEXPRESSION.oneofs_by_name['expression'].fields.append( - _SEARCHEXPRESSION.fields_by_name['parameter']) -_SEARCHEXPRESSION.fields_by_name['parameter'].containing_oneof = _SEARCHEXPRESSION.oneofs_by_name['expression'] -_METRICSEARCHEXPRESSION.fields_by_name['float'].message_type = _FLOATCLAUSE -_METRICSEARCHEXPRESSION.fields_by_name['double'].message_type = _DOUBLECLAUSE -_METRICSEARCHEXPRESSION.oneofs_by_name['clause'].fields.append( - _METRICSEARCHEXPRESSION.fields_by_name['float']) -_METRICSEARCHEXPRESSION.fields_by_name['float'].containing_oneof = _METRICSEARCHEXPRESSION.oneofs_by_name['clause'] -_METRICSEARCHEXPRESSION.oneofs_by_name['clause'].fields.append( - _METRICSEARCHEXPRESSION.fields_by_name['double']) -_METRICSEARCHEXPRESSION.fields_by_name['double'].containing_oneof = _METRICSEARCHEXPRESSION.oneofs_by_name['clause'] -_PARAMETERSEARCHEXPRESSION.fields_by_name['string'].message_type = _STRINGCLAUSE -_PARAMETERSEARCHEXPRESSION.oneofs_by_name['clause'].fields.append( - _PARAMETERSEARCHEXPRESSION.fields_by_name['string']) -_PARAMETERSEARCHEXPRESSION.fields_by_name['string'].containing_oneof = _PARAMETERSEARCHEXPRESSION.oneofs_by_name['clause'] _SEARCHRUNS_RESPONSE.fields_by_name['runs'].message_type = _RUN _SEARCHRUNS_RESPONSE.containing_type = _SEARCHRUNS -_SEARCHRUNS.fields_by_name['anded_expressions'].message_type = _SEARCHEXPRESSION _SEARCHRUNS.fields_by_name['run_view_type'].enum_type = 
_VIEWTYPE _LISTARTIFACTS_RESPONSE.fields_by_name['files'].message_type = _FILEINFO _LISTARTIFACTS_RESPONSE.containing_type = _LISTARTIFACTS -_GETARTIFACT_RESPONSE.containing_type = _GETARTIFACT _GETMETRICHISTORY_RESPONSE.fields_by_name['metrics'].message_type = _METRIC _GETMETRICHISTORY_RESPONSE.containing_type = _GETMETRICHISTORY +_LOGBATCH_RESPONSE.containing_type = _LOGBATCH +_LOGBATCH.fields_by_name['metrics'].message_type = _METRIC +_LOGBATCH.fields_by_name['params'].message_type = _PARAM +_LOGBATCH.fields_by_name['tags'].message_type = _RUNTAG DESCRIPTOR.message_types_by_name['Metric'] = _METRIC DESCRIPTOR.message_types_by_name['Param'] = _PARAM DESCRIPTOR.message_types_by_name['Run'] = _RUN @@ -2258,20 +1951,13 @@ DESCRIPTOR.message_types_by_name['LogMetric'] = _LOGMETRIC DESCRIPTOR.message_types_by_name['LogParam'] = _LOGPARAM DESCRIPTOR.message_types_by_name['SetTag'] = _SETTAG +DESCRIPTOR.message_types_by_name['DeleteTag'] = _DELETETAG DESCRIPTOR.message_types_by_name['GetRun'] = _GETRUN -DESCRIPTOR.message_types_by_name['GetMetric'] = _GETMETRIC -DESCRIPTOR.message_types_by_name['GetParam'] = _GETPARAM -DESCRIPTOR.message_types_by_name['SearchExpression'] = _SEARCHEXPRESSION -DESCRIPTOR.message_types_by_name['MetricSearchExpression'] = _METRICSEARCHEXPRESSION -DESCRIPTOR.message_types_by_name['ParameterSearchExpression'] = _PARAMETERSEARCHEXPRESSION -DESCRIPTOR.message_types_by_name['StringClause'] = _STRINGCLAUSE -DESCRIPTOR.message_types_by_name['FloatClause'] = _FLOATCLAUSE -DESCRIPTOR.message_types_by_name['DoubleClause'] = _DOUBLECLAUSE DESCRIPTOR.message_types_by_name['SearchRuns'] = _SEARCHRUNS DESCRIPTOR.message_types_by_name['ListArtifacts'] = _LISTARTIFACTS DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO -DESCRIPTOR.message_types_by_name['GetArtifact'] = _GETARTIFACT DESCRIPTOR.message_types_by_name['GetMetricHistory'] = _GETMETRICHISTORY +DESCRIPTOR.message_types_by_name['LogBatch'] = _LOGBATCH DESCRIPTOR.enum_types_by_name['ViewType'] = _VIEWTYPE DESCRIPTOR.enum_types_by_name['SourceType'] = _SOURCETYPE DESCRIPTOR.enum_types_by_name['RunStatus'] = _RUNSTATUS @@ -2521,92 +2207,35 @@ _sym_db.RegisterMessage(SetTag) _sym_db.RegisterMessage(SetTag.Response) -GetRun = _reflection.GeneratedProtocolMessageType('GetRun', (_message.Message,), dict( +DeleteTag = _reflection.GeneratedProtocolMessageType('DeleteTag', (_message.Message,), dict( Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( - DESCRIPTOR = _GETRUN_RESPONSE, + DESCRIPTOR = _DELETETAG_RESPONSE, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetRun.Response) + # @@protoc_insertion_point(class_scope:mlflow.DeleteTag.Response) )) , - DESCRIPTOR = _GETRUN, + DESCRIPTOR = _DELETETAG, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetRun) + # @@protoc_insertion_point(class_scope:mlflow.DeleteTag) )) -_sym_db.RegisterMessage(GetRun) -_sym_db.RegisterMessage(GetRun.Response) +_sym_db.RegisterMessage(DeleteTag) +_sym_db.RegisterMessage(DeleteTag.Response) -GetMetric = _reflection.GeneratedProtocolMessageType('GetMetric', (_message.Message,), dict( - - Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( - DESCRIPTOR = _GETMETRIC_RESPONSE, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetMetric.Response) - )) - , - DESCRIPTOR = _GETMETRIC, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetMetric) - )) 
-_sym_db.RegisterMessage(GetMetric) -_sym_db.RegisterMessage(GetMetric.Response) - -GetParam = _reflection.GeneratedProtocolMessageType('GetParam', (_message.Message,), dict( +GetRun = _reflection.GeneratedProtocolMessageType('GetRun', (_message.Message,), dict( Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( - DESCRIPTOR = _GETPARAM_RESPONSE, + DESCRIPTOR = _GETRUN_RESPONSE, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetParam.Response) + # @@protoc_insertion_point(class_scope:mlflow.GetRun.Response) )) , - DESCRIPTOR = _GETPARAM, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetParam) - )) -_sym_db.RegisterMessage(GetParam) -_sym_db.RegisterMessage(GetParam.Response) - -SearchExpression = _reflection.GeneratedProtocolMessageType('SearchExpression', (_message.Message,), dict( - DESCRIPTOR = _SEARCHEXPRESSION, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.SearchExpression) - )) -_sym_db.RegisterMessage(SearchExpression) - -MetricSearchExpression = _reflection.GeneratedProtocolMessageType('MetricSearchExpression', (_message.Message,), dict( - DESCRIPTOR = _METRICSEARCHEXPRESSION, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.MetricSearchExpression) - )) -_sym_db.RegisterMessage(MetricSearchExpression) - -ParameterSearchExpression = _reflection.GeneratedProtocolMessageType('ParameterSearchExpression', (_message.Message,), dict( - DESCRIPTOR = _PARAMETERSEARCHEXPRESSION, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.ParameterSearchExpression) - )) -_sym_db.RegisterMessage(ParameterSearchExpression) - -StringClause = _reflection.GeneratedProtocolMessageType('StringClause', (_message.Message,), dict( - DESCRIPTOR = _STRINGCLAUSE, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.StringClause) - )) -_sym_db.RegisterMessage(StringClause) - -FloatClause = _reflection.GeneratedProtocolMessageType('FloatClause', (_message.Message,), dict( - DESCRIPTOR = _FLOATCLAUSE, - __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.FloatClause) - )) -_sym_db.RegisterMessage(FloatClause) - -DoubleClause = _reflection.GeneratedProtocolMessageType('DoubleClause', (_message.Message,), dict( - DESCRIPTOR = _DOUBLECLAUSE, + DESCRIPTOR = _GETRUN, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.DoubleClause) + # @@protoc_insertion_point(class_scope:mlflow.GetRun) )) -_sym_db.RegisterMessage(DoubleClause) +_sym_db.RegisterMessage(GetRun) +_sym_db.RegisterMessage(GetRun.Response) SearchRuns = _reflection.GeneratedProtocolMessageType('SearchRuns', (_message.Message,), dict( @@ -2645,35 +2274,35 @@ )) _sym_db.RegisterMessage(FileInfo) -GetArtifact = _reflection.GeneratedProtocolMessageType('GetArtifact', (_message.Message,), dict( +GetMetricHistory = _reflection.GeneratedProtocolMessageType('GetMetricHistory', (_message.Message,), dict( Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( - DESCRIPTOR = _GETARTIFACT_RESPONSE, + DESCRIPTOR = _GETMETRICHISTORY_RESPONSE, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetArtifact.Response) + # @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory.Response) )) , - DESCRIPTOR = _GETARTIFACT, + DESCRIPTOR = _GETMETRICHISTORY, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetArtifact) + # 
@@protoc_insertion_point(class_scope:mlflow.GetMetricHistory) )) -_sym_db.RegisterMessage(GetArtifact) -_sym_db.RegisterMessage(GetArtifact.Response) +_sym_db.RegisterMessage(GetMetricHistory) +_sym_db.RegisterMessage(GetMetricHistory.Response) -GetMetricHistory = _reflection.GeneratedProtocolMessageType('GetMetricHistory', (_message.Message,), dict( +LogBatch = _reflection.GeneratedProtocolMessageType('LogBatch', (_message.Message,), dict( Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( - DESCRIPTOR = _GETMETRICHISTORY_RESPONSE, + DESCRIPTOR = _LOGBATCH_RESPONSE, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory.Response) + # @@protoc_insertion_point(class_scope:mlflow.LogBatch.Response) )) , - DESCRIPTOR = _GETMETRICHISTORY, + DESCRIPTOR = _LOGBATCH, __module__ = 'service_pb2' - # @@protoc_insertion_point(class_scope:mlflow.GetMetricHistory) + # @@protoc_insertion_point(class_scope:mlflow.LogBatch) )) -_sym_db.RegisterMessage(GetMetricHistory) -_sym_db.RegisterMessage(GetMetricHistory.Response) +_sym_db.RegisterMessage(LogBatch) +_sym_db.RegisterMessage(LogBatch.Response) DESCRIPTOR._options = None @@ -2689,39 +2318,30 @@ _UPDATEEXPERIMENT.fields_by_name['experiment_id']._options = None _UPDATEEXPERIMENT._options = None _CREATERUN._options = None -_UPDATERUN.fields_by_name['run_uuid']._options = None _UPDATERUN._options = None _DELETERUN.fields_by_name['run_id']._options = None _DELETERUN._options = None _RESTORERUN.fields_by_name['run_id']._options = None _RESTORERUN._options = None -_LOGMETRIC.fields_by_name['run_uuid']._options = None _LOGMETRIC.fields_by_name['key']._options = None _LOGMETRIC.fields_by_name['value']._options = None _LOGMETRIC.fields_by_name['timestamp']._options = None _LOGMETRIC._options = None -_LOGPARAM.fields_by_name['run_uuid']._options = None _LOGPARAM.fields_by_name['key']._options = None _LOGPARAM.fields_by_name['value']._options = None _LOGPARAM._options = None -_SETTAG.fields_by_name['run_uuid']._options = None _SETTAG.fields_by_name['key']._options = None _SETTAG.fields_by_name['value']._options = None _SETTAG._options = None -_GETRUN.fields_by_name['run_uuid']._options = None +_DELETETAG.fields_by_name['run_id']._options = None +_DELETETAG.fields_by_name['key']._options = None +_DELETETAG._options = None _GETRUN._options = None -_GETMETRIC.fields_by_name['run_uuid']._options = None -_GETMETRIC.fields_by_name['metric_key']._options = None -_GETMETRIC._options = None -_GETPARAM.fields_by_name['run_uuid']._options = None -_GETPARAM.fields_by_name['param_name']._options = None -_GETPARAM._options = None _SEARCHRUNS._options = None _LISTARTIFACTS._options = None -_GETARTIFACT._options = None -_GETMETRICHISTORY.fields_by_name['run_uuid']._options = None _GETMETRICHISTORY.fields_by_name['metric_key']._options = None _GETMETRICHISTORY._options = None +_LOGBATCH._options = None _MLFLOWSERVICE = _descriptor.ServiceDescriptor( name='MlflowService', @@ -2729,8 +2349,8 @@ file=DESCRIPTOR, index=0, serialized_options=None, - serialized_start=4651, - serialized_end=7257, + serialized_start=3919, + serialized_end=7298, methods=[ _descriptor.MethodDescriptor( name='createExperiment', @@ -2739,7 +2359,7 @@ containing_service=None, input_type=_CREATEEXPERIMENT, output_type=_CREATEEXPERIMENT_RESPONSE, - serialized_options=_b('\202\265\030G\n0\n\004POST\022\"/preview/mlflow/experiments/create\032\004\010\002\020\000\020\001*\021Create Experiment'), + 
serialized_options=_b('\362\206\031q\n(\n\004POST\022\032/mlflow/experiments/create\032\004\010\002\020\000\n0\n\004POST\022\"/preview/mlflow/experiments/create\032\004\010\002\020\000\020\001*\021Create Experiment'), ), _descriptor.MethodDescriptor( name='listExperiments', @@ -2748,7 +2368,7 @@ containing_service=None, input_type=_LISTEXPERIMENTS, output_type=_LISTEXPERIMENTS_RESPONSE, - serialized_options=_b('\202\265\030C\n-\n\003GET\022 /preview/mlflow/experiments/list\032\004\010\002\020\000\020\001*\020List Experiments'), + serialized_options=_b('\362\206\031j\n%\n\003GET\022\030/mlflow/experiments/list\032\004\010\002\020\000\n-\n\003GET\022 /preview/mlflow/experiments/list\032\004\010\002\020\000\020\001*\020List Experiments'), ), _descriptor.MethodDescriptor( name='getExperiment', @@ -2757,7 +2377,7 @@ containing_service=None, input_type=_GETEXPERIMENT, output_type=_GETEXPERIMENT_RESPONSE, - serialized_options=_b('\202\265\030@\n,\n\003GET\022\037/preview/mlflow/experiments/get\032\004\010\002\020\000\020\001*\016Get Experiment'), + serialized_options=_b('\362\206\031f\n$\n\003GET\022\027/mlflow/experiments/get\032\004\010\002\020\000\n,\n\003GET\022\037/preview/mlflow/experiments/get\032\004\010\002\020\000\020\001*\016Get Experiment'), ), _descriptor.MethodDescriptor( name='deleteExperiment', @@ -2766,7 +2386,7 @@ containing_service=None, input_type=_DELETEEXPERIMENT, output_type=_DELETEEXPERIMENT_RESPONSE, - serialized_options=_b('\202\265\030G\n0\n\004POST\022\"/preview/mlflow/experiments/delete\032\004\010\002\020\000\020\001*\021Delete Experiment'), + serialized_options=_b('\362\206\031q\n(\n\004POST\022\032/mlflow/experiments/delete\032\004\010\002\020\000\n0\n\004POST\022\"/preview/mlflow/experiments/delete\032\004\010\002\020\000\020\001*\021Delete Experiment'), ), _descriptor.MethodDescriptor( name='restoreExperiment', @@ -2775,7 +2395,7 @@ containing_service=None, input_type=_RESTOREEXPERIMENT, output_type=_RESTOREEXPERIMENT_RESPONSE, - serialized_options=_b('\202\265\030I\n1\n\004POST\022#/preview/mlflow/experiments/restore\032\004\010\002\020\000\020\001*\022Restore Experiment'), + serialized_options=_b('\362\206\031t\n)\n\004POST\022\033/mlflow/experiments/restore\032\004\010\002\020\000\n1\n\004POST\022#/preview/mlflow/experiments/restore\032\004\010\002\020\000\020\001*\022Restore Experiment'), ), _descriptor.MethodDescriptor( name='updateExperiment', @@ -2784,7 +2404,7 @@ containing_service=None, input_type=_UPDATEEXPERIMENT, output_type=_UPDATEEXPERIMENT_RESPONSE, - serialized_options=_b('\202\265\030G\n0\n\004POST\022\"/preview/mlflow/experiments/update\032\004\010\002\020\000\020\001*\021Update Experiment'), + serialized_options=_b('\362\206\031q\n(\n\004POST\022\032/mlflow/experiments/update\032\004\010\002\020\000\n0\n\004POST\022\"/preview/mlflow/experiments/update\032\004\010\002\020\000\020\001*\021Update Experiment'), ), _descriptor.MethodDescriptor( name='createRun', @@ -2793,7 +2413,7 @@ containing_service=None, input_type=_CREATERUN, output_type=_CREATERUN_RESPONSE, - serialized_options=_b('\202\265\0309\n)\n\004POST\022\033/preview/mlflow/runs/create\032\004\010\002\020\000\020\001*\nCreate Run'), + serialized_options=_b('\362\206\031\\\n!\n\004POST\022\023/mlflow/runs/create\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlflow/runs/create\032\004\010\002\020\000\020\001*\nCreate Run'), ), _descriptor.MethodDescriptor( name='updateRun', @@ -2802,7 +2422,7 @@ containing_service=None, input_type=_UPDATERUN, output_type=_UPDATERUN_RESPONSE, - 
serialized_options=_b('\202\265\0309\n)\n\004POST\022\033/preview/mlflow/runs/update\032\004\010\002\020\000\020\001*\nUpdate Run'), + serialized_options=_b('\362\206\031\\\n!\n\004POST\022\023/mlflow/runs/update\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlflow/runs/update\032\004\010\002\020\000\020\001*\nUpdate Run'), ), _descriptor.MethodDescriptor( name='deleteRun', @@ -2811,7 +2431,7 @@ containing_service=None, input_type=_DELETERUN, output_type=_DELETERUN_RESPONSE, - serialized_options=_b('\202\265\030-\n)\n\004POST\022\033/preview/mlflow/runs/delete\032\004\010\002\020\000\020\001'), + serialized_options=_b('\362\206\031\\\n!\n\004POST\022\023/mlflow/runs/delete\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlflow/runs/delete\032\004\010\002\020\000\020\001*\nDelete Run'), ), _descriptor.MethodDescriptor( name='restoreRun', @@ -2820,7 +2440,7 @@ containing_service=None, input_type=_RESTORERUN, output_type=_RESTORERUN_RESPONSE, - serialized_options=_b('\202\265\030.\n*\n\004POST\022\034/preview/mlflow/runs/restore\032\004\010\002\020\000\020\001'), + serialized_options=_b('\362\206\031_\n\"\n\004POST\022\024/mlflow/runs/restore\032\004\010\002\020\000\n*\n\004POST\022\034/preview/mlflow/runs/restore\032\004\010\002\020\000\020\001*\013Restore Run'), ), _descriptor.MethodDescriptor( name='logMetric', @@ -2829,7 +2449,7 @@ containing_service=None, input_type=_LOGMETRIC, output_type=_LOGMETRIC_RESPONSE, - serialized_options=_b('\202\265\030=\n-\n\004POST\022\037/preview/mlflow/runs/log-metric\032\004\010\002\020\000\020\001*\nLog Metric'), + serialized_options=_b('\362\206\031d\n%\n\004POST\022\027/mlflow/runs/log-metric\032\004\010\002\020\000\n-\n\004POST\022\037/preview/mlflow/runs/log-metric\032\004\010\002\020\000\020\001*\nLog Metric'), ), _descriptor.MethodDescriptor( name='logParam', @@ -2838,7 +2458,7 @@ containing_service=None, input_type=_LOGPARAM, output_type=_LOGPARAM_RESPONSE, - serialized_options=_b('\202\265\030?\n0\n\004POST\022\"/preview/mlflow/runs/log-parameter\032\004\010\002\020\000\020\001*\tLog Param'), + serialized_options=_b('\362\206\031i\n(\n\004POST\022\032/mlflow/runs/log-parameter\032\004\010\002\020\000\n0\n\004POST\022\"/preview/mlflow/runs/log-parameter\032\004\010\002\020\000\020\001*\tLog Param'), ), _descriptor.MethodDescriptor( name='setTag', @@ -2847,61 +2467,61 @@ containing_service=None, input_type=_SETTAG, output_type=_SETTAG_RESPONSE, - serialized_options=_b('\202\265\0307\n*\n\004POST\022\034/preview/mlflow/runs/set-tag\032\004\010\002\020\000\020\001*\007Set Tag'), + serialized_options=_b('\362\206\031[\n\"\n\004POST\022\024/mlflow/runs/set-tag\032\004\010\002\020\000\n*\n\004POST\022\034/preview/mlflow/runs/set-tag\032\004\010\002\020\000\020\001*\007Set Tag'), ), _descriptor.MethodDescriptor( - name='getRun', - full_name='mlflow.MlflowService.getRun', + name='deleteTag', + full_name='mlflow.MlflowService.deleteTag', index=13, containing_service=None, - input_type=_GETRUN, - output_type=_GETRUN_RESPONSE, - serialized_options=_b('\202\265\0302\n%\n\003GET\022\030/preview/mlflow/runs/get\032\004\010\002\020\000\020\001*\007Get Run'), + input_type=_DELETETAG, + output_type=_DELETETAG_RESPONSE, + serialized_options=_b('\362\206\031d\n%\n\004POST\022\027/mlflow/runs/delete-tag\032\004\010\002\020\000\n-\n\004POST\022\037/preview/mlflow/runs/delete-tag\032\004\010\002\020\000\020\001*\nDelete Tag'), ), _descriptor.MethodDescriptor( - name='getMetric', - full_name='mlflow.MlflowService.getMetric', + name='getRun', + 
full_name='mlflow.MlflowService.getRun', index=14, containing_service=None, - input_type=_GETMETRIC, - output_type=_GETMETRIC_RESPONSE, - serialized_options=_b('\202\265\0308\n(\n\003GET\022\033/preview/mlflow/metrics/get\032\004\010\002\020\000\020\001*\nGet Metric'), - ), - _descriptor.MethodDescriptor( - name='getParam', - full_name='mlflow.MlflowService.getParam', - index=15, - containing_service=None, - input_type=_GETPARAM, - output_type=_GETPARAM_RESPONSE, - serialized_options=_b('\202\265\0306\n\'\n\003GET\022\032/preview/mlflow/params/get\032\004\010\002\020\000\020\001*\tGet Param'), + input_type=_GETRUN, + output_type=_GETRUN_RESPONSE, + serialized_options=_b('\362\206\031Q\n\035\n\003GET\022\020/mlflow/runs/get\032\004\010\002\020\000\n%\n\003GET\022\030/preview/mlflow/runs/get\032\004\010\002\020\000\020\001*\007Get Run'), ), _descriptor.MethodDescriptor( name='searchRuns', full_name='mlflow.MlflowService.searchRuns', - index=16, + index=15, containing_service=None, input_type=_SEARCHRUNS, output_type=_SEARCHRUNS_RESPONSE, - serialized_options=_b('\202\265\030d\n)\n\004POST\022\033/preview/mlflow/runs/search\032\004\010\002\020\000\n(\n\003GET\022\033/preview/mlflow/runs/search\032\004\010\002\020\000\020\001*\013Search Runs'), + serialized_options=_b('\362\206\031\207\001\n!\n\004POST\022\023/mlflow/runs/search\032\004\010\002\020\000\n)\n\004POST\022\033/preview/mlflow/runs/search\032\004\010\002\020\000\n(\n\003GET\022\033/preview/mlflow/runs/search\032\004\010\002\020\000\020\001*\013Search Runs'), ), _descriptor.MethodDescriptor( name='listArtifacts', full_name='mlflow.MlflowService.listArtifacts', - index=17, + index=16, containing_service=None, input_type=_LISTARTIFACTS, output_type=_LISTARTIFACTS_RESPONSE, - serialized_options=_b('\202\265\030?\n+\n\003GET\022\036/preview/mlflow/artifacts/list\032\004\010\002\020\000\020\001*\016List Artifacts'), + serialized_options=_b('\362\206\031d\n#\n\003GET\022\026/mlflow/artifacts/list\032\004\010\002\020\000\n+\n\003GET\022\036/preview/mlflow/artifacts/list\032\004\010\002\020\000\020\001*\016List Artifacts'), ), _descriptor.MethodDescriptor( name='getMetricHistory', full_name='mlflow.MlflowService.getMetricHistory', - index=18, + index=17, containing_service=None, input_type=_GETMETRICHISTORY, output_type=_GETMETRICHISTORY_RESPONSE, - serialized_options=_b('\202\265\030H\n0\n\003GET\022#/preview/mlflow/metrics/get-history\032\004\010\002\020\000\020\001*\022Get Metric History'), + serialized_options=_b('\362\206\031r\n(\n\003GET\022\033/mlflow/metrics/get-history\032\004\010\002\020\000\n0\n\003GET\022#/preview/mlflow/metrics/get-history\032\004\010\002\020\000\020\001*\022Get Metric History'), + ), + _descriptor.MethodDescriptor( + name='logBatch', + full_name='mlflow.MlflowService.logBatch', + index=18, + containing_service=None, + input_type=_LOGBATCH, + output_type=_LOGBATCH_RESPONSE, + serialized_options=_b('\362\206\031a\n$\n\004POST\022\026/mlflow/runs/log-batch\032\004\010\002\020\000\n,\n\004POST\022\036/preview/mlflow/runs/log-batch\032\004\010\002\020\000\020\001*\tLog Batch'), ), ]) _sym_db.RegisterServiceDescriptor(_MLFLOWSERVICE) diff --git a/mlflow/pyfunc/__init__.py b/mlflow/pyfunc/__init__.py index 7c462bb3b3c36..f731592f46955 100644 --- a/mlflow/pyfunc/__init__.py +++ b/mlflow/pyfunc/__init__.py @@ -1,19 +1,23 @@ # -*- coding: utf-8 -*- """ -The ``mlflow.pyfunc`` module defines a generic filesystem format for Python models and provides -utilities for saving to and loading from this format. 
The format is self contained in the sense -that it includes all necessary information for anyone to load it and use it. Dependencies -are either stored directly with the model or referenced via a Conda environment. +The ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format <pyfunc-filesystem-format>` +for Python models and provides utilities for saving to and loading from this format. The format is +self contained in the sense that it includes all necessary information for anyone to load it and +use it. Dependencies are either stored directly with the model or referenced via a Conda +environment. -The convention for pyfunc models is to have a ``predict`` method or function with the following -signature:: +The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models +using frameworks and inference logic that may not be natively included in MLflow. See +:ref:`pyfunc-create-custom`. - predict(data: pandas.DataFrame) -> numpy.ndarray | pandas.Series | pandas.DataFrame +.. _pyfunc-filesystem-format: -This convention is relied on by other MLflow components. +***************** +Filesystem format +***************** -Pyfunc model format is defined as a directory structure containing all required data, code, and +The Pyfunc format is defined as a directory structure containing all required data, code, and configuration:: ./dst-path/ ./MLmodel: configuration <code>: code packaged with the model (specified in the MLmodel file) <data>: data packaged with the model (specified in the MLmodel file) <env>: Conda environment definition (specified in the MLmodel file) -A Python model contains an ``MLmodel`` file in "python_function" format in its root with the +The directory structure may contain additional contents that can be referenced by the ``MLmodel`` +configuration. + +.. _pyfunc-model-config: + +MLmodel configuration +##################### + +A Python model contains an ``MLmodel`` file in **python_function** format in its root with the following parameters: - loader_module [required]: Python module that can load the model. Expected as module identifier - e.g. ``mlflow.sklearn``, it will be imported via ``importlib.import_module``. - The imported module must contain function with the following signature:: + e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``. + The imported module must contain a function with the following signature:: _load_pyfunc(path: string) -> <pyfunc model> @@ -48,6 +60,9 @@ Relative path to an exported Conda environment. If present this environment should be activated prior to running the model. +- Optionally, any additional parameters necessary for interpreting the serialized model in + ``pyfunc`` format. + .. rubric:: Example >>> tree example/sklearn_iris/mlruns/run1/outputs/linear-lr @@ -72,20 +87,126 @@ loader_module: mlflow.sklearn env: mlflow_env.yml main: sklearn_iris + +.. _pyfunc-inference-api: + +************* +Inference API +************* + +The convention for pyfunc models is to have a ``predict`` method or function with the following +signature:: + + predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.Series | pandas.DataFrame] + +This convention is relied on by other MLflow components. + +.. _pyfunc-create-custom: + +****************************** +Creating custom Pyfunc models +****************************** + +MLflow's persistence modules provide convenience functions for creating models with the +``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, PyTorch, and +more); however, they do not cover every use case. 
For example, you may want to create an MLflow +model with the ``pyfunc`` flavor using a framework that MLflow does not natively support. +Alternatively, you may want to build an MLflow model that executes custom logic when evaluating +queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc`` +provides utilities for creating ``pyfunc`` models from arbitrary code and model data. + +The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows +for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts +that the logic may require. + +An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a +serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact. + +.. _pyfunc-create-custom-workflows: + +Workflows +######### + +:meth:`save_model()` and :meth:`log_model()` support the following workflows: + +1. Programmatically defining a new MLflow model, including its attributes and artifacts. + + Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can + automatically download artifacts from their URIs and create an MLflow model directory. + + In this case, you must define a Python class which inherits from :class:`~PythonModel`, + defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is + specified via the ``python_model`` parameter; it is automatically serialized and deserialized + as a Python class, including all of its attributes. + +2. Interpreting pre-existing data as an MLflow model. + + If you already have a directory containing model data, :meth:`save_model()` and + :meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter + specifies the local filesystem path to the directory containing model data. + + In this case, you must provide a Python module, called a `loader module`. The + loader module defines a ``_load_pyfunc()`` method that performs the following tasks: + + - Load data from the specified ``data_path``. For example, this process may include + deserializing pickled Python objects or models or parsing CSV files. + + - Construct and return a pyfunc-compatible model wrapper. As in the first + use case, this wrapper must define a ``predict()`` method that is used to evaluate + queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`. + + The ``loader_module`` parameter specifies the name of your loader module. + + For an example loader module implementation, refer to the `loader module + implementation in mlflow.keras `_. + +.. _pyfunc-create-custom-selecting-workflow: + +Which workflow is right for my use case? +######################################## + +We consider the first workflow to be more user-friendly and generally recommend it for the +following reasons: + +- It automatically resolves and collects specified model artifacts. + +- It automatically serializes and deserializes the ``python_model`` instance and all of + its attributes, reducing the amount of user logic that is required to load the model. + +- You can create Models using logic that is defined in the ``__main__`` scope. This allows + custom models to be constructed in interactive environments, such as notebooks and the Python + REPL. + +You may prefer the second, lower-level workflow for the following reasons: + +- Inference logic is always persisted as code, rather than a Python object. This makes logic + easier to inspect and modify later. 
+ +- If you have already collected all of your model data in a single location, the second + workflow allows it to be saved in MLflow format directly, without enumerating constituent + artifacts. """ import importlib +import logging +import numpy as np import os -import shutil -import sys import pandas +import shutil +from copy import deepcopy -from mlflow.tracking.fluent import active_run, log_artifacts -from mlflow import tracking +import mlflow +import mlflow.pyfunc.model +import mlflow.pyfunc.utils from mlflow.models import Model -from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version +from mlflow.pyfunc.model import PythonModel, PythonModelContext, get_default_conda_env +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils import PYTHON_VERSION, deprecated, get_major_minor_py_version from mlflow.utils.file_utils import TempDir, _copy_file_or_tree -from mlflow.utils.logging_utils import eprint +from mlflow.utils.model_utils import _get_flavor_configuration +from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS FLAVOR_NAME = "python_function" MAIN = "loader_module" @@ -94,14 +215,16 @@ ENV = "env" PY_VERSION = "python_version" +_logger = logging.getLogger(__name__) + -def add_to_model(model, loader_module, data=None, code=None, env=None): +def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs): """ - Add a pyfunc spec to the model configuration. + Add a ``pyfunc`` spec to the model configuration. - Defines pyfunc configuration schema. Caller can use this to create a valid pyfunc model flavor - out of an existing directory structure. For example, other model flavors can use this to specify - how to use their output as a pyfunc. + Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model + flavor out of an existing directory structure. For example, other model flavors can use this to + specify how to use their output as a ``pyfunc``. NOTE: @@ -112,9 +235,12 @@ def add_to_model(model, loader_module, data=None, code=None, env=None): :param data: Path to the model data. :param code: Path to the code dependencies. :param env: Conda environment. + :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification. + Values must be YAML-serializable. :return: Updated model configuration. """ - parms = {MAIN: loader_module} + parms = deepcopy(kwargs) + parms[MAIN] = loader_module parms[PY_VERSION] = PYTHON_VERSION if code: parms[CODE] = code @@ -125,97 +251,171 @@ def add_to_model(model, loader_module, data=None, code=None, env=None): return model.add_flavor(FLAVOR_NAME, **parms) -def _load_model_conf(path, run_id=None): - """Load a model configuration stored in Python function format.""" - if run_id: - path = tracking.utils._get_model_log_dir(path, run_id) - conf_path = os.path.join(path, "MLmodel") - model = Model.load(conf_path) - if FLAVOR_NAME not in model.flavors: - raise Exception("Format '{format}' not found not in {path}.".format(format=FLAVOR_NAME, - path=conf_path)) - return model.flavors[FLAVOR_NAME] +def _load_model_env(path): + """ + Get ENV file string from a model configuration stored in Python Function format. 
+ Returned value is a model-relative path to a Conda Environment file, + or None if none was specified at model save time. + """ + return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None) -def _load_model_env(path, run_id=None): +def load_model(model_uri, suppress_warnings=False): """ - Get ENV file string from a model configuration stored in Python Function format. - Returned value is a model-relative path to a Conda Environment file, - or None if none was specified at model save time + Load a model stored in Python function format. + + :param model_uri: The location, in URI format, of the MLflow model. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model + loading process will be suppressed. If ``False``, these warning + messages will be emitted. """ - return _load_model_conf(path, run_id).get(ENV, None) + return load_pyfunc(model_uri, suppress_warnings) -def load_pyfunc(path, run_id=None, suppress_warnings=False): +@deprecated("pyfunc.load_model", 1.0) +def load_pyfunc(model_uri, suppress_warnings=False): """ Load a model stored in Python function format. - :param path: Path to the model. - :param run_id: MLflow run ID. - :param suppress_warnings: If True, non-fatal warning messages associated with the model - loading process will be suppressed. If False, these warning messages - will be emitted. + :param model_uri: The location, in URI format, of the MLflow model. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model + loading process will be suppressed. If ``False``, these warning + messages will be emitted. """ - if run_id: - path = tracking.utils._get_model_log_dir(path, run_id) - conf = _load_model_conf(path) + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) model_py_version = conf.get(PY_VERSION) if not suppress_warnings: _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version) if CODE in conf and conf[CODE]: - code_path = os.path.join(path, conf[CODE]) - sys.path = [code_path] + _get_code_dirs(code_path) + sys.path - data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path + code_path = os.path.join(local_model_path, conf[CODE]) + mlflow.pyfunc.utils._add_code_to_system_path(code_path=code_path) + data_path = os.path.join(local_model_path, conf[DATA]) if (DATA in conf) else local_model_path return importlib.import_module(conf[MAIN])._load_pyfunc(data_path) -def _warn_potentially_incompatible_py_version_if_necessary(model_py_version): +def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None): + """ + Compares the version of Python that was used to save a given model with the version + of Python that is currently running. If a major or minor version difference is detected, + logs an appropriate warning. 
+ """ if model_py_version is None: - eprint("The specified model does not have a specified Python version. It may be" - " incompatible with the version of Python that is currently running:" - " Python {version}".format( - version=PYTHON_VERSION)) + _logger.warning( + "The specified model does not have a specified Python version. It may be" + " incompatible with the version of Python that is currently running: Python %s", + PYTHON_VERSION) elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION): - eprint("The version of Python that the model was saved in, Python {model_version}, differs" - " from the version of Python that is currently running, Python {system_version}," - " and may be incompatible".format( - model_version=model_py_version, system_version=PYTHON_VERSION)) - - -def _get_code_dirs(src_code_path, dst_code_path=None): - if not dst_code_path: - dst_code_path = src_code_path - return [(os.path.join(dst_code_path, x)) - for x in os.listdir(src_code_path) if not x.endswith(".py") and not - x.endswith(".pyc") and not x == "__pycache__"] + _logger.warning( + "The version of Python that the model was saved in, `Python %s`, differs" + " from the version of Python that is currently running, `Python %s`," + " and may be incompatible", + model_py_version, PYTHON_VERSION) -def spark_udf(spark, path, run_id=None, result_type="double"): +def spark_udf(spark, model_uri, result_type="double"): """ A Spark UDF that can be used to invoke the Python function formatted model. Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are ordinals (0, 1, ...). + The predictions are filtered to contain only the columns that can be represented as the + ``result_type``. If the ``result_type`` is string or array of strings, all predictions are + converted to string. If the result type is not an array type, the left most column with + matching type is returned. + >>> predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model") >>> df.withColumn("prediction", predict("name", "age")).show() :param spark: A SparkSession object. - :param path: A path containing a :py:mod:`mlflow.pyfunc` model. - :param run_id: ID of the run that produced this model. If provided, ``run_id`` is used to - retrieve the model logged with MLflow. - :return: Spark UDF type returned by the model's prediction method. Default double. + :param model_uri: The location, in URI format, of the MLflow model with the + :py:mod:`mlflow.pyfunc` flavor. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :param result_type: the return type of the user-defined function. The value can be either a + :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Only a primitive + type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed. + The following classes of result type are supported: + + - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an + ``int32`` or an exception if there is none. + + - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an + ``int64`` or an exception if there is none. + + - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested + size. 
+ + - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to + ``float32`` or an exception if there is none. + + - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to + ``double`` or an exception if there is none. + + - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or + an exception if there are no numeric columns. + + - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``. + + - ``ArrayType(StringType)``: All columns converted to ``string``. + + :return: Spark UDF that applies the model's ``predict`` method to the data and returns a + type specified by ``result_type``, which by default is a double. """ # Scope Spark import to this method so users don't need pyspark to use non-Spark-related # functionality. from mlflow.pyfunc.spark_model_cache import SparkModelCache from pyspark.sql.functions import pandas_udf + from pyspark.sql.types import _parse_datatype_string + from pyspark.sql.types import ArrayType, DataType + from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType + + if not isinstance(result_type, DataType): + result_type = _parse_datatype_string(result_type) - if run_id: - path = tracking.utils._get_model_log_dir(path, run_id) + elem_type = result_type + if isinstance(elem_type, ArrayType): + elem_type = elem_type.elementType - archive_path = SparkModelCache.add_local_model(spark, path) + supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType] + + if not any([isinstance(elem_type, x) for x in supported_types]): + raise MlflowException( + message="Invalid result_type '{}'. Result type can only be one of or an array of one " + "of the following types types: {}".format(str(elem_type), str(supported_types)), + error_code=INVALID_PARAMETER_VALUE) + + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + archive_path = SparkModelCache.add_local_model(spark, local_model_path) def predict(*args): model = SparkModelCache.get_or_load(archive_path) @@ -224,100 +424,289 @@ def predict(*args): columns = [str(i) for i, _ in enumerate(args)] pdf = pandas.DataFrame(schema, columns=columns) result = model.predict(pdf) - return pandas.Series(result) + if not isinstance(result, pandas.DataFrame): + result = pandas.DataFrame(data=result) + + elif type(elem_type) == IntegerType: + result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, + np.int32]).astype(np.int32) + + elif type(elem_type) == LongType: + result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, np.int, np.long]) + + elif type(elem_type) == FloatType: + result = result.select_dtypes(include=(np.number,)).astype(np.float32) + + elif type(elem_type) == DoubleType: + result = result.select_dtypes(include=(np.number,)).astype(np.float64) + + if len(result.columns) == 0: + raise MlflowException( + message="The the model did not produce any values compatible with the requested " + "type '{}'. 
Consider requesting udf with StringType or " + "ArrayType(StringType).".format(str(elem_type)), + error_code=INVALID_PARAMETER_VALUE) + + if type(elem_type) == StringType: + result = result.applymap(str) + + if type(result_type) == ArrayType: + return pandas.Series([row[1].values for row in result.iterrows()]) + else: + return result[result.columns[0]] return pandas_udf(predict, result_type) -def save_model(dst_path, loader_module, data_path=None, code_path=(), conda_env=None, - model=Model()): +def save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None, + mlflow_model=Model(), python_model=None, artifacts=None, **kwargs): """ - Export model as a generic Python function model. + save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\ + mlflow_model=Model(), python_model=None, artifacts=None) + + Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the + local filesystem. + + For information about the workflows that this method supports, please see :ref:`"workflows for + creating custom pyfunc models" <pyfunc-create-custom-workflows>` and + :ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`. + Note that the parameters for the first workflow: ``loader_module``, ``data_path`` and the + parameters for the second workflow: ``python_model``, ``artifacts``, cannot be + specified together. + + :param path: The path to which to save the Python model. + :param loader_module: The name of the Python module that is used to load the model + from ``data_path``. This module must define a method with the prototype + ``_load_pyfunc(data_path)``. If not ``None``, this module and its + dependencies must be included in one of the following locations: + + - The MLflow library. + - Package(s) listed in the model's Conda environment, specified by + the ``conda_env`` parameter. + - One or more of the files specified by the ``code_path`` parameter. - :param dst_path: Path where the model is stored. - :param loader_module: The module to be used to load the model. :param data_path: Path to a file or directory containing model data. - :param code_path: List of paths (file or dir) contains code dependencies not present in - the environment. Every path in the ``code_path`` is added to the Python + :param code_path: A list of local filesystem paths to Python file dependencies (or directories + containing file dependencies). These files are *prepended* to the system path before the model is loaded. - :param conda_env: Path to the Conda environment definition. This environment is activated - prior to running model code. - :return: Model configuration containing model info. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. This describes the environment this model should + be run in. If ``python_model`` is not ``None``, the Conda environment must + at least specify the dependencies contained in + :func:`get_default_conda_env()`. If ``None``, the default + :func:`get_default_conda_env()` environment is added to the + model. The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'cloudpickle==0.5.8' + ] + } + :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the + **python_function** flavor. + :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is + serialized using the CloudPickle library. Any dependencies of the class 
Any dependencies of the class
+ should be included in one of the following locations:
+
+ - The MLflow library.
+ - Package(s) listed in the model's Conda environment, specified by
+ the ``conda_env`` parameter.
+ - One or more of the files specified by the ``code_path`` parameter.
+
+ Note: If the class is imported from another module, as opposed to being
+ defined in the ``__main__`` scope, the defining module should also be
+ included in one of the listed locations.
+ :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
+ are resolved to absolute filesystem paths, producing a dictionary of
+ ``<name, local_path>`` entries. ``python_model`` can reference these
+ resolved entries as the ``artifacts`` property of the ``context`` parameter
+ in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
+ and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
+ For example, consider the following ``artifacts`` dictionary::
+
+ {
+ "my_file": "s3://my-bucket/path/to/my/file"
+ }
+
+ In this case, the ``"my_file"`` artifact is downloaded from S3. The
+ ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
+ path via ``context.artifacts["my_file"]``.
+
+ If ``None``, no artifacts are added to the model.
+ """
+ mlflow_model = kwargs.pop('model', mlflow_model)
+ if len(kwargs) > 0:
+ raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
+ first_argument_set = {
+ "loader_module": loader_module,
+ "data_path": data_path,
+ }
+ second_argument_set = {
+ "artifacts": artifacts,
+ "python_model": python_model,
+ }
+ first_argument_set_specified = any([item is not None for item in first_argument_set.values()])
+ second_argument_set_specified = any([item is not None for item in second_argument_set.values()])
+ if first_argument_set_specified and second_argument_set_specified:
+ raise MlflowException(
+ message=(
+ "The following sets of parameters cannot be specified together: {first_set_keys}"
+ " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
+ " the following values: {first_set_entries} and {second_set_entries}".format(
+ first_set_keys=first_argument_set.keys(),
+ second_set_keys=second_argument_set.keys(),
+ first_set_entries=first_argument_set,
+ second_set_entries=second_argument_set)),
+ error_code=INVALID_PARAMETER_VALUE)
+ elif (loader_module is None) and (python_model is None):
+ raise MlflowException(
+ message="Either `loader_module` or `python_model` must be specified!",
+ error_code=INVALID_PARAMETER_VALUE)
+
+ if first_argument_set_specified:
+ return _save_model_with_loader_module_and_data_path(
+ path=path, loader_module=loader_module, data_path=data_path,
+ code_paths=code_path, conda_env=conda_env, mlflow_model=mlflow_model)
+ elif second_argument_set_specified:
+ return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
+ path=path, python_model=python_model, artifacts=artifacts, conda_env=conda_env,
+ code_paths=code_path, mlflow_model=mlflow_model)
+
+
+def log_model(artifact_path, loader_module=None, data_path=None, code_path=None, conda_env=None,
+ python_model=None, artifacts=None):
+ """
+ Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
+ artifact for the current run.
+
+ For information about the workflows that this method supports, see :ref:`Workflows for
+ creating custom pyfunc models <pyfunc-create-custom-workflows>` and
+ :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`. 
+ You cannot specify the parameters for the first workflow (``loader_module``, ``data_path``)
+ together with the parameters for the second workflow (``python_model``, ``artifacts``).
+
+ :param artifact_path: The run-relative artifact path to which to log the Python model.
+ :param loader_module: The name of the Python module that is used to load the model
+ from ``data_path``. This module must define a method with the prototype
+ ``_load_pyfunc(data_path)``. If not ``None``, this module and its
+ dependencies must be included in one of the following locations:
+
+ - The MLflow library.
+ - Package(s) listed in the model's Conda environment, specified by
+ the ``conda_env`` parameter.
+ - One or more of the files specified by the ``code_path`` parameter.
+ :param data_path: Path to a file or directory containing model data.
+ :param code_path: A list of local filesystem paths to Python file dependencies (or directories
+ containing file dependencies). These files are *prepended* to the system
+ path before the model is loaded.
+ :param conda_env: Either a dictionary representation of a Conda environment or the path to a
+ Conda environment yaml file. This describes the environment this model should
+ be run in. If ``python_model`` is not ``None``, the Conda environment must
+ at least specify the dependencies contained in
+ :func:`get_default_conda_env()`. If ``None``, the default
+ :func:`get_default_conda_env()` environment is added to the
+ model. The following is an *example* dictionary representation of a Conda
+ environment::
+
+ {
+ 'name': 'mlflow-env',
+ 'channels': ['defaults'],
+ 'dependencies': [
+ 'python=3.7.0',
+ 'cloudpickle==0.5.8'
+ ]
+ }
+
+ :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
+ serialized using the CloudPickle library. Any dependencies of the class
+ should be included in one of the following locations:
+
+ - The MLflow library.
+ - Package(s) listed in the model's Conda environment, specified by
+ the ``conda_env`` parameter.
+ - One or more of the files specified by the ``code_path`` parameter.
+
+ Note: If the class is imported from another module, as opposed to being
+ defined in the ``__main__`` scope, the defining module should also be
+ included in one of the listed locations.
+ :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
+ are resolved to absolute filesystem paths, producing a dictionary of
+ ``<name, local_path>`` entries. ``python_model`` can reference these
+ resolved entries as the ``artifacts`` property of the ``context`` parameter
+ in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
+ and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
+ For example, consider the following ``artifacts`` dictionary::
+
+ {
+ "my_file": "s3://my-bucket/path/to/my/file"
+ }
+
+ In this case, the ``"my_file"`` artifact is downloaded from S3. The
+ ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
+ path via ``context.artifacts["my_file"]``.
+
+ If ``None``, no artifacts are added to the model. 
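+
+ As a brief sketch of the second workflow (the ``AddN`` class and its parameter value
+ are illustrative assumptions, not part of this API)::
+
+     import mlflow.pyfunc
+
+     class AddN(mlflow.pyfunc.PythonModel):
+         def __init__(self, n):
+             self.n = n
+
+         def predict(self, context, model_input):
+             # Add ``n`` to every column of the input DataFrame
+             return model_input.apply(lambda column: column + self.n)
+
+     with mlflow.start_run():
+         mlflow.pyfunc.log_model("add_n_model", python_model=AddN(n=5))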
+ """
+ return Model.log(artifact_path=artifact_path,
+ flavor=mlflow.pyfunc,
+ loader_module=loader_module,
+ data_path=data_path,
+ code_path=code_path,
+ python_model=python_model,
+ artifacts=artifacts,
+ conda_env=conda_env)
+
+
+def _save_model_with_loader_module_and_data_path(path, loader_module, data_path=None,
+ code_paths=None, conda_env=None,
+ mlflow_model=Model()):
 """
- if os.path.exists(dst_path):
- raise Exception("Path '{}' already exists".format(dst_path))
- os.makedirs(dst_path)
+ Export model as a generic Python function model.
+ :param path: The path to which to save the Python model.
+ :param loader_module: The name of the Python module that is used to load the model
+ from ``data_path``. This module must define a method with the prototype
+ ``_load_pyfunc(data_path)``.
+ :param data_path: Path to a file or directory containing model data.
+ :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
+ containing file dependencies). These files are *prepended* to the system
+ path before the model is loaded.
+ :param conda_env: Either a dictionary representation of a Conda environment or the path to a
+ Conda environment yaml file. If provided, this describes the environment
+ this model should be run in.
+ :return: Model configuration containing model info.
+ """
+ if os.path.exists(path):
+ raise MlflowException(
+ message="Path '{}' already exists".format(path),
+ error_code=RESOURCE_ALREADY_EXISTS)
+ os.makedirs(path)
+
 code = None
 data = None
 env = None
- if data_path:
- model_file = _copy_file_or_tree(src=data_path, dst=dst_path, dst_dir="data")
+ if data_path is not None:
+ model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
 data = model_file
- if code_path:
- for path in code_path:
- _copy_file_or_tree(src=path, dst=dst_path, dst_dir="code")
+ if code_paths is not None:
+ for code_path in code_paths:
+ _copy_file_or_tree(src=code_path, dst=path, dst_dir="code")
 code = "code"
- if conda_env:
- shutil.copy(src=conda_env, dst=os.path.join(dst_path, "mlflow_env.yml"))
+ if conda_env is not None:
+ shutil.copy(src=conda_env, dst=os.path.join(path, "mlflow_env.yml"))
 env = "mlflow_env.yml"
- add_to_model(model, loader_module=loader_module, code=code, data=data, env=env)
- model.save(os.path.join(dst_path, 'MLmodel'))
- return model
-
-
-def log_model(artifact_path, **kwargs):
- """
- Export model in Python function form and log it with current MLflow tracking service.
-
- Model is exported by calling :py:meth:`save_model` and logging the result with
- :py:meth:`mlflow.tracking.log_artifacts`.
- """
- with TempDir() as tmp:
- local_path = tmp.path(artifact_path)
- run_id = active_run().info.run_uuid
- if 'model' in kwargs:
- raise Exception("Unused argument 'model'. log_model creates a new model object")
-
- save_model(dst_path=local_path, model=Model(artifact_path=artifact_path, run_id=run_id),
- **kwargs)
- log_artifacts(local_path, artifact_path)
-
-
-def get_module_loader_src(src_path, dst_path):
- """
- Generate Python source of the model loader.
-
- Model loader contains ``load_pyfunc`` method with no parameters. It hardcodes model
- loading of the given model into a Python source. This is done so that the exported model has no
- unnecessary dependencies on MLflow or any other configuration file format or parsing library.
-
- :param src_path: Current path to the model.
- :param dst_path: Relative or absolute path where the model will be stored in the deployment
- environment. 
- :return: Python source code of the model loader as string.
-
- """
- conf_path = os.path.join(src_path, "MLmodel")
- model = Model.load(conf_path)
- if FLAVOR_NAME not in model.flavors:
- raise Exception("Format '{format}' not found not in {path}.".format(format=FLAVOR_NAME,
- path=conf_path))
- conf = model.flavors[FLAVOR_NAME]
- update_path = ""
- if CODE in conf and conf[CODE]:
- src_code_path = os.path.join(src_path, conf[CODE])
- dst_code_path = os.path.join(dst_path, conf[CODE])
- code_path = ["os.path.abspath('%s')" % x
- for x in [dst_code_path] + _get_code_dirs(src_code_path, dst_code_path)]
- update_path = "sys.path = {} + sys.path; ".format("[%s]" % ",".join(code_path))
-
- data_path = os.path.join(dst_path, conf[DATA]) if (DATA in conf) else dst_path
- return loader_template.format(update_path=update_path, main=conf[MAIN], data_path=data_path)
+ mlflow.pyfunc.add_to_model(
+ mlflow_model, loader_module=loader_module, code=code, data=data, env=env)
+ mlflow_model.save(os.path.join(path, 'MLmodel'))
+ return mlflow_model
 loader_template = """
diff --git a/mlflow/pyfunc/backend.py b/mlflow/pyfunc/backend.py
new file mode 100644
index 0000000000000..ce8700a72f545
--- /dev/null
+++ b/mlflow/pyfunc/backend.py
@@ -0,0 +1,148 @@
+import logging
+import os
+
+import subprocess
+
+from mlflow.models import FlavorBackend
+from mlflow.models.docker_utils import _build_image, DISABLE_ENV_CREATION
+from mlflow.pyfunc import ENV
+from mlflow.pyfunc import scoring_server
+
+from mlflow.projects import _get_or_create_conda_env, _get_conda_bin_executable
+from mlflow.tracking.artifact_utils import _download_artifact_from_uri
+from mlflow.utils.file_utils import path_to_local_file_uri
+from mlflow.version import VERSION
+
+_logger = logging.getLogger(__name__)
+
+
+class PyFuncBackend(FlavorBackend):
+ """
+ Flavor backend implementation for generic Python models.
+ """
+
+ def __init__(self, config, workers=1, no_conda=False, install_mlflow=False, **kwargs):
+ super(PyFuncBackend, self).__init__(config=config, **kwargs)
+ self._nworkers = workers or 1
+ self._no_conda = no_conda
+ self._install_mlflow = install_mlflow
+
+ def predict(self, model_uri, input_path, output_path, content_type, json_format):
+ """
+ Generate predictions using a generic Python model saved with MLflow.
+ Return the prediction results as JSON.
+ """
+ local_path = _download_artifact_from_uri(model_uri)
+ # NB: Absolute Windows paths do not work with MLflow APIs, so use a file URI to ensure
+ # platform compatibility.
+ local_uri = path_to_local_file_uri(local_path)
+ if not self._no_conda and ENV in self._config:
+ conda_env_path = os.path.join(local_path, self._config[ENV])
+ command = ('python -c "from mlflow.pyfunc.scoring_server import _predict; _predict('
+ 'model_uri={model_uri}, '
+ 'input_path={input_path}, '
+ 'output_path={output_path}, '
+ 'content_type={content_type}, '
+ 'json_format={json_format})"'
+ ).format(
+ model_uri=repr(local_uri),
+ input_path=repr(input_path),
+ output_path=repr(output_path),
+ content_type=repr(content_type),
+ json_format=repr(json_format))
+ return _execute_in_conda_env(conda_env_path, command, self._install_mlflow)
+ else:
+ scoring_server._predict(local_uri, input_path, output_path, content_type,
+ json_format)
+
+ def serve(self, model_uri, port, host):
+ """
+ Serve pyfunc model locally.
+ """
+ local_path = _download_artifact_from_uri(model_uri)
+ # NB: Absolute Windows paths do not work with MLflow APIs, so use a file URI to ensure
+ # platform compatibility. 
+ local_uri = path_to_local_file_uri(local_path)
+ command = ("gunicorn --timeout 60 -b {host}:{port} -w {nworkers} "
+ "mlflow.pyfunc.scoring_server.wsgi:app").format(
+ host=host,
+ port=port,
+ nworkers=self._nworkers)
+ command_env = os.environ.copy()
+ command_env[scoring_server._SERVER_MODEL_PATH] = local_uri
+ if not self._no_conda and ENV in self._config:
+ conda_env_path = os.path.join(local_path, self._config[ENV])
+ return _execute_in_conda_env(conda_env_path, command, self._install_mlflow,
+ command_env=command_env)
+ else:
+ _logger.info("=== Running command '%s'", command)
+ subprocess.Popen(command.split(" "), env=command_env).wait()
+
+ def can_score_model(self):
+ if self._no_conda:
+ # no_conda => we are already running in Python; dependencies are assumed to be installed.
+ return True
+ conda_path = _get_conda_bin_executable("conda")
+ try:
+ p = subprocess.Popen([conda_path, "--version"], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ _, _ = p.communicate()
+ return p.wait() == 0
+ except FileNotFoundError:
+ # Cannot find conda
+ return False
+
+ def build_image(self, model_uri, image_name, install_mlflow=False, mlflow_home=None):
+
+ def copy_model_into_container(dockerfile_context_dir):
+ model_cwd = os.path.join(dockerfile_context_dir, "model_dir")
+ os.mkdir(model_cwd)
+ model_path = _download_artifact_from_uri(model_uri, output_path=model_cwd)
+ return """
+ COPY {model_dir} /opt/ml/model
+ RUN python -c \
+ 'from mlflow.models.container import _install_pyfunc_deps;\
+ _install_pyfunc_deps("/opt/ml/model", install_mlflow={install_mlflow})'
+ ENV {disable_env}="true"
+ """.format(
+ disable_env=DISABLE_ENV_CREATION,
+ model_dir=os.path.join("model_dir", os.path.basename(model_path)),
+ install_mlflow=repr(install_mlflow)
+ )
+
+ # The pyfunc image runs the same server as the SageMaker image
+ pyfunc_entrypoint = (
+ 'ENTRYPOINT ["python", "-c", "from mlflow.models import container as C; C._serve()"]'
+ )
+ _build_image(
+ image_name=image_name,
+ mlflow_home=mlflow_home,
+ custom_setup_steps_hook=copy_model_into_container,
+ entrypoint=pyfunc_entrypoint,
+ )
+
+
+def _execute_in_conda_env(conda_env_path, command, install_mlflow, command_env=None):
+ if command_env is None:
+ command_env = os.environ
+ env_id = os.environ.get("MLFLOW_HOME", VERSION) if install_mlflow else None
+ conda_env_name = _get_or_create_conda_env(conda_env_path, env_id=env_id)
+ activate_path = _get_conda_bin_executable("activate")
+ activate_conda_env = ["source {0} {1} 1>&2".format(activate_path, conda_env_name)]
+
+ if install_mlflow:
+ if "MLFLOW_HOME" in os.environ: # dev version
+ install_mlflow = "pip install -e {} 1>&2".format(os.environ["MLFLOW_HOME"])
+ else:
+ install_mlflow = "pip install mlflow=={} 1>&2".format(VERSION)
+
+ activate_conda_env += [install_mlflow]
+
+ command = " && ".join(activate_conda_env + [command])
+ _logger.info("=== Running command '%s'", command)
+ child = subprocess.Popen(["bash", "-c", command], close_fds=True, env=command_env)
+ rc = child.wait()
+ if rc != 0:
+ raise Exception("Command '{0}' returned non-zero return code. 
Return code = {1}".format( + command, rc + )) diff --git a/mlflow/pyfunc/cli.py b/mlflow/pyfunc/cli.py deleted file mode 100644 index c29d68fa56c0d..0000000000000 --- a/mlflow/pyfunc/cli.py +++ /dev/null @@ -1,100 +0,0 @@ -from __future__ import absolute_import - -import os -from six.moves import shlex_quote -import subprocess -import sys - - -import click -import pandas - -from mlflow.pyfunc import load_pyfunc, scoring_server, _load_model_env -from mlflow.tracking.utils import _get_model_log_dir -from mlflow.utils import cli_args -from mlflow.utils.logging_utils import eprint -from mlflow.projects import _get_conda_bin_executable, _get_or_create_conda_env - - -def _rerun_in_conda(conda_env_path): - """ Rerun CLI command inside a to-be-created conda environment.""" - conda_env_name = _get_or_create_conda_env(conda_env_path) - activate_path = _get_conda_bin_executable("activate") - commands = [] - commands.append("source {} {}".format(activate_path, conda_env_name)) - safe_argv = [shlex_quote(arg) for arg in sys.argv] - commands.append(" ".join(safe_argv) + " --no-conda") - commandline = " && ".join(commands) - eprint("=== Running command '{}'".format(commandline)) - child = subprocess.Popen(["bash", "-c", commandline], close_fds=True) - exit_code = child.wait() - return exit_code - - -@click.group("pyfunc") -def commands(): - """ - Serve Python models locally. - - To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI - environment variable to the URL of the desired server. - """ - pass - - -@commands.command("serve") -@cli_args.MODEL_PATH -@cli_args.RUN_ID -@click.option("--port", "-p", default=5000, help="Server port. [default: 5000]") -@click.option("--host", "-h", default="127.0.0.1", help="Server host. [default: 127.0.0.1]") -@cli_args.NO_CONDA -def serve(model_path, run_id, port, host, no_conda): - """ - Serve a PythonFunction model saved with MLflow. - - If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run; - otherwise it is treated as a local path. - """ - if run_id: - model_path = _get_model_log_dir(model_path, run_id) - - model_env_file = _load_model_env(model_path) - if not no_conda and model_env_file is not None: - conda_env_path = os.path.join(model_path, model_env_file) - return _rerun_in_conda(conda_env_path) - - app = scoring_server.init(load_pyfunc(model_path)) - app.run(port=port, host=host) - - -@commands.command("predict") -@cli_args.MODEL_PATH -@cli_args.RUN_ID -@click.option("--input-path", "-i", help="CSV containing pandas DataFrame to predict against.", - required=True) -@click.option("--output-path", "-o", help="File to output results to as CSV file." + - " If not provided, output to stdout.") -@cli_args.NO_CONDA -def predict(model_path, run_id, input_path, output_path, no_conda): - """ - Load a pandas DataFrame and runs a python_function model saved with MLflow against it. - Return the prediction results as a CSV-formatted pandas DataFrame. - - If a ``run-id`` is specified, ``model-path`` is treated as an artifact path within that run; - otherwise it is treated as a local path. 
- """
- if run_id:
- model_path = _get_model_log_dir(model_path, run_id)
-
- model_env_file = _load_model_env(model_path)
- if not no_conda and model_env_file is not None:
- conda_env_path = os.path.join(model_path, model_env_file)
- return _rerun_in_conda(conda_env_path)
-
- model = load_pyfunc(model_path)
- df = pandas.read_csv(input_path)
- result = model.predict(df)
- out_stream = sys.stdout
- if output_path:
- out_stream = open(output_path, 'w')
- pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
diff --git a/mlflow/pyfunc/model.py b/mlflow/pyfunc/model.py
new file mode 100644
index 0000000000000..b65d74957f65e
--- /dev/null
+++ b/mlflow/pyfunc/model.py
@@ -0,0 +1,243 @@
+"""
+The ``mlflow.pyfunc.model`` module defines logic for saving and loading custom "python_function"
+models with a user-defined ``PythonModel`` subclass.
+"""
+
+import os
+import shutil
+import yaml
+from abc import ABCMeta, abstractmethod
+
+import cloudpickle
+
+import mlflow.pyfunc
+import mlflow.utils
+from mlflow.exceptions import MlflowException
+from mlflow.models import Model
+from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS
+from mlflow.tracking.artifact_utils import _download_artifact_from_uri
+from mlflow.utils.environment import _mlflow_conda_env
+from mlflow.utils.model_utils import _get_flavor_configuration
+from mlflow.utils.file_utils import TempDir, _copy_file_or_tree
+
+CONFIG_KEY_ARTIFACTS = "artifacts"
+CONFIG_KEY_ARTIFACT_RELATIVE_PATH = "path"
+CONFIG_KEY_ARTIFACT_URI = "uri"
+CONFIG_KEY_PYTHON_MODEL = "python_model"
+CONFIG_KEY_CLOUDPICKLE_VERSION = "cloudpickle_version"
+
+
+def get_default_conda_env():
+ """
+ :return: The default Conda environment for MLflow Models produced by calls to
+ :func:`save_model() <mlflow.pyfunc.save_model>`
+ and :func:`log_model() <mlflow.pyfunc.log_model>` when a user-defined subclass of
+ :class:`PythonModel` is provided.
+ """
+ return _mlflow_conda_env(
+ additional_conda_deps=None,
+ additional_pip_deps=[
+ "cloudpickle=={}".format(cloudpickle.__version__),
+ ],
+ additional_conda_channels=None)
+
+
+class PythonModel(object):
+ """
+ Represents a generic Python model that evaluates inputs and produces API-compatible outputs.
+ By subclassing :class:`~PythonModel`, users can create customized MLflow models with the
+ "python_function" ("pyfunc") flavor, leveraging custom inference logic and artifact
+ dependencies.
+ """
+ __metaclass__ = ABCMeta
+
+ def load_context(self, context):
+ """
+ Loads artifacts from the specified :class:`~PythonModelContext` that can be used by
+ :func:`~PythonModel.predict` when evaluating inputs. When loading an MLflow model with
+ :func:`~load_pyfunc`, this method is called as soon as the :class:`~PythonModel` is
+ constructed.
+
+ The same :class:`~PythonModelContext` will also be available during calls to
+ :func:`~PythonModel.predict`, but it may be more efficient to override this method
+ and load artifacts from the context at model load time.
+
+ :param context: A :class:`~PythonModelContext` instance containing artifacts that the model
+ can use to perform inference.
+ """
+
+ @abstractmethod
+ def predict(self, context, model_input):
+ """
+ Evaluates a pyfunc-compatible input and produces a pyfunc-compatible output.
+ For more information about the pyfunc input/output API, see the :ref:`pyfunc-inference-api`.
+
+ :param context: A :class:`~PythonModelContext` instance containing artifacts that the model
+ can use to perform inference. 
+ :param model_input: A pyfunc-compatible input for the model to evaluate.
+ """
+
+
+class PythonModelContext(object):
+ """
+ A collection of artifacts that a :class:`~PythonModel` can use when performing inference.
+ :class:`~PythonModelContext` objects are created *implicitly* by the
+ :func:`save_model() <mlflow.pyfunc.save_model>` and
+ :func:`log_model() <mlflow.pyfunc.log_model>` persistence methods, using the contents specified
+ by the ``artifacts`` parameter of these methods.
+ """
+
+ def __init__(self, artifacts):
+ """
+ :param artifacts: A dictionary of ``<name, artifact_path>`` entries, where ``artifact_path``
+ is an absolute filesystem path to a given artifact.
+ """
+ self._artifacts = artifacts
+
+ @property
+ def artifacts(self):
+ """
+ :return: A dictionary containing ``<name, artifact_path>`` entries, where ``artifact_path``
+ is an absolute filesystem path to the artifact.
+ """
+ return self._artifacts
+
+
+def _save_model_with_class_artifacts_params(path, python_model, artifacts=None, conda_env=None,
+ code_paths=None, mlflow_model=Model()):
+ """
+ :param path: The path to which to save the Python model.
+ :param python_model: An instance of a subclass of :class:`~PythonModel`. ``python_model``
+ defines how the model loads artifacts and how it performs inference.
+ :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries.
+ Remote artifact URIs
+ are resolved to absolute filesystem paths, producing a dictionary of
+ ``<name, local_path>`` entries. ``python_model`` can reference these
+ resolved entries as the ``artifacts`` property of the ``context``
+ attribute. If ``None``, no artifacts are added to the model.
+ :param conda_env: Either a dictionary representation of a Conda environment or the
+ path to a Conda environment yaml file. If provided, this describes the
+ environment this model should be run in. At minimum, it should specify
+ the dependencies
+ contained in :func:`get_default_conda_env()`. If ``None``, the default
+ :func:`get_default_conda_env()` environment is added to the model.
+ :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
+ containing file dependencies). These files are *prepended* to the system
+ path before the model is loaded.
+ :param mlflow_model: The model configuration to which to add the ``mlflow.pyfunc`` flavor.
+ """
+ if os.path.exists(path):
+ raise MlflowException(
+ message="Path '{}' already exists".format(path),
+ error_code=RESOURCE_ALREADY_EXISTS)
+ os.makedirs(path)
+
+ custom_model_config_kwargs = {
+ CONFIG_KEY_CLOUDPICKLE_VERSION: cloudpickle.__version__,
+ }
+ if isinstance(python_model, PythonModel):
+ saved_python_model_subpath = "python_model.pkl"
+ with open(os.path.join(path, saved_python_model_subpath), "wb") as out:
+ cloudpickle.dump(python_model, out)
+ custom_model_config_kwargs[CONFIG_KEY_PYTHON_MODEL] = saved_python_model_subpath
+ else:
+ raise MlflowException(
+ message=("`python_model` must be a subclass of `PythonModel`. 
Instead, found an" + " object of type: {python_model_type}".format( + python_model_type=type(python_model))), + error_code=INVALID_PARAMETER_VALUE) + + if artifacts: + saved_artifacts_config = {} + with TempDir() as tmp_artifacts_dir: + tmp_artifacts_config = {} + saved_artifacts_dir_subpath = "artifacts" + for artifact_name, artifact_uri in artifacts.items(): + tmp_artifact_path = _download_artifact_from_uri( + artifact_uri=artifact_uri, output_path=tmp_artifacts_dir.path()) + tmp_artifacts_config[artifact_name] = tmp_artifact_path + saved_artifact_subpath = os.path.join( + saved_artifacts_dir_subpath, + os.path.relpath(path=tmp_artifact_path, start=tmp_artifacts_dir.path())) + saved_artifacts_config[artifact_name] = { + CONFIG_KEY_ARTIFACT_RELATIVE_PATH: saved_artifact_subpath, + CONFIG_KEY_ARTIFACT_URI: artifact_uri, + } + + shutil.move(tmp_artifacts_dir.path(), os.path.join(path, saved_artifacts_dir_subpath)) + custom_model_config_kwargs[CONFIG_KEY_ARTIFACTS] = saved_artifacts_config + + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env() + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + + saved_code_subpath = None + if code_paths is not None: + saved_code_subpath = "code" + for code_path in code_paths: + _copy_file_or_tree(src=code_path, dst=path, dst_dir=saved_code_subpath) + + mlflow.pyfunc.add_to_model(model=mlflow_model, loader_module=__name__, code=saved_code_subpath, + env=conda_env_subpath, **custom_model_config_kwargs) + mlflow_model.save(os.path.join(path, 'MLmodel')) + + +def _load_pyfunc(model_path): + pyfunc_config = _get_flavor_configuration( + model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) + + python_model_cloudpickle_version = pyfunc_config.get(CONFIG_KEY_CLOUDPICKLE_VERSION, None) + if python_model_cloudpickle_version is None: + mlflow.pyfunc._logger.warning( + "The version of CloudPickle used to save the model could not be found in the MLmodel" + " configuration") + elif python_model_cloudpickle_version != cloudpickle.__version__: + # CloudPickle does not have a well-defined cross-version compatibility policy. Micro version + # releases have been known to cause incompatibilities. 
Therefore, we match on the full + # library version + mlflow.pyfunc._logger.warning( + "The version of CloudPickle that was used to save the model, `CloudPickle %s`, differs" + " from the version of CloudPickle that is currently running, `CloudPickle %s`, and may" + " be incompatible", + python_model_cloudpickle_version, cloudpickle.__version__) + + python_model_subpath = pyfunc_config.get(CONFIG_KEY_PYTHON_MODEL, None) + if python_model_subpath is None: + raise MlflowException( + "Python model path was not specified in the model configuration") + with open(os.path.join(model_path, python_model_subpath), "rb") as f: + python_model = cloudpickle.load(f) + + artifacts = {} + for saved_artifact_name, saved_artifact_info in\ + pyfunc_config.get(CONFIG_KEY_ARTIFACTS, {}).items(): + artifacts[saved_artifact_name] = os.path.join( + model_path, saved_artifact_info[CONFIG_KEY_ARTIFACT_RELATIVE_PATH]) + + context = PythonModelContext(artifacts=artifacts) + python_model.load_context(context=context) + return _PythonModelPyfuncWrapper(python_model=python_model, context=context) + + +class _PythonModelPyfuncWrapper(object): + """ + Wrapper class that creates a predict function such that + predict(model_input: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame) + """ + + def __init__(self, python_model, context): + """ + :param python_model: An instance of a subclass of :class:`~PythonModel`. + :param context: A :class:`~PythonModelContext` instance containing artifacts that + ``python_model`` may use when performing inference. + """ + self.python_model = python_model + self.context = context + + def predict(self, model_input): + return self.python_model.predict(self.context, model_input) diff --git a/mlflow/pyfunc/scoring_server.py b/mlflow/pyfunc/scoring_server.py deleted file mode 100644 index 3e9a3352d989c..0000000000000 --- a/mlflow/pyfunc/scoring_server.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Scoring server for python model format. -The passed int model is expected to have function: - predict(pandas.Dataframe) -> pandas.DataFrame - -Input, expected intext/csv or application/json format, -is parsed into pandas.DataFrame and passed to the model. - -Defines two endpoints: - /ping used for health check - /invocations used for scoring -""" -from __future__ import print_function - -import json - -import pandas as pd -import flask -from mlflow.utils.rest_utils import NumpyEncoder - -try: - from StringIO import StringIO -except ImportError: - from io import StringIO - -from mlflow.utils import get_jsonable_obj - - -def init(model): - """ - Initialize the server. Loads pyfunc model from the path. - """ - app = flask.Flask(__name__) - - @app.route('/ping', methods=['GET']) - def ping(): # pylint: disable=unused-variable - """ - Determine if the container is working and healthy. - We declare it healthy if we can load the model successfully. - """ - health = model is not None - status = 200 if health else 404 - return flask.Response(response='\n', status=status, mimetype='application/json') - - @app.route('/invocations', methods=['POST']) - def transformation(): # pylint: disable=unused-variable - """ - Do an inference on a single batch of data. In this sample server, - we take data as CSV or json, convert it to a pandas data frame, - generate predictions and convert them back to CSV. 
- """
- # Convert from CSV to pandas
- if flask.request.content_type == 'text/csv':
- data = flask.request.data.decode('utf-8')
- s = StringIO(data)
- data = pd.read_csv(s)
- elif flask.request.content_type == 'application/json':
- data = flask.request.data.decode('utf-8')
- s = StringIO(data)
- data = pd.read_json(s, orient="records")
- else:
- return flask.Response(
- response='This predictor only supports CSV or JSON data, got %s' % str(
- flask.request.content_type), status=415, mimetype='text/plain')
-
- # Do the prediction
- predictions = get_jsonable_obj(model.predict(data))
- result = json.dumps(predictions, cls=NumpyEncoder)
- return flask.Response(response=result, status=200, mimetype='application/json')
-
- return app
diff --git a/mlflow/pyfunc/scoring_server/__init__.py b/mlflow/pyfunc/scoring_server/__init__.py
new file mode 100644
index 0000000000000..05d2e6ecac71b
--- /dev/null
+++ b/mlflow/pyfunc/scoring_server/__init__.py
@@ -0,0 +1,264 @@
+"""
+Scoring server for python model format.
+The passed in model is expected to have a function:
+ predict(pandas.Dataframe) -> pandas.DataFrame
+
+Input, expected in text/csv or application/json format,
+is parsed into pandas.DataFrame and passed to the model.
+
+Defines two endpoints:
+ /ping used for health check
+ /invocations used for scoring
+"""
+from __future__ import print_function
+
+from collections import OrderedDict
+import flask
+import json
+from json import JSONEncoder
+import logging
+import numpy as np
+import pandas as pd
+from six import reraise
+import sys
+import traceback
+
+# NB: We need to be careful what we import from mlflow here. The scoring server is used from within
+# the model's conda environment. The version of mlflow doing the serving (outside) and the version
+# of mlflow in the model's conda environment (inside) can differ. We should therefore keep mlflow
+# dependencies to the minimum here.
+# All of the mlflow dependencies below need to be backwards compatible.
+from mlflow.exceptions import MlflowException
+
+try:
+ from mlflow.pyfunc import load_model
+except ImportError:
+ from mlflow.pyfunc import load_pyfunc as load_model
+from mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, BAD_REQUEST
+from mlflow.server.handlers import catch_mlflow_exception
+
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+_SERVER_MODEL_PATH = "__pyfunc_model_path__"
+
+CONTENT_TYPE_CSV = "text/csv"
+CONTENT_TYPE_JSON = "application/json"
+CONTENT_TYPE_JSON_RECORDS_ORIENTED = "application/json; format=pandas-records"
+CONTENT_TYPE_JSON_SPLIT_ORIENTED = "application/json; format=pandas-split"
+CONTENT_TYPE_JSON_SPLIT_NUMPY = "application/json-numpy-split"
+
+CONTENT_TYPES = [
+ CONTENT_TYPE_CSV,
+ CONTENT_TYPE_JSON,
+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,
+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,
+ CONTENT_TYPE_JSON_SPLIT_NUMPY
+]
+
+_logger = logging.getLogger(__name__)
+
+
+def parse_json_input(json_input, orient="split"):
+ """
+ :param json_input: A JSON-formatted string representation of a Pandas DataFrame, or a stream
+ containing such a string representation.
+ :param orient: The Pandas DataFrame orientation of the JSON input. This is either 'split'
+ or 'records'.
+ """
+ # pylint: disable=broad-except
+ try:
+ return pd.read_json(json_input, orient=orient, dtype=False)
+ except Exception:
+ _handle_serving_error(
+ error_message=(
+ "Failed to parse input as a Pandas DataFrame. 
Ensure that the input is"
+ " a valid JSON-formatted Pandas DataFrame with the `{orient}` orient"
+ " produced using the `pandas.DataFrame.to_json(..., orient='{orient}')`"
+ " method.".format(orient=orient)),
+ error_code=MALFORMED_REQUEST)
+
+
+def parse_csv_input(csv_input):
+ """
+ :param csv_input: A CSV-formatted string representation of a Pandas DataFrame, or a stream
+ containing such a string representation.
+ """
+ # pylint: disable=broad-except
+ try:
+ return pd.read_csv(csv_input)
+ except Exception:
+ _handle_serving_error(
+ error_message=(
+ "Failed to parse input as a Pandas DataFrame. Ensure that the input is"
+ " a valid CSV-formatted Pandas DataFrame produced using the"
+ " `pandas.DataFrame.to_csv()` method."),
+ error_code=MALFORMED_REQUEST)
+
+
+def parse_split_oriented_json_input_to_numpy(json_input):
+ """
+ :param json_input: A JSON-formatted string representation of a Pandas DataFrame with split
+ orient, or a stream containing such a string representation.
+ """
+ # pylint: disable=broad-except
+ try:
+ json_input_list = json.loads(json_input, object_pairs_hook=OrderedDict)
+ return pd.DataFrame(index=json_input_list['index'],
+ data=np.array(json_input_list['data'], dtype=object),
+ columns=json_input_list['columns']).infer_objects()
+ except Exception:
+ _handle_serving_error(
+ error_message=(
+ "Failed to parse input as a Numpy array. Ensure that the input is"
+ " a valid JSON-formatted Pandas DataFrame with the split orient"
+ " produced using the `pandas.DataFrame.to_json(..., orient='split')`"
+ " method."
+ ),
+ error_code=MALFORMED_REQUEST)
+
+
+def predictions_to_json(raw_predictions, output):
+ predictions = _get_jsonable_obj(raw_predictions, pandas_orient="records")
+ json.dump(predictions, output, cls=NumpyEncoder)
+
+
+def _handle_serving_error(error_message, error_code):
+ """
+ Logs information about an exception thrown by model inference code that is currently being
+ handled and reraises it with the specified error message. The exception stack trace
+ is also included in the reraised error message.
+
+ :param error_message: A message for the reraised exception.
+ :param error_code: An appropriate error code for the reraised exception. This should be one of
+ the codes listed in the `mlflow.protos.databricks_pb2` proto.
+ """
+ traceback_buf = StringIO()
+ traceback.print_exc(file=traceback_buf)
+ reraise(MlflowException,
+ MlflowException(
+ message=error_message,
+ error_code=error_code,
+ stack_trace=traceback_buf.getvalue()))
+
+
+def init(model):
+ """
+ Initialize the server. Loads pyfunc model from the path.
+ """
+ app = flask.Flask(__name__)
+
+ @app.route('/ping', methods=['GET'])
+ def ping(): # pylint: disable=unused-variable
+ """
+ Determine if the container is working and healthy.
+ We declare it healthy if we can load the model successfully.
+ """
+ health = model is not None
+ status = 200 if health else 404
+ return flask.Response(response='\n', status=status, mimetype='application/json')
+
+ @app.route('/invocations', methods=['POST'])
+ @catch_mlflow_exception
+ def transformation(): # pylint: disable=unused-variable
+ """
+ Do an inference on a single batch of data. In this sample server,
+ we take data as CSV or JSON, convert it to a Pandas DataFrame or Numpy array,
+ generate predictions and convert them back to JSON. 
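+
+ For example, a client could score a pandas DataFrame ``df`` against this endpoint roughly
+ as follows (a sketch; the host, port, and use of the ``requests`` package are assumptions)::
+
+     import requests
+     response = requests.post(
+         "http://127.0.0.1:5000/invocations",
+         data=df.to_json(orient="split"),
+         headers={"Content-Type": "application/json; format=pandas-split"})
+     print(response.json())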
+ """
+ # Convert from CSV to pandas
+ if flask.request.content_type == CONTENT_TYPE_CSV:
+ data = flask.request.data.decode('utf-8')
+ csv_input = StringIO(data)
+ data = parse_csv_input(csv_input=csv_input)
+ elif flask.request.content_type in [CONTENT_TYPE_JSON, CONTENT_TYPE_JSON_SPLIT_ORIENTED]:
+ data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
+ orient="split")
+ elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
+ data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
+ orient="records")
+ elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:
+ data = parse_split_oriented_json_input_to_numpy(flask.request.data.decode('utf-8'))
+ else:
+ return flask.Response(
+ response=("This predictor only supports the following content types:"
+ " {supported_content_types}. Got '{received_content_type}'.".format(
+ supported_content_types=CONTENT_TYPES,
+ received_content_type=flask.request.content_type)),
+ status=415,
+ mimetype='text/plain')
+
+ # Do the prediction
+ # pylint: disable=broad-except
+ try:
+ raw_predictions = model.predict(data)
+ except Exception:
+ _handle_serving_error(
+ error_message=(
+ "Encountered an unexpected error while evaluating the model. Verify"
+ " that the serialized input DataFrame is compatible with the model for"
+ " inference."),
+ error_code=BAD_REQUEST)
+ result = StringIO()
+ predictions_to_json(raw_predictions, result)
+ return flask.Response(response=result.getvalue(), status=200, mimetype='application/json')
+
+ return app
+
+
+def _predict(model_uri, input_path, output_path, content_type, json_format):
+ pyfunc_model = load_model(model_uri)
+ if input_path is None:
+ input_path = sys.stdin
+
+ if content_type == "json":
+ df = parse_json_input(input_path, orient=json_format)
+ elif content_type == "csv":
+ df = parse_csv_input(input_path)
+ else:
+ raise Exception("Unknown content type '{}'".format(content_type))
+
+ if output_path is None:
+ predictions_to_json(pyfunc_model.predict(df), sys.stdout)
+ else:
+ with open(output_path, "w") as fout:
+ predictions_to_json(pyfunc_model.predict(df), fout)
+
+
+def _serve(model_uri, port, host):
+ pyfunc_model = load_model(model_uri)
+ init(pyfunc_model).run(port=port, host=host)
+
+
+class NumpyEncoder(JSONEncoder):
+ """ Special json encoder for numpy types.
+ Note that some numpy types don't have a native python equivalent,
+ so json.dumps will raise a TypeError.
+ In that case, you need to convert your numpy types into their closest python equivalents.
+ """
+
+ def default(self, o): # pylint: disable=E0202
+ if isinstance(o, np.generic):
+ return np.asscalar(o)
+ return JSONEncoder.default(self, o)
+
+
+def _get_jsonable_obj(data, pandas_orient="records"):
+ """Attempt to make the data json-able via standard library.
+ Look for some commonly used types that are not jsonable and convert them into json-able ones.
+ Unknown data types are returned as is.
+
+ :param data: data to be converted, works with pandas and numpy, rest will be returned as is.
+ :param pandas_orient: If ``data`` is a Pandas DataFrame, it will be converted to a JSON
+ dictionary using this Pandas serialization orientation. 
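+
+ A quick illustration of the conversions (a sketch; unknown types pass through unchanged)::
+
+     >>> _get_jsonable_obj(np.array([[1, 2]]))
+     [[1, 2]]
+     >>> _get_jsonable_obj(pd.DataFrame({"a": [1]}), pandas_orient="records")
+     [{'a': 1}]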
+ """
+ if isinstance(data, np.ndarray):
+ return data.tolist()
+ if isinstance(data, pd.DataFrame):
+ return data.to_dict(orient=pandas_orient)
+ if isinstance(data, pd.Series):
+ return pd.DataFrame(data).to_dict(orient=pandas_orient)
+ else: # by default just return whatever this is and hope for the best
+ return data
diff --git a/mlflow/pyfunc/scoring_server/wsgi.py b/mlflow/pyfunc/scoring_server/wsgi.py
new file mode 100644
index 0000000000000..d793eb42a36ca
--- /dev/null
+++ b/mlflow/pyfunc/scoring_server/wsgi.py
@@ -0,0 +1,6 @@
+import os
+from mlflow.pyfunc import scoring_server
+from mlflow.pyfunc import load_model
+
+
+app = scoring_server.init(load_model(os.environ[scoring_server._SERVER_MODEL_PATH]))
diff --git a/mlflow/pyfunc/utils.py b/mlflow/pyfunc/utils.py
new file mode 100644
index 0000000000000..7adf74e2d86cf
--- /dev/null
+++ b/mlflow/pyfunc/utils.py
@@ -0,0 +1,21 @@
+import os
+import sys
+
+
+def _add_code_to_system_path(code_path):
+ sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
+
+
+def _get_code_dirs(src_code_path, dst_code_path=None):
+ """
+ Obtains the names of the subdirectories contained under the specified source code
+ path and joins them with the specified destination code path.
+
+ :param src_code_path: The path of the source code directory for which to list subdirectories.
+ :param dst_code_path: The destination directory path to which subdirectory names should be
+ joined.
+ """
+ if not dst_code_path:
+ dst_code_path = src_code_path
+ return [(os.path.join(dst_code_path, x)) for x in os.listdir(src_code_path)
+ if os.path.isdir(os.path.join(src_code_path, x)) and not x == "__pycache__"]
diff --git a/mlflow/pytorch.py b/mlflow/pytorch.py
deleted file mode 100644
index f14de88d73746..0000000000000
--- a/mlflow/pytorch.py
+++ /dev/null
@@ -1,208 +0,0 @@
-"""
-The ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module
-exports PyTorch models with the following flavors:
-
-PyTorch (native) format
- This is the main flavor that can be loaded back into PyTorch.
-:py:mod:`mlflow.pyfunc`
- Produced for use by generic pyfunc-based deployment tools and batch inference.
-"""
-
-from __future__ import absolute_import
-
-import os
-
-import numpy as np
-import pandas as pd
-import torch
-
-from mlflow import pyfunc
-from mlflow.models import Model
-import mlflow.tracking
-
-
-FLAVOR_NAME = "pytorch"
-
-
-def log_model(pytorch_model, artifact_path, conda_env=None, **kwargs):
- """
- Log a PyTorch model as an MLflow artifact for the current run.
-
- :param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as
- input and produce a single output tensor.
- :param artifact_path: Run-relative artifact path.
- :param conda_env: Path to a Conda environment file. If provided, this defines the environment
- for the model. At minimum, it should specify python, pytorch, and mlflow with appropriate
- versions.
- :param kwargs: kwargs to pass to ``torch.save`` method. 
- - >>> import torch - >>> from torch.autograd import Variable - >>> import mlflow - >>> import mlflow.pytorch - >>> # X data - >>> x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]])) - >>> # Y data with its expected value: labels - >>> y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]])) - >>> # Partial Model example modified from Sung Kim - >>> # https://github.com/hunkim/PyTorchZeroToAll - >>> class Model(torch.nn.Module): - >>> def __init__(self): - >>> super(Model, self).__init__() - >>> self.linear = torch.nn.Linear(1, 1) # One in and one out - >>> def forward(self, x): - >>> y_pred = self.linear(x) - >>> return y_pred - >>> # our model - >>> model = Model() - >>> criterion = torch.nn.MSELoss(size_average=False) - >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01) - >>> # Training loop - >>> for epoch in range(500): - >>> # Forward pass: Compute predicted y by passing x to the model - >>> y_pred = model(x_data) - >>> # Compute and print loss - >>> loss = criterion(y_pred, y_data) - >>> print(epoch, loss.data[0]) - >>> #Zero gradients, perform a backward pass, and update the weights. - >>> optimizer.zero_grad() - >>> loss.backward() - >>> optimizer.step() - >>> # After training - >>> for hv in [4.0, 5.0, 6.0]: - >>> hour_var = Variable(torch.Tensor([[hv]])) - >>> y_pred = model(hour_var) - >>> print("predict (after training)", hv, model(hour_var ).data[0][0]) - >>> # log the model - >>> with mlflow.start_run() as run: - >>> mlflow.log_param("epochs", 500) - >>> mlflow.pytorch.log_model(pytorch_model, "models") - """ - Model.log(artifact_path=artifact_path, flavor=mlflow.pytorch, - pytorch_model=pytorch_model, conda_env=conda_env, **kwargs) - - -def save_model(pytorch_model, path, conda_env=None, mlflow_model=Model(), **kwargs): - """ - Save a PyTorch model to a path on the local file system. - - :param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as - input and produce a single output tensor. - :param path: Local path where the model is to be saved. - :param conda_env: Path to a Conda environment file. If provided, this decribes the environment - this model should be run in. At minimum, it should specify python, pytorch, - and mlflow with appropriate versions. - :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to. - :param kwargs: kwargs to pass to ``torch.save`` method. - - >>> import torch - >>> import mlflow - >>> import mlflow.pytorch - >>> # create model and set values - >>> pytorch_model = Model() - >>> pytorch_model_path = ... - >>> #train our model - >>> for epoch in range(500): - >>> y_pred = model(x_data) - >>> ... 
- >>> #save the model - >>> with mlflow.start_run() as run: - >>> mlflow.log_param("epochs", 500) - >>> mlflow.pytorch.save_model(pytorch_model, pytorch_model_path) - """ - if not isinstance(pytorch_model, torch.nn.Module): - raise TypeError("Argument 'pytorch_model' should be a torch.nn.Module") - - path = os.path.abspath(path) - if os.path.exists(path): - raise RuntimeError("Path '{}' already exists".format(path)) - os.makedirs(path) - model_path = os.path.join(path, "model.pth") - - # Save pytorch model - torch.save(pytorch_model, model_path, **kwargs) - model_file = os.path.basename(model_path) - - mlflow_model.add_flavor(FLAVOR_NAME, model_data=model_file, pytorch_version=torch.__version__) - pyfunc.add_to_model(mlflow_model, loader_module="mlflow.pytorch", - data=model_file, env=conda_env) - mlflow_model.save(os.path.join(path, "MLmodel")) - - -def _load_model(path, **kwargs): - mlflow_model_path = os.path.join(path, "MLmodel") - if not os.path.exists(mlflow_model_path): - raise RuntimeError("MLmodel is not found at '{}'".format(path)) - - mlflow_model = Model.load(mlflow_model_path) - - if FLAVOR_NAME not in mlflow_model.flavors: - raise ValueError("Could not find flavor '{}' amongst available flavors {}, " - "unable to load stored model" - .format(FLAVOR_NAME, list(mlflow_model.flavors.keys()))) - - # This maybe replaced by a warning and then try/except torch.load - flavor = mlflow_model.flavors[FLAVOR_NAME] - if torch.__version__ != flavor["pytorch_version"]: - raise ValueError("Stored model version '{}' does not match " - "installed PyTorch version '{}'" - .format(flavor["pytorch_version"], torch.__version__)) - - path = os.path.abspath(path) - path = os.path.join(path, mlflow_model.flavors[FLAVOR_NAME]['model_data']) - return torch.load(path, **kwargs) - - -def load_model(path, run_id=None, **kwargs): - """ - Load a PyTorch model from a local file (if ``run_id`` is ``None``) or a run. - - :param path: Local filesystem path or run-relative artifact path to the model saved - by :py:func:`mlflow.pytorch.log_model`. - :param run_id: Run ID. If provided, combined with ``path`` to identify the model. - :param kwargs: kwargs to pass to ``torch.load`` method. - - >>> import torch - >>> import mlflow - >>> import mlflow.pytorch - >>> # set values - >>> model_path_dir = ... - >>> run_id="96771d893a5e46159d9f3b49bf9013e2" - >>> pytorch_model = mlflow.pytorch.load_model(model_path_dir, run_id) - >>> y_pred = pytorch_model(x_new_data) - """ - if run_id is not None: - path = mlflow.tracking.utils._get_model_log_dir(model_name=path, run_id=run_id) - - return _load_model(path, **kwargs) - - -def _load_pyfunc(path, **kwargs): - """ - Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. 
- """ - return _PyTorchWrapper(_load_model(os.path.dirname(path), **kwargs)) - - -class _PyTorchWrapper(object): - """ - Wrapper class that creates a predict function such that - predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame) - """ - def __init__(self, pytorch_model): - self.pytorch_model = pytorch_model - - def predict(self, data, device='cpu'): - if not isinstance(data, pd.DataFrame): - raise TypeError("Input data should be pandas.DataFrame") - self.pytorch_model.to(device) - self.pytorch_model.eval() - with torch.no_grad(): - input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device) - preds = self.pytorch_model(input_tensor) - if not isinstance(preds, torch.Tensor): - raise TypeError("Expected PyTorch model to output a single output tensor, " - "but got output of type '{}'".format(type(preds))) - predicted = pd.DataFrame(preds.numpy()) - predicted.index = data.index - return predicted diff --git a/mlflow/pytorch/__init__.py b/mlflow/pytorch/__init__.py new file mode 100644 index 0000000000000..d9613a91cb97b --- /dev/null +++ b/mlflow/pytorch/__init__.py @@ -0,0 +1,380 @@ +""" +The ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module +exports PyTorch models with the following flavors: + +PyTorch (native) format + This is the main flavor that can be loaded back into PyTorch. +:py:mod:`mlflow.pyfunc` + Produced for use by generic pyfunc-based deployment tools and batch inference. +""" + +from __future__ import absolute_import + +import importlib +import logging +import os +import yaml + +import cloudpickle +import numpy as np +import pandas as pd + +import mlflow +import mlflow.pyfunc.utils as pyfunc_utils +from mlflow import pyfunc +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST +from mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.file_utils import _copy_file_or_tree +from mlflow.utils.model_utils import _get_flavor_configuration + +FLAVOR_NAME = "pytorch" + +_SERIALIZED_TORCH_MODEL_FILE_NAME = "model.pth" +_PICKLE_MODULE_INFO_FILE_NAME = "pickle_module_info.txt" + +_logger = logging.getLogger(__name__) + + +def get_default_conda_env(): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. + """ + import torch + import torchvision + + return _mlflow_conda_env( + additional_conda_deps=[ + "pytorch={}".format(torch.__version__), + "torchvision={}".format(torchvision.__version__), + ], + additional_pip_deps=[ + # We include CloudPickle in the default environment because + # it's required by the default pickle module used by `save_model()` + # and `log_model()`: `mlflow.pytorch.pickle_module`. + "cloudpickle=={}".format(cloudpickle.__version__) + ], + additional_conda_channels=[ + "pytorch", + ]) + + +def log_model(pytorch_model, artifact_path, conda_env=None, code_paths=None, + pickle_module=None, **kwargs): + """ + Log a PyTorch model as an MLflow artifact for the current run. + + :param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as + input and produce a single output tensor. 
Any code dependencies of the
+ model's class, including the class definition itself, should be
+ included in one of the following locations:
+
+ - The package(s) listed in the model's Conda environment, specified
+ by the ``conda_env`` parameter.
+ - One or more of the files specified by the ``code_paths`` parameter.
+
+ :param artifact_path: Run-relative artifact path.
+ :param conda_env: Either a dictionary representation of a Conda environment or the path to a
+ Conda environment yaml file. If provided, this describes the environment
+ this model should be run in. At minimum, it should specify the dependencies
+ contained in :func:`get_default_conda_env()`. If ``None``, the default
+ :func:`get_default_conda_env()` environment is added to the model. The
+ following is an *example* dictionary representation of a Conda environment::
+
+ {
+ 'name': 'mlflow-env',
+ 'channels': ['defaults'],
+ 'dependencies': [
+ 'python=3.7.0',
+ 'pytorch=0.4.1',
+ 'torchvision=0.2.1'
+ ]
+ }
+
+ :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
+ containing file dependencies). These files are *prepended* to the system
+ path when the model is loaded.
+ :param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
+ ``pytorch_model``. This is passed as the ``pickle_module`` parameter
+ to ``torch.save()``. By default, this module is also used to
+ deserialize ("unpickle") the PyTorch model at load time.
+ :param kwargs: kwargs to pass to ``torch.save`` method.
+
+ >>> import torch
+ >>> import mlflow
+ >>> import mlflow.pytorch
+ >>> # X data
+ >>> x_data = torch.Tensor([[1.0], [2.0], [3.0]])
+ >>> # Y data with its expected value: labels
+ >>> y_data = torch.Tensor([[2.0], [4.0], [6.0]])
+ >>> # Partial Model example modified from Sung Kim
+ >>> # https://github.com/hunkim/PyTorchZeroToAll
+ >>> class Model(torch.nn.Module):
+ >>> def __init__(self):
+ >>> super(Model, self).__init__()
+ >>> self.linear = torch.nn.Linear(1, 1) # One in and one out
+ >>> def forward(self, x):
+ >>> y_pred = self.linear(x)
+ >>> return y_pred
+ >>> # our model
+ >>> model = Model()
+ >>> criterion = torch.nn.MSELoss(size_average=False)
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+ >>> # Training loop
+ >>> for epoch in range(500):
+ >>> # Forward pass: Compute predicted y by passing x to the model
+ >>> y_pred = model(x_data)
+ >>> # Compute and print loss
+ >>> loss = criterion(y_pred, y_data)
+ >>> print(epoch, loss.data.item())
+ >>> # Zero gradients, perform a backward pass, and update the weights.
+ >>> optimizer.zero_grad()
+ >>> loss.backward()
+ >>> optimizer.step()
+ >>>
+ >>> # After training
+ >>> for hv in [4.0, 5.0, 6.0]:
+ >>> hour_var = torch.Tensor([[hv]])
+ >>> y_pred = model(hour_var)
+ >>> print("predict (after training)", hv, model(hour_var).data[0][0])
+ >>> # log the model
+ >>> with mlflow.start_run() as run:
+ >>> mlflow.log_param("epochs", 500)
+ >>> mlflow.pytorch.log_model(model, "models")
+ """
+ pickle_module = pickle_module or mlflow_pytorch_pickle_module
+ Model.log(artifact_path=artifact_path, flavor=mlflow.pytorch, pytorch_model=pytorch_model,
+ conda_env=conda_env, code_paths=code_paths, pickle_module=pickle_module, **kwargs)
+
+
+def save_model(pytorch_model, path, conda_env=None, mlflow_model=Model(), code_paths=None,
+ pickle_module=None, **kwargs):
+ """
+ Save a PyTorch model to a path on the local file system.
+
+ :param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as
+ input and produce a single output tensor. 
Any code dependencies of the
+                          model's class, including the class definition itself, should be
+                          included in one of the following locations:
+
+                          - The package(s) listed in the model's Conda environment, specified
+                            by the ``conda_env`` parameter.
+                          - One or more of the files specified by the ``code_paths`` parameter.
+
+    :param path: Local path where the model is to be saved.
+    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
+                      Conda environment yaml file. If provided, this describes the environment
+                      this model should be run in. At minimum, it should specify the dependencies
+                      contained in :func:`get_default_conda_env()`. If ``None``, the default
+                      :func:`get_default_conda_env()` environment is added to the model. The
+                      following is an *example* dictionary representation of a Conda environment::
+
+                        {
+                            'name': 'mlflow-env',
+                            'channels': ['defaults'],
+                            'dependencies': [
+                                'python=3.7.0',
+                                'pytorch=0.4.1',
+                                'torchvision=0.2.1'
+                            ]
+                        }
+
+    :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
+    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
+                       containing file dependencies). These files are *prepended* to the system
+                       path when the model is loaded.
+    :param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
+                          ``pytorch_model``. This is passed as the ``pickle_module`` parameter
+                          to ``torch.save()``. By default, this module is also used to
+                          deserialize ("unpickle") the PyTorch model at load time.
+    :param kwargs: kwargs to pass to ``torch.save`` method.
+
+    >>> import torch
+    >>> import mlflow
+    >>> import mlflow.pytorch
+    >>> # create model and set values
+    >>> pytorch_model = Model()
+    >>> pytorch_model_path = ...
+    >>> # train our model
+    >>> for epoch in range(500):
+    >>>     y_pred = pytorch_model(x_data)
+    >>>     ...
+    >>> # save the model
+    >>> with mlflow.start_run() as run:
+    >>>     mlflow.log_param("epochs", 500)
+    >>>     mlflow.pytorch.save_model(pytorch_model, pytorch_model_path)
+    """
+    import torch
+    pickle_module = pickle_module or mlflow_pytorch_pickle_module
+
+    if not isinstance(pytorch_model, torch.nn.Module):
+        raise TypeError("Argument 'pytorch_model' should be a torch.nn.Module")
+
+    path = os.path.abspath(path)
+    if os.path.exists(path):
+        raise RuntimeError("Path '{}' already exists".format(path))
+    os.makedirs(path)
+
+    model_data_subpath = "data"
+    model_data_path = os.path.join(path, model_data_subpath)
+    os.makedirs(model_data_path)
+    # Persist the pickle module name as a file in the model's `data` directory.
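The resulting layout on disk follows directly from the constants and calls in this function. A small sketch that saves a stand-in model and lists what was written (the target path is illustrative and must not already exist):

    import os
    import torch
    import mlflow.pytorch

    model = torch.nn.Linear(1, 1)  # stand-in for a trained model
    mlflow.pytorch.save_model(model, "/tmp/pytorch_model_example")
    for root, _, files in os.walk("/tmp/pytorch_model_example"):
        for name in files:
            print(os.path.join(root, name))
    # Expected entries: MLmodel, conda.yaml, data/model.pth,
    # and data/pickle_module_info.txt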
This is necessary + # because the `data` directory is the only available parameter to `_load_pyfunc`, and it + # does not contain the MLmodel configuration; therefore, it is not sufficient to place + # the module name in the MLmodel + # + # TODO: Stop persisting this information to the filesystem once we have a mechanism for + # supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc` + pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME) + with open(pickle_module_path, "w") as f: + f.write(pickle_module.__name__) + # Save pytorch model + model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME) + torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs) + + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env() + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + + if code_paths is not None: + code_dir_subpath = "code" + for code_path in code_paths: + _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath) + else: + code_dir_subpath = None + + mlflow_model.add_flavor( + FLAVOR_NAME, model_data=model_data_subpath, pytorch_version=torch.__version__) + pyfunc.add_to_model(mlflow_model, loader_module="mlflow.pytorch", data=model_data_subpath, + pickle_module_name=pickle_module.__name__, code=code_dir_subpath, + env=conda_env_subpath) + mlflow_model.save(os.path.join(path, "MLmodel")) + + +def _load_model(path, **kwargs): + """ + :param path: The path to a serialized PyTorch model. + :param kwargs: Additional kwargs to pass to the PyTorch ``torch.load`` function. + """ + import torch + + if os.path.isdir(path): + # `path` is a directory containing a serialized PyTorch model and a text file containing + # information about the pickle module that should be used by PyTorch to load it + model_path = os.path.join(path, "model.pth") + pickle_module_path = os.path.join(path, _PICKLE_MODULE_INFO_FILE_NAME) + with open(pickle_module_path, "r") as f: + pickle_module_name = f.read() + if "pickle_module" in kwargs and kwargs["pickle_module"].__name__ != pickle_module_name: + _logger.warning( + "Attempting to load the PyTorch model with a pickle module, '%s', that does not" + " match the pickle module that was used to save the model: '%s'.", + kwargs["pickle_module"].__name__, + pickle_module_name) + else: + try: + kwargs["pickle_module"] = importlib.import_module(pickle_module_name) + except ImportError: + raise MlflowException( + message=( + "Failed to import the pickle module that was used to save the PyTorch" + " model. Pickle module name: `{pickle_module_name}`".format( + pickle_module_name=pickle_module_name)), + error_code=RESOURCE_DOES_NOT_EXIST) + + else: + model_path = path + + return torch.load(model_path, **kwargs) + + +def load_model(model_uri, **kwargs): + """ + Load a PyTorch model from a local file or a run. + + :param model_uri: The location, in URI format, of the MLflow model, for example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :param kwargs: kwargs to pass to ``torch.load`` method. + :return: A PyTorch model. 
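Because ``**kwargs`` are forwarded to ``torch.load``, a caller can override the recorded pickle module at load time; when the override's name differs from the one stored in ``data/pickle_module_info.txt``, ``_load_model`` logs the warning shown above rather than importing the recorded module. A sketch, reusing the illustrative path from the ``save_model`` sketch earlier (loading with the plain ``pickle`` module succeeds here only because the stand-in model's class is importable):

    import pickle
    import mlflow.pytorch

    # Triggers the mismatched-pickle-module warning path in _load_model.
    model = mlflow.pytorch.load_model("/tmp/pytorch_model_example",
                                      pickle_module=pickle)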
+ + >>> import torch + >>> import mlflow + >>> import mlflow.pytorch + >>> # set values + >>> model_path_dir = ... + >>> run_id="96771d893a5e46159d9f3b49bf9013e2" + >>> pytorch_model = mlflow.pytorch.load_model("runs:/" + run_id + "/" + model_path_dir) + >>> y_pred = pytorch_model(x_new_data) + """ + import torch + + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + try: + pyfunc_conf = _get_flavor_configuration( + model_path=local_model_path, flavor_name=pyfunc.FLAVOR_NAME) + except MlflowException: + pyfunc_conf = {} + code_subpath = pyfunc_conf.get(pyfunc.CODE) + if code_subpath is not None: + pyfunc_utils._add_code_to_system_path( + code_path=os.path.join(local_model_path, code_subpath)) + + pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) + if torch.__version__ != pytorch_conf["pytorch_version"]: + _logger.warning( + "Stored model version '%s' does not match installed PyTorch version '%s'", + pytorch_conf["pytorch_version"], torch.__version__) + torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf['model_data']) + return _load_model(path=torch_model_artifacts_path, **kwargs) + + +def _load_pyfunc(path, **kwargs): + """ + Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + + :param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor. + """ + return _PyTorchWrapper(_load_model(path, **kwargs)) + + +class _PyTorchWrapper(object): + """ + Wrapper class that creates a predict function such that + predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame) + """ + def __init__(self, pytorch_model): + self.pytorch_model = pytorch_model + + def predict(self, data, device='cpu'): + import torch + + if not isinstance(data, pd.DataFrame): + raise TypeError("Input data should be pandas.DataFrame") + self.pytorch_model.to(device) + self.pytorch_model.eval() + with torch.no_grad(): + input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device) + preds = self.pytorch_model(input_tensor) + if not isinstance(preds, torch.Tensor): + raise TypeError("Expected PyTorch model to output a single output tensor, " + "but got output of type '{}'".format(type(preds))) + predicted = pd.DataFrame(preds.numpy()) + predicted.index = data.index + return predicted diff --git a/mlflow/pytorch/pickle_module.py b/mlflow/pytorch/pickle_module.py new file mode 100644 index 0000000000000..becaf1518ae64 --- /dev/null +++ b/mlflow/pytorch/pickle_module.py @@ -0,0 +1,35 @@ +""" +This module imports contents from CloudPickle in a way that is compatible with the +``pickle_module`` parameter of PyTorch's model persistence function: ``torch.save`` +(see https://github.com/pytorch/pytorch/blob/692898fe379c9092f5e380797c32305145cd06e1/torch/ +serialization.py#L192). It is included as a distinct module from :mod:`mlflow.pytorch` to avoid +polluting the namespace with wildcard imports. + +Calling ``torch.save(..., pickle_module=mlflow.pytorch.pickle_module)`` will persist PyTorch model +definitions using CloudPickle, leveraging improved pickling functionality such as the ability +to capture class definitions in the "__main__" scope. + +TODO: Remove this module or make it an alias of CloudPickle when CloudPickle and PyTorch have +compatible pickling APIs. +""" + +# Import all contents of the CloudPickle module in an attempt to include all functions required +# by ``torch.save``. 
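Per the module docstring above, the shim's practical benefit is serializing classes defined in the "__main__" scope (for example, in a notebook or one-off script) by value. A minimal sketch of that round trip, using an assumed temporary path:

    import torch
    from mlflow.pytorch import pickle_module as mlflow_pickle

    class TinyNet(torch.nn.Module):
        def __init__(self):
            super(TinyNet, self).__init__()
            self.linear = torch.nn.Linear(1, 1)

        def forward(self, x):
            return self.linear(x)

    # CloudPickle's Pickler captures the __main__-scope class definition by
    # value, so the saved file can be loaded without importing this script.
    torch.save(TinyNet(), "/tmp/tiny_net.pth", pickle_module=mlflow_pickle)
    restored = torch.load("/tmp/tiny_net.pth", pickle_module=mlflow_pickle)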
+
+# pylint: disable=wildcard-import
+# pylint: disable=unused-wildcard-import
+from cloudpickle import *
+# PyTorch uses the ``Pickler`` class of the specified ``pickle_module``
+# (https://github.com/pytorch/pytorch/blob/692898fe379c9092f5e380797c32305145cd06e1/torch/
+# serialization.py#L290). Unfortunately, ``cloudpickle.Pickler`` is an alias for Python's native
+# pickling class: ``pickle.Pickler``, instead of ``cloudpickle.CloudPickler``.
+# https://github.com/cloudpipe/cloudpickle/pull/235 has been filed to correct the issue,
+# but this import renaming is necessary until either the requested change has been incorporated
+# into a CloudPickle release or the ``torch.save`` API has been updated to be compatible with
+# the existing CloudPickle API.
+from cloudpickle import CloudPickler as Pickler
+# CloudPickle does not include `Unpickler` in its namespace, which is required by PyTorch for
+# deserialization. Noting that CloudPickle's `load()` and `loads()` routines are aliases for
+# `pickle.load()` and `pickle.loads()`, we therefore import Unpickler from the native
+# Python pickle library.
+# pylint: disable=unused-import
+from pickle import Unpickler
diff --git a/mlflow/rfunc/backend.py b/mlflow/rfunc/backend.py
new file mode 100644
index 0000000000000..b432e89eda351
--- /dev/null
+++ b/mlflow/rfunc/backend.py
@@ -0,0 +1,71 @@
+import logging
+import os
+import re
+import subprocess
+from six.moves import shlex_quote
+
+from mlflow.models import FlavorBackend
+from mlflow.tracking.artifact_utils import _download_artifact_from_uri
+
+_logger = logging.getLogger(__name__)
+
+
+class RFuncBackend(FlavorBackend):
+    """
+    Flavor backend implementation for generic R models.
+    Predict and serve models locally with the 'crate' flavor.
+    """
+    version_pattern = re.compile("version ([0-9]+[.][0-9]+[.][0-9]+)")
+
+    def predict(self, model_uri, input_path, output_path, content_type, json_format):
+        """
+        Generate predictions using an R model saved with MLflow.
+        Return the prediction results as JSON.
+        """
+        model_path = _download_artifact_from_uri(model_uri)
+        str_cmd = "mlflow:::mlflow_rfunc_predict(model_path = '{0}', input_path = {1}, " \
+                  "output_path = {2}, content_type = {3}, json_format = {4})"
+        command = str_cmd.format(shlex_quote(model_path),
+                                 _str_optional(input_path),
+                                 _str_optional(output_path),
+                                 _str_optional(content_type),
+                                 _str_optional(json_format))
+        _execute(command)
+
+    def serve(self, model_uri, port, host):
+        """
+        Serve an R model locally.
+ """ + model_path = _download_artifact_from_uri(model_uri) + command = "mlflow::mlflow_rfunc_serve('{0}', port = {1}, host = '{2}')".format( + shlex_quote(model_path), port, host) + _execute(command) + + def can_score_model(self): + process = subprocess.Popen(["Rscript", "--version"], close_fds=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + _, stderr = process.communicate() + if process.wait() != 0: + return False + + version = self.version_pattern.search(stderr.decode("utf-8")) + if not version: + return False + version = [int(x) for x in version.group(1).split(".")] + return version[0] > 3 or version[0] == 3 and version[1] >= 3 + + +def _execute(command): + env = os.environ.copy() + import sys + process = subprocess.Popen(["Rscript", "-e", command], env=env, close_fds=False, + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr) + if process.wait() != 0: + raise Exception("Command returned non zero exit code.") + + +def _str_optional(s): + return "NULL" if s is None else "'{}'".format(shlex_quote(str(s))) diff --git a/mlflow/rfunc/cli.py b/mlflow/rfunc/cli.py deleted file mode 100644 index 37a9d65791d85..0000000000000 --- a/mlflow/rfunc/cli.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import absolute_import - -import click -import os -import subprocess - -from mlflow.tracking.utils import _get_model_log_dir -from mlflow.utils import cli_args -from mlflow.utils.logging_utils import eprint - - -@click.group("rfunc") -def commands(): - """ - Serve R models locally. - - To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI - environment variable to the URL of the desired server. - """ - pass - - -def execute(command): - eprint("=== Rscript -e %s) ===" % command) - env = os.environ.copy() - process = subprocess.Popen(["Rscript", "-e", command], close_fds=True, env=env) - process.wait() - - -def str_optional(s): - if s is None: - return '' - return str(s) - - -@commands.command("serve") -@cli_args.MODEL_PATH -@cli_args.RUN_ID -@click.option("--port", "-p", default=5000, help="Server port. [default: 5000]") -def serve(model_path, run_id, port): - """ - Serve an RFunction model saved with MLflow. - - If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run; - otherwise it is treated as a local path. - """ - if run_id: - model_path = _get_model_log_dir(model_path, run_id) - - command = "mlflow::mlflow_rfunc_serve('{0}', port = {1})".format(model_path, port) - execute(command) - - -@commands.command("predict") -@cli_args.MODEL_PATH -@cli_args.RUN_ID -@click.option("--input-path", "-i", help="JSON or CSV containing DataFrame to predict against.", - required=True) -@click.option("--output-path", "-o", help="File to output results to as JSON or CSV file." + - " If not provided, output to stdout.") -def predict(model_path, run_id, input_path, output_path): - """ - Serve an RFunction model saved with MLflow. - Return the prediction results as a JSON DataFrame. - - If a ``run-id`` is specified, ``model-path`` is treated as an artifact path within that run; - otherwise it is treated as a local path. 
- """ - if run_id: - model_path = _get_model_log_dir(model_path, run_id) - - str_cmd = "mlflow::mlflow_rfunc_predict('{0}', '{1}', '{2}')" - command = str_cmd.format(model_path, input_path, str_optional(output_path)) - - execute(command) diff --git a/mlflow/runs.py b/mlflow/runs.py new file mode 100644 index 0000000000000..05be5780ec14f --- /dev/null +++ b/mlflow/runs.py @@ -0,0 +1,82 @@ +""" +CLI for runs +""" +from __future__ import print_function + +import click +import json +import mlflow.tracking +from mlflow.entities import ViewType +from mlflow.tracking import _get_store +from tabulate import tabulate +from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME +from mlflow.utils.time_utils import conv_longdate_to_str + +RUN_ID = click.option("--run-id", type=click.STRING, required=True) + + +@click.group("runs") +def commands(): + """ + Manage runs. To manage runs of experiments associated with a tracking server, set the + MLFLOW_TRACKING_URI environment variable to the URL of the desired server. + """ + pass + + +@commands.command("list") +@click.option("--experiment-id", envvar=mlflow.tracking._EXPERIMENT_ID_ENV_VAR, type=click.STRING, + help="Specify the experiment ID for list of runs.", required=True) +@click.option("--view", "-v", default="active_only", + help="Select view type for list experiments. Valid view types are " + "'active_only' (default), 'deleted_only', and 'all'.") +def list_run(experiment_id, view): + """ + List all runs of the specified experiment in the configured tracking server. + """ + store = _get_store() + view_type = ViewType.from_string(view) if view else ViewType.ACTIVE_ONLY + runs = store.search_runs([experiment_id], None, view_type) + table = [] + for run in runs: + tags = {k: v for k, v in run.data.tags.items()} + run_name = tags.get(MLFLOW_RUN_NAME, "") + table.append([conv_longdate_to_str(run.info.start_time), run_name, run.info.run_id]) + print(tabulate(sorted(table, reverse=True), headers=["Date", "Name", "ID"])) + + +@commands.command("delete") +@RUN_ID +def delete_run(run_id): + """ + Mark a run for deletion. Return an error if the run does not exist or + is already marked. You can restore a marked run with ``restore_run``, + or permanently delete a run in the backend store. + """ + store = _get_store() + store.delete_run(run_id) + print("Run with ID %s has been deleted." % str(run_id)) + + +@commands.command("restore") +@RUN_ID +def restore_run(run_id): + """ + Restore a deleted run. + Returns an error if the run is active or has been permanently deleted. + """ + store = _get_store() + store.restore_run(run_id) + print("Run with id %s has been restored." % str(run_id)) + + +@commands.command('describe') +@RUN_ID +def describe_run(run_id): + """ + All of run details will print to the stdout as JSON format. 
+ """ + store = _get_store() + run = store.get_run(run_id) + json_run = json.dumps(run.to_dictionary(), indent=4) + print(json_run) diff --git a/mlflow/sagemaker/__init__.py b/mlflow/sagemaker/__init__.py index f073322466415..0bb1e924380f1 100644 --- a/mlflow/sagemaker/__init__.py +++ b/mlflow/sagemaker/__init__.py @@ -4,28 +4,27 @@ from __future__ import print_function import os -import sys from subprocess import Popen, PIPE, STDOUT from six.moves import urllib +import sys import tarfile -import uuid -import shutil +import logging +import time -import base64 -import boto3 -import yaml import mlflow import mlflow.version from mlflow import pyfunc, mleap +from mlflow.exceptions import MlflowException from mlflow.models import Model -from mlflow.tracking.utils import _get_model_log_dir +from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, INVALID_PARAMETER_VALUE +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils import get_unique_resource_id +from mlflow.utils.file_utils import TempDir from mlflow.utils.logging_utils import eprint -from mlflow.utils.file_utils import TempDir, _copy_project -from mlflow.sagemaker.container import SUPPORTED_FLAVORS as SUPPORTED_DEPLOYMENT_FLAVORS -from mlflow.sagemaker.container import DEPLOYMENT_CONFIG_KEY_FLAVOR_NAME +from mlflow.models.container import SUPPORTED_FLAVORS as SUPPORTED_DEPLOYMENT_FLAVORS +from mlflow.models.container import DEPLOYMENT_CONFIG_KEY_FLAVOR_NAME DEFAULT_IMAGE_NAME = "mlflow-pyfunc" - DEPLOYMENT_MODE_ADD = "add" DEPLOYMENT_MODE_REPLACE = "replace" DEPLOYMENT_MODE_CREATE = "create" @@ -36,127 +35,68 @@ DEPLOYMENT_MODE_REPLACE ] -IMAGE_NAME_ENV_VAR = "SAGEMAKER_DEPLOY_IMG_URL" +IMAGE_NAME_ENV_VAR = "MLFLOW_SAGEMAKER_DEPLOY_IMG_URL" +# Deprecated as of MLflow 1.0. 
+DEPRECATED_IMAGE_NAME_ENV_VAR = "SAGEMAKER_DEPLOY_IMG_URL" DEFAULT_BUCKET_NAME_PREFIX = "mlflow-sagemaker" DEFAULT_SAGEMAKER_INSTANCE_TYPE = "ml.m4.xlarge" DEFAULT_SAGEMAKER_INSTANCE_COUNT = 1 -_DOCKERFILE_TEMPLATE = """ -# Build an image that can serve pyfunc model in SageMaker -FROM ubuntu:16.04 - -RUN apt-get -y update && apt-get install -y --no-install-recommends \ - wget \ - curl \ - nginx \ - ca-certificates \ - bzip2 \ - build-essential \ - cmake \ - openjdk-8-jdk \ - git-core \ - maven \ - && rm -rf /var/lib/apt/lists/* - -# Download and setup miniconda -RUN curl https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh >> miniconda.sh -RUN bash ./miniconda.sh -b -p /miniconda; rm ./miniconda.sh; -ENV PATH="/miniconda/bin:${PATH}" -ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 - -RUN conda install -c anaconda gunicorn;\ - conda install -c anaconda gevent;\ - -%s - -# Set up the program in the image -WORKDIR /opt/mlflow - -# start mlflow scoring -ENTRYPOINT ["python", "-c", "import sys; from mlflow.sagemaker import container as C; \ -C._init(sys.argv[1])"] -""" - +_logger = logging.getLogger(__name__) -def _docker_ignore(mlflow_root): - docker_ignore = os.path.join(mlflow_root, '.dockerignore') - - def strip_slash(x): - if x.startswith("/"): - x = x[1:] - if x.endswith('/'): - x = x[:-1] - return x +_full_template = "{account}.dkr.ecr.{region}.amazonaws.com/{image}:{version}" - if os.path.exists(docker_ignore): - with open(docker_ignore, "r") as f: - patterns = [x.strip() for x in f.readlines()] - patterns = [strip_slash(x) - for x in patterns if not x.startswith("#")] - def ignore(_, names): - import fnmatch - res = set() - for p in patterns: - res.update(set(fnmatch.filter(names, p))) - return list(res) +def _get_preferred_deployment_flavor(model_config): + """ + Obtains the flavor that MLflow would prefer to use when deploying the model. + If the model does not contain any supported flavors for deployment, an exception + will be thrown. - return ignore + :param model_config: An MLflow model object + :return: The name of the preferred deployment flavor for the specified model + """ + if mleap.FLAVOR_NAME in model_config.flavors: + return mleap.FLAVOR_NAME + elif pyfunc.FLAVOR_NAME in model_config.flavors: + return pyfunc.FLAVOR_NAME + else: + raise MlflowException( + message=( + "The specified model does not contain any of the supported flavors for" + " deployment. The model contains the following flavors: {model_flavors}." + " Supported flavors: {supported_flavors}".format( + model_flavors=model_config.flavors.keys(), + supported_flavors=SUPPORTED_DEPLOYMENT_FLAVORS)), + error_code=RESOURCE_DOES_NOT_EXIST) -def build_image(name=DEFAULT_IMAGE_NAME, mlflow_home=None): +def _validate_deployment_flavor(model_config, flavor): """ - Build an MLflow Docker image. - The image is built locally and it requires Docker to run. + Checks that the specified flavor is a supported deployment flavor + and is contained in the specified model. If one of these conditions + is not met, an exception is thrown. - :param name: Docker image name. - :param mlflow_home: Directory containing checkout of the MLflow GitHub project or - current directory if not specified. 
+ :param model_config: An MLflow Model object + :param flavor: The deployment flavor to validate """ - with TempDir() as tmp: - cwd = tmp.path() - if mlflow_home: - mlflow_dir = _copy_project( - src_path=mlflow_home, dst_path=cwd) - install_mlflow = ( - "COPY {mlflow_dir} /opt/mlflow\n" - "RUN pip install /opt/mlflow\n" - "RUN cd /opt/mlflow/mlflow/java/scoring &&" - " mvn --batch-mode package -DskipTests &&" - " mkdir -p /opt/java/jars &&" - " mv /opt/mlflow/mlflow/java/scoring/target/" - "mlflow-scoring-*-with-dependencies.jar /opt/java/jars\n" - ).format(mlflow_dir=mlflow_dir) - else: - install_mlflow = ( - "RUN pip install mlflow=={version}\n" - "RUN mvn --batch-mode dependency:copy" - " -Dartifact=org.mlflow:mlflow-scoring:{version}:pom" - " -DoutputDirectory=/opt/java\n" - "RUN mvn --batch-mode dependency:copy" - " -Dartifact=org.mlflow:mlflow-scoring:{version}:jar" - " -DoutputDirectory=/opt/java/jars\n" - "RUN cd /opt/java && mv mlflow-scoring-{version}.pom pom.xml &&" - " mvn --batch-mode dependency:copy-dependencies -DoutputDirectory=/opt/java/jars\n" - "RUN rm /opt/java/pom.xml\n" - ).format(version=mlflow.version.VERSION) - - with open(os.path.join(cwd, "Dockerfile"), "w") as f: - f.write(_DOCKERFILE_TEMPLATE % install_mlflow) - eprint("building docker image") - os.system('find {cwd}/'.format(cwd=cwd)) - proc = Popen(["docker", "build", "-t", name, "-f", "Dockerfile", "."], - cwd=cwd, - stdout=PIPE, - stderr=STDOUT, - universal_newlines=True) - for x in iter(proc.stdout.readline, ""): - eprint(x, end='') - - -_full_template = "{account}.dkr.ecr.{region}.amazonaws.com/{image}:{version}" + if flavor not in SUPPORTED_DEPLOYMENT_FLAVORS: + raise MlflowException( + message=( + "The specified flavor: `{flavor_name}` is not supported for deployment." + " Please use one of the supported flavors: {supported_flavor_names}".format( + flavor_name=flavor, + supported_flavor_names=SUPPORTED_DEPLOYMENT_FLAVORS)), + error_code=INVALID_PARAMETER_VALUE) + elif flavor not in model_config.flavors: + raise MlflowException( + message=("The specified model does not contain the specified deployment flavor:" + " `{flavor_name}`. Please use one of the following deployment flavors" + " that the model contains: {model_flavors}".format( + flavor_name=flavor, model_flavors=model_config.flavors.keys())), + error_code=RESOURCE_DOES_NOT_EXIST) def push_image_to_ecr(image=DEFAULT_IMAGE_NAME): @@ -167,7 +107,8 @@ def push_image_to_ecr(image=DEFAULT_IMAGE_NAME): :param image: Docker image name. 
""" - eprint("Pushing image to ECR") + import boto3 + _logger.info("Pushing image to ECR") client = boto3.client("sts") caller_id = client.get_caller_identity() account = caller_id['Account'] @@ -175,8 +116,7 @@ def push_image_to_ecr(image=DEFAULT_IMAGE_NAME): region = my_session.region_name or "us-west-2" fullname = _full_template.format(account=account, region=region, image=image, version=mlflow.version.VERSION) - eprint("Pushing docker image {image} to {repo}".format( - image=image, repo=fullname)) + _logger.info("Pushing docker image %s to %s", image, fullname) ecr_client = boto3.client('ecr') try: ecr_client.describe_repositories(repositoryNames=[image])['repositories'] @@ -195,24 +135,48 @@ def push_image_to_ecr(image=DEFAULT_IMAGE_NAME): os.system(cmd) -def deploy(app_name, model_path, execution_role_arn=None, bucket=None, run_id=None, +def deploy(app_name, model_uri, execution_role_arn=None, bucket=None, image_url=None, region_name="us-west-2", mode=DEPLOYMENT_MODE_CREATE, archive=False, instance_type=DEFAULT_SAGEMAKER_INSTANCE_TYPE, - instance_count=DEFAULT_SAGEMAKER_INSTANCE_COUNT, vpc_config=None, flavor=None): + instance_count=DEFAULT_SAGEMAKER_INSTANCE_COUNT, vpc_config=None, flavor=None, + synchronous=True, timeout_seconds=1200): """ Deploy an MLflow model on AWS SageMaker. The currently active AWS account must have correct permissions set up. + This function creates a SageMaker endpoint. For more information about the input data + formats accepted by this endpoint, see the + :ref:`MLflow deployment tools documentation `. + :param app_name: Name of the deployed application. - :param path: Path to the model. Either local if no ``run_id`` or MLflow-relative if ``run_id`` - is specified. - :param execution_role_arn: Amazon execution role with SageMaker rights. - Defaults to the currently-assumed role. + :param model_uri: The location, in URI format, of the MLflow model to deploy to SageMaker. + For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :param execution_role_arn: The name of an IAM role granting the SageMaker service permissions to + access the specified Docker image and S3 bucket containing MLflow + model artifacts. If unspecified, the currently-assumed role will be + used. This execution role is passed to the SageMaker service when + creating a SageMaker model from the specified MLflow model. It is + passed as the ``ExecutionRoleArn`` parameter of the `SageMaker + CreateModel API call `_. This role is *not* assumed for any other + call. For more information about SageMaker execution roles for model + creation, see + https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html. :param bucket: S3 bucket where model artifacts will be stored. Defaults to a SageMaker-compatible bucket name. - :param run_id: MLflow run ID. - :param image: Name of the Docker image to be used. if not specified, uses a - publicly-available pre-built image. + :param image_url: URL of the ECR-hosted Docker image the model should be deployed into, produced + by ``mlflow sagemaker build-and-push-container``. This parameter can also + be specified by the environment variable ``MLFLOW_SAGEMAKER_DEPLOY_IMG_URL``. :param region_name: Name of the AWS region to which to deploy the application. :param mode: The mode in which to deploy the application. 
Must be one of the following: @@ -235,9 +199,13 @@ def deploy(app_name, model_path, execution_role_arn=None, bucket=None, run_id=No AWS console or the ``UpdateEndpointWeightsAndCapacities`` function defined in https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpointWeightsAndCapacities.html. - :param archive: If True, any pre-existing SageMaker application resources that become inactive - (i.e. as a result of deploying in ``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE`` - mode) are preserved. If False, these resources are deleted. + :param archive: If ``True``, any pre-existing SageMaker application resources that become + inactive (i.e. as a result of deploying in + ``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE`` mode) are preserved. + These resources may include unused SageMaker models and endpoint configurations + that were associated with a prior version of the application endpoint. If + ``False``, these resources are deleted. In order to use ``archive=False``, + ``deploy()`` must be executed synchronously with ``synchronous=True``. :param instance_type: The type of SageMaker ML instance on which to deploy the model. For a list of supported instance types, see https://aws.amazon.com/sagemaker/pricing/instance-types/. @@ -267,107 +235,140 @@ def deploy(app_name, model_path, execution_role_arn=None, bucket=None, run_id=No a flavor is automatically selected from the model's available flavors. If the specified flavor is not present or not supported for deployment, an exception will be thrown. - """ - if mode not in DEPLOYMENT_MODES: - raise ValueError("`mode` must be one of: {mds}".format( - mds=",".join(DEPLOYMENT_MODES))) + :param synchronous: If ``True``, this function will block until the deployment process succeeds + or encounters an irrecoverable failure. If ``False``, this function will + return immediately after starting the deployment process. It will not wait + for the deployment process to complete; in this case, the caller is + responsible for monitoring the health and status of the pending deployment + via native SageMaker APIs or the AWS console. + :param timeout_seconds: If ``synchronous`` is ``True``, the deployment process will return after + the specified number of seconds if no definitive result (success or + failure) is achieved. Once the function returns, the caller is + responsible for monitoring the health and status of the pending + deployment using native SageMaker APIs or the AWS console. If + ``synchronous`` is ``False``, this parameter is ignored. + """ + import boto3 + if (not archive) and (not synchronous): + raise MlflowException( + message=( + "Resources must be archived when `deploy()` is executed in non-synchronous mode." 
+ " Either set `synchronous=True` or `archive=True`."), + error_code=INVALID_PARAMETER_VALUE) - s3_bucket_prefix = model_path - if run_id: - model_path = _get_model_log_dir(model_path, run_id) - s3_bucket_prefix = os.path.join(run_id, s3_bucket_prefix) + if mode not in DEPLOYMENT_MODES: + raise MlflowException( + message="`mode` must be one of: {deployment_modes}".format( + deployment_modes=",".join(DEPLOYMENT_MODES)), + error_code=INVALID_PARAMETER_VALUE) + model_path = _download_artifact_from_uri(model_uri) model_config_path = os.path.join(model_path, "MLmodel") if not os.path.exists(model_config_path): - raise Exception( - "Failed to find MLmodel configuration within the specified model's root directory.") + raise MlflowException( + message=( + "Failed to find MLmodel configuration within the specified model's" + " root directory."), + error_code=INVALID_PARAMETER_VALUE) model_config = Model.load(model_config_path) if flavor is None: flavor = _get_preferred_deployment_flavor(model_config) else: _validate_deployment_flavor(model_config, flavor) - print("Using the {selected_flavor} flavor for deployment!".format(selected_flavor=flavor)) + _logger.info("Using the %s flavor for deployment!", flavor) + sage_client = boto3.client('sagemaker', region_name=region_name) + s3_client = boto3.client('s3', region_name=region_name) + + endpoint_exists = _find_endpoint(endpoint_name=app_name, sage_client=sage_client) is not None + if endpoint_exists and mode == DEPLOYMENT_MODE_CREATE: + raise MlflowException( + message=( + "You are attempting to deploy an application with name: {application_name} in" + " '{mode_create}' mode. However, an application with the same name already" + " exists. If you want to update this application, deploy in '{mode_add}' or" + " '{mode_replace}' mode.".format( + application_name=app_name, + mode_create=DEPLOYMENT_MODE_CREATE, + mode_add=DEPLOYMENT_MODE_ADD, + mode_replace=DEPLOYMENT_MODE_REPLACE)), + error_code=INVALID_PARAMETER_VALUE) + + model_name = _get_sagemaker_model_name(endpoint_name=app_name) if not image_url: image_url = _get_default_image_url(region_name=region_name) - if not execution_role_arn: execution_role_arn = _get_assumed_role_arn() - if not bucket: - eprint("No model data bucket specified, using the default bucket") + _logger.info("No model data bucket specified, using the default bucket") bucket = _get_default_s3_bucket(region_name) - model_s3_path = _upload_s3( - local_model_path=model_path, bucket=bucket, prefix=s3_bucket_prefix) - _deploy(role=execution_role_arn, - image_url=image_url, - app_name=app_name, - model_s3_path=model_s3_path, - run_id=run_id, - region_name=region_name, - mode=mode, - archive=archive, - instance_type=instance_type, - instance_count=instance_count, - vpc_config=vpc_config, - flavor=flavor) - - -def _get_preferred_deployment_flavor(model_config): - """ - Obtains the flavor that MLflow would prefer to use when deploying the model. - If the model does not contain any supported flavors for deployment, an exception - will be thrown. 
- - :param model_config: An MLflow model object - :return: The name of the preferred deployment flavor for the specified model - """ - if mleap.FLAVOR_NAME in model_config.flavors: - return mleap.FLAVOR_NAME - elif pyfunc.FLAVOR_NAME in model_config.flavors: - return pyfunc.FLAVOR_NAME + model_s3_path = _upload_s3(local_model_path=model_path, + bucket=bucket, + prefix=model_name, + region_name=region_name, + s3_client=s3_client) + + if endpoint_exists: + deployment_operation = _update_sagemaker_endpoint( + endpoint_name=app_name, model_name=model_name, model_s3_path=model_s3_path, + model_uri=model_uri, image_url=image_url, flavor=flavor, + instance_type=instance_type, instance_count=instance_count, vpc_config=vpc_config, + mode=mode, role=execution_role_arn, sage_client=sage_client, s3_client=s3_client) else: - raise ValueError("The specified model does not contain any of the supported flavors for" - " deployment. The model contains the following flavors:" - " {model_flavors}. Supported flavors: {supported_flavors}".format( - model_flavors=model_config.flavors.keys(), - supported_flavors=SUPPORTED_DEPLOYMENT_FLAVORS)) - - -def _validate_deployment_flavor(model_config, flavor): - """ - Checks that the specified flavor is a supported deployment flavor - and is contained in the specified model. If one of these conditions - is not met, an exception is thrown. - - :param model_config: An MLflow Model object - :param flavor: The deployment flavor to validate - """ - if flavor not in SUPPORTED_DEPLOYMENT_FLAVORS: - raise ValueError("The specified flavor: `{flavor_name}` is not supported for" - " deployment. Please use one of the supported flavors:" - " {supported_flavor_names}".format( - flavor_name=flavor, - supported_flavor_names=SUPPORTED_DEPLOYMENT_FLAVORS)) - elif flavor not in model_config.flavors: - raise ValueError("The specified model does not contain the specified deployment flavor:" - " `{flavor_name}`. Please use one of the following deployment flavors" - " that the model contains: {model_flavors}".format( - flavor_name=flavor, model_flavors=model_config.flavors.keys())) + deployment_operation = _create_sagemaker_endpoint( + endpoint_name=app_name, model_name=model_name, model_s3_path=model_s3_path, + model_uri=model_uri, image_url=image_url, flavor=flavor, + instance_type=instance_type, instance_count=instance_count, vpc_config=vpc_config, + role=execution_role_arn, sage_client=sage_client) + + if synchronous: + _logger.info("Waiting for the deployment operation to complete...") + operation_status = deployment_operation.await_completion(timeout_seconds=timeout_seconds) + if operation_status.state == _SageMakerOperationStatus.STATE_SUCCEEDED: + _logger.info("The deployment operation completed successfully with message: \"%s\"", + operation_status.message) + else: + raise MlflowException( + "The deployment operation failed with the following error message:" + " \"{error_message}\"".format(error_message=operation_status.message)) + if not archive: + deployment_operation.clean_up() -def delete(app_name, region_name="us-west-2", archive=False): +def delete(app_name, region_name="us-west-2", archive=False, synchronous=True, timeout_seconds=300): """ Delete a SageMaker application. :param app_name: Name of the deployed application. :param region_name: Name of the AWS region in which the application is deployed. - :param archive: If True, resources associated with the specified application, such - as its associated models and endpoint configuration, will be preserved. 
-    If False, these resources will be deleted.
-    """
+    :param archive: If ``True``, resources associated with the specified application, such
+                    as its associated models and endpoint configuration, are preserved.
+                    If ``False``, these resources are deleted. In order to use
+                    ``archive=False``, ``delete()`` must be executed synchronously with
+                    ``synchronous=True``.
+    :param synchronous: If ``True``, this function blocks until the deletion process succeeds
+                        or encounters an irrecoverable failure. If ``False``, this function
+                        returns immediately after starting the deletion process. It will not wait
+                        for the deletion process to complete; in this case, the caller is
+                        responsible for monitoring the status of the deletion process via native
+                        SageMaker APIs or the AWS console.
+    :param timeout_seconds: If ``synchronous`` is ``True``, the deletion process returns after the
+                            specified number of seconds if no definitive result (success or
+                            failure) is achieved. Once the function returns, the caller is
+                            responsible for monitoring the status of the deletion process via
+                            native SageMaker APIs or the AWS console. If ``synchronous`` is
+                            ``False``, this parameter is ignored.
+    """
+    import boto3
+    if (not archive) and (not synchronous):
+        raise MlflowException(
+            message=(
+                "Resources must be archived when `delete()` is executed in non-synchronous mode."
+                " Either set `synchronous=True` or `archive=True`."),
+            error_code=INVALID_PARAMETER_VALUE)
+
+    s3_client = boto3.client('s3', region_name=region_name)
     sage_client = boto3.client('sagemaker', region_name=region_name)
@@ -375,31 +376,64 @@ def delete(app_name, region_name="us-west-2", archive=False):
     endpoint_arn = endpoint_info["EndpointArn"]
     sage_client.delete_endpoint(EndpointName=app_name)
-    eprint("Deleted endpoint with arn: {earn}".format(earn=endpoint_arn))
+    _logger.info("Deleted endpoint with arn: %s", endpoint_arn)
+
+    def status_check_fn():
+        endpoint_info = _find_endpoint(endpoint_name=app_name, sage_client=sage_client)
+        if endpoint_info is not None:
+            return _SageMakerOperationStatus.in_progress(
+                "Deletion is still in progress.
Current endpoint status: {endpoint_status}".format( + endpoint_status=endpoint_info["EndpointStatus"])) + else: + return _SageMakerOperationStatus.succeeded( + "The SageMaker endpoint was deleted successfully.") - if not archive: + def cleanup_fn(): + _logger.info("Cleaning up unused resources...") config_name = endpoint_info["EndpointConfigName"] config_info = sage_client.describe_endpoint_config( EndpointConfigName=config_name) config_arn = config_info["EndpointConfigArn"] sage_client.delete_endpoint_config(EndpointConfigName=config_name) - eprint("Deleted associated endpoint configuration with arn: {carn}".format( - carn=config_arn)) + _logger.info("Deleted associated endpoint configuration with arn: %s", config_arn) for pv in config_info["ProductionVariants"]: model_name = pv["ModelName"] model_arn = _delete_sagemaker_model( model_name, sage_client, s3_client) - eprint("Deleted associated model with arn: {marn}".format( - marn=model_arn)) + _logger.info("Deleted associated model with arn: %s", model_arn) + + delete_operation = _SageMakerOperation(status_check_fn=status_check_fn, cleanup_fn=cleanup_fn) + + if synchronous: + _logger.info("Waiting for the delete operation to complete...") + operation_status = delete_operation.await_completion(timeout_seconds=timeout_seconds) + if operation_status.state == _SageMakerOperationStatus.STATE_SUCCEEDED: + _logger.info("The deletion operation completed successfully with message: \"%s\"", + operation_status.message) + else: + raise MlflowException( + "The deletion operation failed with the following error message:" + " \"{error_message}\"".format(error_message=operation_status.message)) + if not archive: + delete_operation.clean_up() -def run_local(model_path, run_id=None, port=5000, image=DEFAULT_IMAGE_NAME, flavor=None): +def run_local(model_uri, port=5000, image=DEFAULT_IMAGE_NAME, flavor=None): """ Serve model locally in a SageMaker compatible Docker container. - :param model_path: path to the model. Either local if no ``run_id`` or MLflow-relative if - ``run_id`` is specified. - :param run_id: MLflow run ID. + :param model_uri: The location, in URI format, of the MLflow model to serve locally, + for example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs://run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts `_. + :param port: Local port. :param image: Name of the Docker image to be used. :param flavor: The name of the flavor of the model to use for local serving. If ``None``, @@ -407,9 +441,7 @@ def run_local(model_path, run_id=None, port=5000, image=DEFAULT_IMAGE_NAME, flav specified flavor is not present or not supported for deployment, an exception is thrown. 
""" - if run_id: - model_path = _get_model_log_dir(model_path, run_id) - model_path = os.path.abspath(model_path) + model_path = _download_artifact_from_uri(model_uri) model_config_path = os.path.join(model_path, "MLmodel") model_config = Model.load(model_config_path) @@ -421,29 +453,35 @@ def run_local(model_path, run_id=None, port=5000, image=DEFAULT_IMAGE_NAME, flav deployment_config = _get_deployment_config(flavor_name=flavor) - eprint("launching docker image with path {}".format(model_path)) + _logger.info("launching docker image with path %s", model_path) cmd = ["docker", "run", "-v", "{}:/opt/ml/model/".format(model_path), "-p", "%d:8080" % port] for key, value in deployment_config.items(): cmd += ["-e", "{key}={value}".format(key=key, value=value)] cmd += ["--rm", image, "serve"] - eprint('executing', ' '.join(cmd)) - proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True) + _logger.info('executing: %s', ' '.join(cmd)) + proc = Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, universal_newlines=True) def _sigterm_handler(*_): - eprint("received termination signal => killing docker process") + _logger.info("received termination signal => killing docker process") proc.send_signal(signal.SIGINT) import signal signal.signal(signal.SIGTERM, _sigterm_handler) - for x in iter(proc.stdout.readline, ""): - eprint(x, end='') + proc.wait() def _get_default_image_url(region_name): + import boto3 env_img = os.environ.get(IMAGE_NAME_ENV_VAR) if env_img: return env_img + env_img = os.environ.get(DEPRECATED_IMAGE_NAME_ENV_VAR) + if env_img: + _logger.warning("Environment variable '%s' is deprecated, please use '%s' instead", + DEPRECATED_IMAGE_NAME_ENV_VAR, IMAGE_NAME_ENV_VAR) + return env_img + ecr_client = boto3.client("ecr", region_name=region_name) repository_conf = ecr_client.describe_repositories( repositoryNames=[DEFAULT_IMAGE_NAME])['repositories'][0] @@ -451,6 +489,7 @@ def _get_default_image_url(region_name): def _get_account_id(): + import boto3 sess = boto3.Session() sts_client = sess.client("sts") identity_info = sts_client.get_caller_identity() @@ -462,6 +501,7 @@ def _get_assumed_role_arn(): """ :return: ARN of the user's current IAM role. """ + import boto3 sess = boto3.Session() sts_client = sess.client("sts") identity_info = sts_client.get_caller_identity() @@ -473,6 +513,7 @@ def _get_assumed_role_arn(): def _get_default_s3_bucket(region_name): + import boto3 # create bucket if it does not exist sess = boto3.Session() account_id = _get_account_id() @@ -482,7 +523,7 @@ def _get_default_s3_bucket(region_name): response = s3.list_buckets() buckets = [b['Name'] for b in response["Buckets"]] if bucket_name not in buckets: - eprint("Default bucket `%s` not found. Creating..." % bucket_name) + _logger.info("Default bucket `%s` not found. Creating...", bucket_name) bucket_creation_kwargs = { 'ACL': 'bucket-owner-full-control', 'Bucket': bucket_name, @@ -497,10 +538,9 @@ def _get_default_s3_bucket(region_name): 'LocationConstraint': region_name } response = s3.create_bucket(**bucket_creation_kwargs) - eprint(response) + _logger.info("Bucket creation response: %s", response) else: - eprint("Default bucket `%s` already exists. Skipping creation." % - bucket_name) + _logger.info("Default bucket `%s` already exists. 
Skipping creation.", bucket_name) return bucket_name @@ -513,30 +553,32 @@ def _make_tarfile(output_filename, source_dir): tar.add(os.path.join(source_dir, f), arcname=f) -def _upload_s3(local_model_path, bucket, prefix): +def _upload_s3(local_model_path, bucket, prefix, region_name, s3_client): """ Upload dir to S3 as .tar.gz. :param local_model_path: Local path to a dir. :param bucket: S3 bucket where to store the data. :param prefix: Path within the bucket. + :param region_name: The AWS region in which to upload data to S3. + :param s3_client: A boto3 client for S3. :return: S3 path of the uploaded artifact. """ - sess = boto3.Session() + import boto3 + sess = boto3.Session(region_name=region_name) with TempDir() as tmp: model_data_file = tmp.path("model.tar.gz") _make_tarfile(model_data_file, local_model_path) - s3 = boto3.client('s3') with open(model_data_file, 'rb') as fobj: key = os.path.join(prefix, 'model.tar.gz') obj = sess.resource('s3').Bucket(bucket).Object(key) obj.upload_fileobj(fobj) - response = s3.put_object_tagging( + response = s3_client.put_object_tagging( Bucket=bucket, Key=key, Tagging={'TagSet': [{'Key': 'SageMaker', 'Value': 'true'}, ]} ) - eprint('tag response', response) - return '{}/{}/{}'.format(s3.meta.endpoint_url, bucket, key) + _logger.info('tag response: %s', response) + return '{}/{}/{}'.format(s3_client.meta.endpoint_url, bucket, key) def _get_deployment_config(flavor_name): @@ -547,131 +589,43 @@ def _get_deployment_config(flavor_name): return deployment_config -def _deploy(role, image_url, app_name, model_s3_path, run_id, region_name, mode, archive, - instance_type, instance_count, vpc_config, flavor): - """ - Deploy model on sagemaker. - :param role: SageMaker execution ARN role - :param image_url: URL of the ECR-hosted docker image the model is being deployed into - :param app_name: Name of the deployed app. - :param model_s3_path: S3 path where we stored the model artifacts. - :param run_id: Run ID that generated this model. - :param mode: The mode in which to deploy the application. - :param archive: If True, any pre-existing SageMaker application resources that become inactive - (i.e. as a result of deploying in mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE mode) - will be preserved. If False, these resources will be deleted. - :param instance_type: The type of SageMaker ML instance on which to deploy the model. - :param instance_count: The number of SageMaker ML instances on which to deploy the model. - :param vpc_config: A dictionary specifying the VPC configuration to use when creating the - new SageMaker model associated with this application. - :param flavor: The name of the flavor of the model to use for deployment. - """ - sage_client = boto3.client('sagemaker', region_name=region_name) - s3_client = boto3.client('s3', region_name=region_name) - - endpoints_page = sage_client.list_endpoints( - MaxResults=100, NameContains=app_name) - endpoint_found = (app_name in [endp["EndpointName"] - for endp in endpoints_page["Endpoints"]]) - while (not endpoint_found) and ("NextToken" in endpoints_page): - next_token = endpoints_page["NextToken"] - endpoints_page = sage_client.list_endpoints(MaxResults=100, - NextToken=next_token, - NameContains=app_name) - endpoint_found = any( - [ep["EndpointName"] == app_name for ep in endpoints_page["Endpoints"]]) - - if endpoint_found and mode == DEPLOYMENT_MODE_CREATE: - msg = ("You are attempting to deploy an application with name: `{an}` in `{mcr} `mode." 
- " However, an application with the same name already exists. If you want to update" - " this application, deploy in `{madd}` or `{mrep}` mode.").format( - an=app_name, - mcr=DEPLOYMENT_MODE_CREATE, - madd=DEPLOYMENT_MODE_ADD, - mrep=DEPLOYMENT_MODE_REPLACE) - raise Exception(msg) - elif endpoint_found: - return _update_sagemaker_endpoint(endpoint_name=app_name, - image_url=image_url, - model_s3_path=model_s3_path, - run_id=run_id, - flavor=flavor, - instance_type=instance_type, - instance_count=instance_count, - vpc_config=vpc_config, - mode=mode, - archive=archive, - role=role, - sage_client=sage_client, - s3_client=s3_client) - else: - return _create_sagemaker_endpoint(endpoint_name=app_name, - image_url=image_url, - model_s3_path=model_s3_path, - run_id=run_id, - flavor=flavor, - instance_type=instance_type, - instance_count=instance_count, - vpc_config=vpc_config, - role=role, - sage_client=sage_client) - - -def _get_sagemaker_resource_unique_id(): - """ - :return: A unique identifier that can be appended to a user-readable resource name to avoid - naming collisions. - """ - uuid_bytes = uuid.uuid4().bytes - # Use base64 encoding to shorten the UUID length. Note that the replacement of the - # unsupported '+' symbol maintains uniqueness because the UUID byte string is of a fixed, - # 32-byte length - uuid_b64 = base64.b64encode(uuid_bytes) - if sys.version_info >= (3, 0): - # In Python3, `uuid_b64` is a `bytes` object. It needs to be - # converted to a string - uuid_b64 = uuid_b64.decode("ascii") - uuid_b64 = uuid_b64.rstrip('=\n').replace("/", "-").replace("+", "AB") - return uuid_b64 - - def _get_sagemaker_model_name(endpoint_name): - unique_id = _get_sagemaker_resource_unique_id() - return "{en}-model-{uid}".format(en=endpoint_name, uid=unique_id) + return "{en}-model-{uid}".format(en=endpoint_name, uid=get_unique_resource_id()) def _get_sagemaker_config_name(endpoint_name): - unique_id = _get_sagemaker_resource_unique_id() - return "{en}-config-{uid}".format(en=endpoint_name, uid=unique_id) + return "{en}-config-{uid}".format(en=endpoint_name, uid=get_unique_resource_id()) -def _create_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id, flavor, - instance_type, vpc_config, instance_count, role, sage_client): +def _create_sagemaker_endpoint(endpoint_name, model_name, model_s3_path, model_uri, image_url, + flavor, instance_type, vpc_config, instance_count, role, + sage_client): """ - :param image_url: URL of the ECR-hosted docker image the model is being deployed into. + :param endpoint_name: The name of the SageMaker endpoint to create. + :param model_name: The name to assign the new SageMaker model that will be associated with the + specified endpoint. :param model_s3_path: S3 path where we stored the model artifacts. - :param run_id: Run ID that generated this model. + :param model_uri: URI of the MLflow model to associate with the specified SageMaker endpoint. + :param image_url: URL of the ECR-hosted docker image the model is being deployed into. :param flavor: The name of the flavor of the model to use for deployment. :param instance_type: The type of SageMaker ML instance on which to deploy the model. :param instance_count: The number of SageMaker ML instances on which to deploy the model. :param vpc_config: A dictionary specifying the VPC configuration to use when creating the new SageMaker model associated with this SageMaker endpoint. 
- :param role: SageMaker execution ARN role - :param sage_client: A boto3 client for SageMaker + :param role: SageMaker execution ARN role. + :param sage_client: A boto3 client for SageMaker. """ - eprint("Creating new endpoint with name: {en} ...".format( - en=endpoint_name)) + _logger.info("Creating new endpoint with name: %s ...", endpoint_name) - model_name = _get_sagemaker_model_name(endpoint_name) model_response = _create_sagemaker_model(model_name=model_name, model_s3_path=model_s3_path, + model_uri=model_uri, flavor=flavor, vpc_config=vpc_config, - run_id=run_id, image_url=image_url, execution_role=role, sage_client=sage_client) - eprint("Created model with arn: %s" % model_response["ModelArn"]) + _logger.info("Created model with arn: %s", model_response["ModelArn"]) production_variant = { 'VariantName': model_name, @@ -691,24 +645,53 @@ def _create_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id, }, ], ) - eprint("Created endpoint configuration with arn: %s" - % endpoint_config_response["EndpointConfigArn"]) + _logger.info("Created endpoint configuration with arn: %s", + endpoint_config_response["EndpointConfigArn"]) endpoint_response = sage_client.create_endpoint( EndpointName=endpoint_name, EndpointConfigName=config_name, Tags=[], ) - eprint("Created endpoint with arn: %s" % endpoint_response["EndpointArn"]) + _logger.info("Created endpoint with arn: %s", endpoint_response["EndpointArn"]) + + def status_check_fn(): + endpoint_info = _find_endpoint(endpoint_name=endpoint_name, sage_client=sage_client) + + if endpoint_info is None: + return _SageMakerOperationStatus.in_progress("Waiting for endpoint to be created...") + + endpoint_status = endpoint_info["EndpointStatus"] + if endpoint_status == "Creating": + return _SageMakerOperationStatus.in_progress( + "Waiting for endpoint to reach the \"InService\" state. Current endpoint status:" + " \"{endpoint_status}\"".format(endpoint_status=endpoint_status)) + elif endpoint_status == "InService": + return _SageMakerOperationStatus.succeeded( + "The SageMaker endpoint was created successfully.") + else: + failure_reason = endpoint_info.get( + "FailureReason", + ("An unknown SageMaker failure occurred. Please see the SageMaker console logs for" + " more information.")) + return _SageMakerOperationStatus.failed(failure_reason) + + def cleanup_fn(): + pass + return _SageMakerOperation(status_check_fn=status_check_fn, cleanup_fn=cleanup_fn) -def _update_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id, flavor, - instance_type, instance_count, vpc_config, mode, archive, role, + +def _update_sagemaker_endpoint(endpoint_name, model_name, model_uri, image_url, model_s3_path, + flavor, instance_type, instance_count, vpc_config, mode, role, sage_client, s3_client): """ + :param endpoint_name: The name of the SageMaker endpoint to update. + :param model_name: The name to assign the new SageMaker model that will be associated with the + specified endpoint. + :param model_uri: URI of the MLflow model to associate with the specified SageMaker endpoint. :param image_url: URL of the ECR-hosted Docker image the model is being deployed into :param model_s3_path: S3 path where we stored the model artifacts - :param run_id: Run ID that generated this model :param flavor: The name of the flavor of the model to use for deployment. :param instance_type: The type of SageMaker ML instance on which to deploy the model. :param instance_count: The number of SageMaker ML instances on which to deploy the model. 
@@ -716,9 +699,6 @@ def _update_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id,
                     new SageMaker model associated with this SageMaker endpoint.
 :param mode: either mlflow.sagemaker.DEPLOYMENT_MODE_ADD or
              mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE.
- :param archive: If True, any pre-existing SageMaker application resources that become inactive
-                 (i.e. as a result of deploying in mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE mode)
-                 will be preserved. If False, these resources will be deleted.
 :param role: SageMaker execution role ARN.
 :param sage_client: A boto3 client for SageMaker.
 :param s3_client: A boto3 client for S3.
@@ -736,19 +716,17 @@ def _update_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id,
 deployed_config_arn = deployed_config_info["EndpointConfigArn"]
 deployed_production_variants = deployed_config_info["ProductionVariants"]

- eprint("Found active endpoint with arn: {earn}. Updating...".format(
-     earn=endpoint_arn))
+ _logger.info("Found active endpoint with arn: %s. Updating...", endpoint_arn)

- new_model_name = _get_sagemaker_model_name(endpoint_name)
- new_model_response = _create_sagemaker_model(model_name=new_model_name,
+ new_model_response = _create_sagemaker_model(model_name=model_name,
                                              model_s3_path=model_s3_path,
+                                             model_uri=model_uri,
                                              flavor=flavor,
                                              vpc_config=vpc_config,
-                                             run_id=run_id,
                                              image_url=image_url,
                                              execution_role=role,
                                              sage_client=sage_client)
- eprint("Created new model with arn: %s" % new_model_response["ModelArn"])
+ _logger.info("Created new model with arn: %s", new_model_response["ModelArn"])

 if mode == DEPLOYMENT_MODE_ADD:
     new_model_weight = 0
@@ -758,8 +736,8 @@ def _update_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id,
     production_variants = []

 new_production_variant = {
-     'VariantName': new_model_name,
-     'ModelName': new_model_name,
+     'VariantName': model_name,
+     'ModelName': model_name,
     'InitialInstanceCount': instance_count,
     'InstanceType': instance_type,
     'InitialVariantWeight': new_model_weight
@@ -779,44 +757,70 @@ def _update_sagemaker_endpoint(endpoint_name, image_url, model_s3_path, run_id,
         },
     ],
 )
- eprint("Created new endpoint configuration with arn: %s"
-        % endpoint_config_response["EndpointConfigArn"])
+ _logger.info("Created new endpoint configuration with arn: %s",
+              endpoint_config_response["EndpointConfigArn"])
 sage_client.update_endpoint(EndpointName=endpoint_name,
                             EndpointConfigName=new_config_name)
- eprint("Updated endpoint with new configuration!")
+ _logger.info("Updated endpoint with new configuration!")
+
+ operation_start_time = time.time()
+
+ def status_check_fn():
+     if time.time() - operation_start_time < 20:
+         # Wait at least 20 seconds before checking the status of the update; this ensures
+         # that we don't consider the operation to have failed if small delays occur at
+         # initialization time
+         return _SageMakerOperationStatus.in_progress()
+
+     endpoint_info = sage_client.describe_endpoint(EndpointName=endpoint_name)
+     endpoint_update_was_rolled_back = (
+         endpoint_info["EndpointStatus"] == "InService"
+         and endpoint_info["EndpointConfigName"] != new_config_name)
+     if endpoint_update_was_rolled_back or endpoint_info["EndpointStatus"] == "Failed":
+         failure_reason = endpoint_info.get(
+             "FailureReason",
+             ("An unknown SageMaker failure occurred. Please see the SageMaker console logs for"
+              " more information."))
+         return _SageMakerOperationStatus.failed(failure_reason)
+     elif endpoint_info["EndpointStatus"] == "InService":
+         return _SageMakerOperationStatus.succeeded(
+             "The SageMaker endpoint was updated successfully.")
+     else:
+         return _SageMakerOperationStatus.in_progress(
+             "The update operation is still in progress. Current endpoint status:"
+             " \"{endpoint_status}\"".format(endpoint_status=endpoint_info["EndpointStatus"]))

- # If applicable, clean up unused models and old configurations
- if not archive:
-     eprint("Cleaning up unused resources...")
+ def cleanup_fn():
+     _logger.info("Cleaning up unused resources...")
     if mode == DEPLOYMENT_MODE_REPLACE:
-         s3_client = boto3.client('s3')
         for pv in deployed_production_variants:
             deployed_model_arn = _delete_sagemaker_model(model_name=pv["ModelName"],
                                                          sage_client=sage_client,
                                                          s3_client=s3_client)
-             eprint("Deleted model with arn: {marn}".format(
-                 marn=deployed_model_arn))
+             _logger.info("Deleted model with arn: %s", deployed_model_arn)
         sage_client.delete_endpoint_config(
             EndpointConfigName=deployed_config_name)
-         eprint("Deleted endpoint configuration with arn: {carn}".format(
-             carn=deployed_config_arn))
+         _logger.info("Deleted endpoint configuration with arn: %s", deployed_config_arn)
+ return _SageMakerOperation(status_check_fn=status_check_fn, cleanup_fn=cleanup_fn)


-def _create_sagemaker_model(model_name, model_s3_path, flavor, vpc_config, run_id, image_url,
+
+def _create_sagemaker_model(model_name, model_s3_path, model_uri, flavor, vpc_config, image_url,
                             execution_role, sage_client):
 """
- :param model_s3_path: S3 path where the model artifacts are stored
- :param flavor: The name of the flavor of the model
+ :param model_name: The name to assign the new SageMaker model that is created.
+ :param model_s3_path: S3 path where the model artifacts are stored.
+ :param model_uri: URI of the MLflow model associated with the new SageMaker model.
+ :param flavor: The name of the flavor of the model.
 :param vpc_config: A dictionary specifying the VPC configuration to use when creating the new
                    SageMaker model associated with this SageMaker endpoint.
- :param run_id: Run ID that generated this model
 :param image_url: URL of the ECR-hosted Docker image that will serve as the
-                   model's container
- :param execution_role: The ARN of the role that SageMaker will assume when creating the model
- :param sage_client: A boto3 client for SageMaker
- :return: AWS response containing metadata associated with the new model
+                   model's container.
+ :param execution_role: The ARN of the role that SageMaker will assume when creating the model.
+ :param sage_client: A boto3 client for SageMaker.
+ :return: AWS response containing metadata associated with the new model.
 """
 create_model_args = {
     "ModelName": model_name,
@@ -827,7 +831,7 @@ def _create_sagemaker_model(model_name, model_s3_path, flavor, vpc_config, run_i
         'Environment': _get_deployment_config(flavor_name=flavor),
     },
     "ExecutionRoleArn": execution_role,
-     "Tags": [{'Key': 'run_id', 'Value': str(run_id)}],
+     "Tags": [{'Key': 'model_uri', 'Value': str(model_uri)}],
 }
 if vpc_config is not None:
     create_model_args["VpcConfig"] = vpc_config
@@ -859,3 +863,115 @@ def _delete_sagemaker_model(model_name, sage_client, s3_client):
 sage_client.delete_model(ModelName=model_name)
 return model_arn
+
+
+def _delete_sagemaker_endpoint_configuration(endpoint_config_name, sage_client):
+ """
+ :param sage_client: A boto3 client for SageMaker.
+ :return: ARN of the deleted endpoint configuration.
+ """
+ endpoint_config_info = sage_client.describe_endpoint_config(
+     EndpointConfigName=endpoint_config_name)
+ sage_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
+ return endpoint_config_info["EndpointConfigArn"]
+
+
+def _find_endpoint(endpoint_name, sage_client):
+ """
+ Finds a SageMaker endpoint with the specified name in the caller's AWS account, returning
+ ``None`` if the endpoint is not found.
+
+ :param sage_client: A boto3 client for SageMaker.
+ :return: If the endpoint exists, a dictionary of endpoint attributes. If the endpoint does not
+          exist, ``None``.
+ """
+ endpoints_page = sage_client.list_endpoints(
+     MaxResults=100, NameContains=endpoint_name)
+
+ while True:
+     for endpoint in endpoints_page["Endpoints"]:
+         if endpoint["EndpointName"] == endpoint_name:
+             return endpoint
+
+     if "NextToken" in endpoints_page:
+         endpoints_page = sage_client.list_endpoints(MaxResults=100,
+                                                     NextToken=endpoints_page["NextToken"],
+                                                     NameContains=endpoint_name)
+     else:
+         return None
+
+
+class _SageMakerOperation:
+
+     def __init__(self, status_check_fn, cleanup_fn):
+         self.status_check_fn = status_check_fn
+         self.cleanup_fn = cleanup_fn
+         self.start_time = time.time()
+         self.status = _SageMakerOperationStatus(_SageMakerOperationStatus.STATE_IN_PROGRESS, None)
+         self.cleaned_up = False
+
+     def await_completion(self, timeout_seconds):
+         iteration = 0
+         begin = time.time()
+         while (time.time() - begin) < timeout_seconds:
+             status = self.status_check_fn()
+             if status.state == _SageMakerOperationStatus.STATE_IN_PROGRESS:
+                 if iteration % 4 == 0:
+                     # Log the progress status roughly every 20 seconds
+                     _logger.info(status.message)
+
+                 time.sleep(5)
+                 iteration += 1
+                 continue
+             else:
+                 self.status = status
+                 return status
+
+         duration_seconds = time.time() - begin
+         return _SageMakerOperationStatus.timed_out(duration_seconds)
+
+     def clean_up(self):
+         if self.status.state != _SageMakerOperationStatus.STATE_SUCCEEDED:
+             raise ValueError(
+                 "Cannot clean up an operation that has not succeeded! Current operation state:"
+                 " {operation_state}".format(operation_state=self.status.state))
+
+         if not self.cleaned_up:
+             self.cleaned_up = True
+         else:
+             raise ValueError("`clean_up()` has already been executed for this operation!")
+
+         self.cleanup_fn()
+
+
+class _SageMakerOperationStatus:
+
+     STATE_SUCCEEDED = "succeeded"
+     STATE_FAILED = "failed"
+     STATE_IN_PROGRESS = "in progress"
+     STATE_TIMED_OUT = "timed_out"
+
+     def __init__(self, state, message):
+         self.state = state
+         self.message = message
+
+     @classmethod
+     def in_progress(cls, message=None):
+         if message is None:
+             message = "The operation is still in progress."
+         return cls(_SageMakerOperationStatus.STATE_IN_PROGRESS, message)
+
+     @classmethod
+     def timed_out(cls, duration_seconds):
+         return cls(_SageMakerOperationStatus.STATE_TIMED_OUT,
+                    "Timed out after waiting {duration_seconds} seconds for the operation to"
+                    " complete. This operation may still be in progress. Please check the AWS"
+                    " console for more information.".format(duration_seconds=duration_seconds))
+
+     @classmethod
+     def failed(cls, message):
+         return cls(_SageMakerOperationStatus.STATE_FAILED, message)
+
+     @classmethod
+     def succeeded(cls, message):
+         return cls(_SageMakerOperationStatus.STATE_SUCCEEDED, message)
diff --git a/mlflow/sagemaker/cli.py b/mlflow/sagemaker/cli.py
index 13a7e5434cc83..cac9b9ae520b1 100644
--- a/mlflow/sagemaker/cli.py
+++ b/mlflow/sagemaker/cli.py
@@ -9,6 +9,7 @@
 import mlflow.sagemaker
 from mlflow.sagemaker import DEFAULT_IMAGE_NAME as IMAGE
 from mlflow.utils import cli_args
+import mlflow.models.docker_utils


 @click.group("sagemaker")
@@ -24,89 +25,129 @@ def commands():

 @commands.command("deploy")
 @click.option("--app-name", "-a", help="Application name", required=True)
-@cli_args.MODEL_PATH
+@cli_args.MODEL_URI
 @click.option("--execution-role-arn", "-e", default=None, help="SageMaker execution role")
 @click.option("--bucket", "-b", default=None, help="S3 bucket to store model artifacts")
-@cli_args.RUN_ID
 @click.option("--image-url", "-i", default=None, help="ECR URL for the Docker image")
 @click.option("--region-name", default="us-west-2",
               help="Name of the AWS region in which to deploy the application")
 @click.option("--mode", default=mlflow.sagemaker.DEPLOYMENT_MODE_CREATE,
               help="The mode in which to deploy the application."
-                   " Must be one of the following: {mds}".format(
-                       mds=", ".join(mlflow.sagemaker.DEPLOYMENT_MODES)))
-@click.option("--archive", "-ar", is_flag=True, help="If specified, any SageMaker resources that"
-              " become inactive (i.e as the result of replacement) will be preserved")
+                   " Must be one of the following: {mds}".format(
+                       mds=", ".join(mlflow.sagemaker.DEPLOYMENT_MODES)))
+@click.option("--archive", "-ar", is_flag=True,
+              help=("If specified, any SageMaker resources that become inactive (i.e. as the"
+                    " result of an update in {mode_replace} mode) are preserved."
+                    " These resources may include unused SageMaker models and endpoint"
+                    " configurations that were associated with a prior version of the application"
+                    " endpoint. Otherwise, if `--archive` is unspecified, these resources are"
+                    " deleted. `--archive` must be specified when deploying asynchronously with"
+                    " `--async`.".format(
+                        mode_replace=mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE)))
 @click.option("--instance-type", "-t", default=mlflow.sagemaker.DEFAULT_SAGEMAKER_INSTANCE_TYPE,
               help="The type of SageMaker ML instance on which to deploy the model. For a list of"
-              " supported instance types, see"
-              " https://aws.amazon.com/sagemaker/pricing/instance-types/.")
+                   " supported instance types, see"
+                   " https://aws.amazon.com/sagemaker/pricing/instance-types/.")
 @click.option("--instance-count", "-c", default=mlflow.sagemaker.DEFAULT_SAGEMAKER_INSTANCE_COUNT,
               help="The number of SageMaker ML instances on which to deploy the model")
 @click.option("--vpc-config", "-v",
               help="Path to a file containing a JSON-formatted VPC configuration. This"
-              " configuration will be used when creating the new SageMaker model associated"
-              " with this application. For more information, see"
-              " https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html")
+                   " configuration will be used when creating the new SageMaker model associated"
+                   " with this application. For more information, see"
+                   " https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html")
 @click.option("--flavor", "-f", default=None,
               help=("The name of the flavor to use for deployment. Must be one of the following:"
                     " {supported_flavors}. If unspecified, a flavor will be automatically selected"
                     " from the model's available flavors.".format(
-                        supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS)))
-def deploy(app_name, model_path, execution_role_arn, bucket, run_id, image_url, region_name, mode,
-           archive, instance_type, instance_count, vpc_config, flavor):
+                        supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS)))
+@click.option("--async", "asynchronous", is_flag=True,
+              help=("If specified, this command will return immediately after starting the"
+                    " deployment process. It will not wait for the deployment process to complete."
+                    " The caller is responsible for monitoring the deployment process via native"
+                    " SageMaker APIs or the AWS console."))
+@click.option("--timeout", default=1200,
+              help=("If the command is executed synchronously, the deployment process will return"
+                    " after the specified number of seconds if no definitive result (success or"
+                    " failure) is achieved. Once the function returns, the caller is responsible"
+                    " for monitoring the health and status of the pending deployment via"
+                    " native SageMaker APIs or the AWS console. If the command is executed"
+                    " asynchronously using the `--async` flag, this value is ignored."))
+def deploy(app_name, model_uri, execution_role_arn, bucket, image_url, region_name, mode, archive,
+           instance_type, instance_count, vpc_config, flavor, asynchronous, timeout):
     """
     Deploy model on Sagemaker as a REST API endpoint. Current active AWS account needs to have
     correct permissions setup.
+
+    By default, unless the ``--async`` flag is specified, this command will block until
+    either the deployment process completes (definitively succeeds or fails) or the specified
+    timeout elapses.
+
+    For more information about the input data formats accepted by the deployed REST API endpoint,
+    see the following documentation:
+    https://www.mlflow.org/docs/latest/models.html#sagemaker-deployment.
     """
     if vpc_config is not None:
         with open(vpc_config, "r") as f:
             vpc_config = json.load(f)
-    mlflow.sagemaker.deploy(app_name=app_name, model_path=model_path,
-                            execution_role_arn=execution_role_arn, bucket=bucket, run_id=run_id,
+    mlflow.sagemaker.deploy(app_name=app_name, model_uri=model_uri,
+                            execution_role_arn=execution_role_arn, bucket=bucket,
                             image_url=image_url, region_name=region_name, mode=mode,
                             archive=archive, instance_type=instance_type,
-                            instance_count=instance_count, vpc_config=vpc_config, flavor=flavor)
-
-
-@commands.command("list-flavors")
-def list_flavors():
-    print("Supported model flavors for SageMaker deployment are: {supported_flavors}".format(
-        supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS))
+                            instance_count=instance_count, vpc_config=vpc_config, flavor=flavor,
+                            synchronous=(not asynchronous), timeout_seconds=timeout)


 @commands.command("delete")
 @click.option("--app-name", "-a", help="Application name", required=True)
 @click.option("--region-name", "-r", default="us-west-2",
               help="Name of the AWS region in which to deploy the application.")
-@click.option("--archive", "-ar", is_flag=True, help="If specified, resources associated with"
-              " the application are preserved. Otherwise, these resources are deleted.")
-def delete(app_name, region_name, archive):
+@click.option("--archive", "-ar", is_flag=True,
+              help=("If specified, resources associated with the application are preserved."
+ " These resources may include unused SageMaker models and endpoint" + " configurations that were previously associated with the application endpoint." + " Otherwise, if `--archive` is unspecified, these resources are deleted." + " `--archive` must be specified when deleting asynchronously with `--async`.")) +@click.option("--async", "asynchronous", is_flag=True, + help=("If specified, this command will return immediately after starting the" + " deletion process. It will not wait for the deletion process to complete." + " The caller is responsible for monitoring the deletion process via native" + " SageMaker APIs or the AWS console.")) +@click.option("--timeout", default=1200, + help=("If the command is executed synchronously, the deployment process will return" + " after the specified number of seconds if no definitive result (success or" + " failure) is achieved. Once the function returns, the caller is responsible" + " for monitoring the health and status of the pending deployment via" + " native SageMaker APIs or the AWS console. If the command is executed" + " asynchronously using the `--async` flag, this value is ignored.")) +def delete(app_name, region_name, archive, asynchronous, timeout): """ - Delete the specified application. Unless ``archive`` is set to ``True``, all SageMaker resources + Delete the specified application. Unless ``--archive`` is specified, all SageMaker resources associated with the application are deleted as well. + + By default, unless the ``--async`` flag is specified, this command will block until + either the deletion process completes (definitively succeeds or fails) or the specified timeout + elapses. """ mlflow.sagemaker.delete( - app_name=app_name, region_name=region_name, archive=archive) + app_name=app_name, region_name=region_name, archive=archive, synchronous=(not asynchronous), + timeout_seconds=timeout) @commands.command("run-local") -@cli_args.MODEL_PATH -@cli_args.RUN_ID +@cli_args.MODEL_URI @click.option("--port", "-p", default=5000, help="Server port. [default: 5000]") @click.option("--image", "-i", default=IMAGE, help="Docker image name") @click.option("--flavor", "-f", default=None, help=("The name of the flavor to use for local serving. Must be one of the following:" " {supported_flavors}. If unspecified, a flavor will be automatically selected" " from the model's available flavors.".format( - supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS))) -def run_local(model_path, run_id, port, image, flavor): + supported_flavors=mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS))) +def run_local(model_uri, port, image, flavor): """ Serve model locally running in a Sagemaker-compatible Docker container. 
""" - mlflow.sagemaker.run_local( - model_path=model_path, run_id=run_id, port=port, image=image, flavor=flavor) + mlflow.sagemaker.run_local(model_uri=model_uri, port=port, image=image, flavor=flavor) @commands.command("build-and-push-container") @@ -125,8 +166,23 @@ def build_and_push_container(build, push, container, mlflow_home): if not (build or push): print("skipping both build and push, have nothing to do!") if build: - mlflow.sagemaker.build_image(container, - mlflow_home=os.path.abspath(mlflow_home) if mlflow_home - else None) + sagemaker_image_entrypoint = """ + ENTRYPOINT ["python", "-c", "import sys; from mlflow.models import container as C; \ + C._init(sys.argv[1])"] + """ + + def setup_container(_): + return "\n".join([ + 'ENV {disable_env}="false"', + 'RUN python -c "from mlflow.models.container import _install_pyfunc_deps;' + '_install_pyfunc_deps(None, False)"' + ]) + + mlflow.models.docker_utils._build_image( + container, + mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None, + entrypoint=sagemaker_image_entrypoint, + custom_setup_steps_hook=setup_container + ) if push: mlflow.sagemaker.push_image_to_ecr(container) diff --git a/mlflow/sagemaker/container/push_image_to_ecr.sh b/mlflow/sagemaker/push_image_to_ecr.sh similarity index 100% rename from mlflow/sagemaker/container/push_image_to_ecr.sh rename to mlflow/sagemaker/push_image_to_ecr.sh diff --git a/mlflow/server/__init__.py b/mlflow/server/__init__.py index dac8c93ebb365..80cb42bda216c 100644 --- a/mlflow/server/__init__.py +++ b/mlflow/server/__init__.py @@ -1,42 +1,34 @@ import os +import shlex +import sys -from flask import Flask, send_from_directory, make_response +from flask import Flask, send_from_directory from mlflow.server import handlers -from mlflow.server.handlers import get_artifact_handler +from mlflow.server.handlers import get_artifact_handler, STATIC_PREFIX_ENV_VAR, _add_static_prefix from mlflow.utils.process import exec_cmd -FILE_STORE_ENV_VAR = "MLFLOW_SERVER_FILE_STORE" -ARTIFACT_ROOT_ENV_VAR = "MLFLOW_SERVER_ARTIFACT_ROOT" -STATIC_PREFIX_ENV_VAR = "MLFLOW_STATIC_PREFIX" +# NB: These are intenrnal environment variables used for communication between +# the cli and the forked gunicorn processes. +BACKEND_STORE_URI_ENV_VAR = "_MLFLOW_SERVER_FILE_STORE" +ARTIFACT_ROOT_ENV_VAR = "_MLFLOW_SERVER_ARTIFACT_ROOT" REL_STATIC_DIR = "js/build" + app = Flask(__name__, static_folder=REL_STATIC_DIR) STATIC_DIR = os.path.join(app.root_path, REL_STATIC_DIR) + for http_path, handler, methods in handlers.get_endpoints(): app.add_url_rule(http_path, handler.__name__, handler, methods=methods) -def _add_static_prefix(route): - prefix = os.environ.get(STATIC_PREFIX_ENV_VAR) - if prefix: - return prefix + route - return route - - # Serve the "get-artifact" route. @app.route(_add_static_prefix('/get-artifact')) def serve_artifacts(): return get_artifact_handler() -# Serve the font awesome fonts for the React app -@app.route(_add_static_prefix('/webfonts/')) -def serve_webfonts(path): - return send_from_directory(STATIC_DIR, os.path.join('webfonts', path)) - - # We expect the react app to be built assuming it is hosted at /static-files, so that requests for # CSS/JS resources will be made to e.g. /static-files/main.css and we can handle them here. 
 @app.route(_add_static_prefix('/static-files/<path:path>'))
@@ -50,20 +42,42 @@ def serve():
     return send_from_directory(STATIC_DIR, 'index.html')

-def _run_server(file_store_path, default_artifact_root, host, port, workers, static_prefix):
+def _build_waitress_command(waitress_opts, host, port):
+    opts = shlex.split(waitress_opts) if waitress_opts else []
+    return ['waitress-serve'] + \
+        opts + [
+            "--host=%s" % host,
+            "--port=%s" % port,
+            "--ident=mlflow",
+            "mlflow.server:app"
+        ]
+
+
+def _build_gunicorn_command(gunicorn_opts, host, port, workers):
+    bind_address = "%s:%s" % (host, port)
+    opts = shlex.split(gunicorn_opts) if gunicorn_opts else []
+    return ["gunicorn"] + opts + ["-b", bind_address, "-w", "%s" % workers, "mlflow.server:app"]
+
+
+def _run_server(file_store_path, default_artifact_root, host, port, static_prefix=None,
+                workers=None, gunicorn_opts=None, waitress_opts=None):
     """
-    Run the MLflow server, wrapping it in gunicorn
+    Run the MLflow server, wrapping it in gunicorn (or waitress on Windows).
     :param static_prefix: If set, the index.html asset will be served from the path static_prefix.
                           If left None, the index.html asset will be served from the root path.
     :return: None
     """
     env_map = {}
     if file_store_path:
-        env_map[FILE_STORE_ENV_VAR] = file_store_path
+        env_map[BACKEND_STORE_URI_ENV_VAR] = file_store_path
     if default_artifact_root:
         env_map[ARTIFACT_ROOT_ENV_VAR] = default_artifact_root
     if static_prefix:
         env_map[STATIC_PREFIX_ENV_VAR] = static_prefix
-    bind_address = "%s:%s" % (host, port)
-    exec_cmd(["gunicorn", "-b", bind_address, "-w", "%s" % workers, "mlflow.server:app"],
-             env=env_map, stream_output=True)
+
+    # TODO: eventually may want waitress on non-win32
+    if sys.platform == 'win32':
+        full_command = _build_waitress_command(waitress_opts, host, port)
+    else:
+        full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4)
+    exec_cmd(full_command, env=env_map, stream_output=True)
diff --git a/mlflow/server/handlers.py b/mlflow/server/handlers.py
index f952166a6cba0..d34ced528049f 100644
--- a/mlflow/server/handlers.py
+++ b/mlflow/server/handlers.py
@@ -13,26 +13,57 @@
 from mlflow.protos import databricks_pb2
 from mlflow.protos.service_pb2 import CreateExperiment, MlflowService, GetExperiment, \
     GetRun, SearchRuns, ListArtifacts, GetMetricHistory, CreateRun, \
-    UpdateRun, LogMetric, LogParam, SetTag, ListExperiments, GetMetric, GetParam, \
-    DeleteExperiment, RestoreExperiment, RestoreRun, DeleteRun, UpdateExperiment
-from mlflow.store.artifact_repo import ArtifactRepository
-from mlflow.store.file_store import FileStore
+    UpdateRun, LogMetric, LogParam, SetTag, ListExperiments, \
+    DeleteExperiment, RestoreExperiment, RestoreRun, DeleteRun, UpdateExperiment, LogBatch, \
+    DeleteTag
+from mlflow.store.artifact_repository_registry import get_artifact_repository
+from mlflow.store.dbmodels.db_types import DATABASE_ENGINES
+from mlflow.tracking.registry import TrackingStoreRegistry
 from mlflow.utils.proto_json_utils import message_to_json, parse_dict
-
+from mlflow.utils.validation import _validate_batch_log_api_req

 _store = None
+STATIC_PREFIX_ENV_VAR = "_MLFLOW_STATIC_PREFIX"
+
+
+def _add_static_prefix(route):
+    prefix = os.environ.get(STATIC_PREFIX_ENV_VAR)
+    if prefix:
+        return prefix + route
+    return route
+
+
+def _get_file_store(store_uri, artifact_uri):
+    from mlflow.store.file_store import FileStore
+    return FileStore(store_uri, artifact_uri)
+
+
+def _get_sqlalchemy_store(store_uri, artifact_uri):
+    from mlflow.store.sqlalchemy_store import SqlAlchemyStore
+    return SqlAlchemyStore(store_uri, artifact_uri)
+
+
+_tracking_store_registry = TrackingStoreRegistry()
+_tracking_store_registry.register('', _get_file_store)
+_tracking_store_registry.register('file', _get_file_store)
+for scheme in DATABASE_ENGINES:
+    _tracking_store_registry.register(scheme, _get_sqlalchemy_store)

-def _get_store():
-    from mlflow.server import FILE_STORE_ENV_VAR, ARTIFACT_ROOT_ENV_VAR
+def _get_store(backend_store_uri=None, default_artifact_root=None):
+    from mlflow.server import BACKEND_STORE_URI_ENV_VAR, ARTIFACT_ROOT_ENV_VAR
     global _store
     if _store is None:
-        store_dir = os.environ.get(FILE_STORE_ENV_VAR, os.path.abspath("mlruns"))
-        artifact_root = os.environ.get(ARTIFACT_ROOT_ENV_VAR, store_dir)
-        _store = FileStore(store_dir, artifact_root)
+        store_uri = backend_store_uri or os.environ.get(BACKEND_STORE_URI_ENV_VAR, None)
+        artifact_root = default_artifact_root or os.environ.get(ARTIFACT_ROOT_ENV_VAR, None)
+        _store = _tracking_store_registry.get_store(store_uri, artifact_root)
     return _store

+def _get_request_json(flask_request=request):
+    return flask_request.get_json(force=True, silent=True)
+
+
 def _get_request_message(request_message, flask_request=request):
     if flask_request.method == 'GET' and len(flask_request.query_string) > 0:
         # This is a hack to make arrays of length 1 work with the parser.
@@ -45,7 +76,7 @@ def _get_request_message(request_message, flask_request=request):
         parse_dict(request_dict, request_message)
         return request_message

-    request_json = flask_request.get_json(force=True, silent=True)
+    request_json = _get_request_json(flask_request)

     # Older clients may post their JSON double-encoded as strings, so the get_json
     # above actually converts it to a string. Therefore, we check this condition
@@ -69,7 +100,7 @@ def wrapper(*args, **kwargs):
         except MlflowException as e:
             response = Response(mimetype='application/json')
             response.set_data(e.serialize_as_json())
-            response.status_code = 500
+            response.status_code = e.get_http_status_code()
             return response
     return wrapper
@@ -90,13 +121,16 @@ def get_handler(request_class):
 def get_artifact_handler():
     query_string = request.query_string.decode('utf-8')
     request_dict = parser.parse(query_string, normalized=True)
-    run = _get_store().get_run(request_dict['run_uuid'])
+    run_id = request_dict.get('run_id') or request_dict.get('run_uuid')
+    run = _get_store().get_run(run_id)
     filename = os.path.abspath(_get_artifact_repo(run).download_artifacts(request_dict['path']))
     extension = os.path.splitext(filename)[-1].replace(".", "")
+    # Always send artifacts as attachments to prevent the browser from displaying them on our web
+    # server's domain, which might enable XSS.
if extension in _TEXT_EXTENSIONS: - return send_file(filename, mimetype='text/plain') + return send_file(filename, mimetype='text/plain', as_attachment=True) else: - return send_file(filename) + return send_file(filename, as_attachment=True) def _not_implemented(): @@ -121,8 +155,8 @@ def _create_experiment(): def _get_experiment(): request_message = _get_request_message(GetExperiment()) response_message = GetExperiment.Response() - response_message.experiment.MergeFrom(_get_store().get_experiment(request_message.experiment_id) - .to_proto()) + experiment = _get_store().get_experiment(request_message.experiment_id).to_proto() + response_message.experiment.MergeFrom(experiment) run_info_entities = _get_store().list_run_infos(request_message.experiment_id, run_view_type=ViewType.ACTIVE_ONLY) response_message.runs.extend([r.to_proto() for r in run_info_entities]) @@ -170,14 +204,8 @@ def _create_run(): run = _get_store().create_run( experiment_id=request_message.experiment_id, user_id=request_message.user_id, - run_name=request_message.run_name, - source_type=request_message.source_type, - source_name=request_message.source_name, - entry_point_name=request_message.entry_point_name, start_time=request_message.start_time, - source_version=request_message.source_version, - tags=tags, - parent_run_id=request_message.parent_run_id) + tags=tags) response_message = CreateRun.Response() response_message.run.MergeFrom(run.to_proto()) @@ -189,7 +217,8 @@ def _create_run(): @catch_mlflow_exception def _update_run(): request_message = _get_request_message(UpdateRun()) - updated_info = _get_store().update_run_info(request_message.run_uuid, request_message.status, + run_id = request_message.run_id or request_message.run_uuid + updated_info = _get_store().update_run_info(run_id, request_message.status, request_message.end_time) response_message = UpdateRun.Response(run_info=updated_info.to_proto()) response = Response(mimetype='application/json') @@ -220,8 +249,10 @@ def _restore_run(): @catch_mlflow_exception def _log_metric(): request_message = _get_request_message(LogMetric()) - metric = Metric(request_message.key, request_message.value, request_message.timestamp) - _get_store().log_metric(request_message.run_uuid, metric) + metric = Metric(request_message.key, request_message.value, request_message.timestamp, + request_message.step) + run_id = request_message.run_id or request_message.run_uuid + _get_store().log_metric(run_id, metric) response_message = LogMetric.Response() response = Response(mimetype='application/json') response.set_data(message_to_json(response_message)) @@ -232,7 +263,8 @@ def _log_metric(): def _log_param(): request_message = _get_request_message(LogParam()) param = Param(request_message.key, request_message.value) - _get_store().log_param(request_message.run_uuid, param) + run_id = request_message.run_id or request_message.run_uuid + _get_store().log_param(run_id, param) response_message = LogParam.Response() response = Response(mimetype='application/json') response.set_data(message_to_json(response_message)) @@ -243,18 +275,30 @@ def _log_param(): def _set_tag(): request_message = _get_request_message(SetTag()) tag = RunTag(request_message.key, request_message.value) - _get_store().set_tag(request_message.run_uuid, tag) + run_id = request_message.run_id or request_message.run_uuid + _get_store().set_tag(run_id, tag) response_message = SetTag.Response() response = Response(mimetype='application/json') response.set_data(message_to_json(response_message)) return response 
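(Editor's note: the handlers above and below repeatedly resolve the run identifier as `request_message.run_id or request_message.run_uuid`, preferring the newer `run_id` field while still accepting the deprecated `run_uuid` sent by older clients. A small self-contained sketch of that fallback; `FakeRequest` is a hypothetical stand-in, since the real handlers operate on protobuf messages parsed by `_get_request_message`:)

```python
from collections import namedtuple

# Hypothetical stand-in for a parsed protobuf request message; real handlers
# obtain one via _get_request_message(LogMetric()) and similar calls.
FakeRequest = namedtuple("FakeRequest", ["run_id", "run_uuid"])


def resolve_run_id(request_message):
    # Protobuf string fields default to "", which is falsy, so `or` falls
    # back to the legacy run_uuid field only when run_id is unset.
    return request_message.run_id or request_message.run_uuid


assert resolve_run_id(FakeRequest(run_id="abc123", run_uuid="")) == "abc123"
assert resolve_run_id(FakeRequest(run_id="", run_uuid="legacy456")) == "legacy456"
```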
+@catch_mlflow_exception +def _delete_tag(): + request_message = _get_request_message(DeleteTag()) + _get_store().delete_tag(request_message.run_id, request_message.key) + response_message = DeleteTag.Response() + response = Response(mimetype='application/json') + response.set_data(message_to_json(response_message)) + return response + + @catch_mlflow_exception def _get_run(): request_message = _get_request_message(GetRun()) response_message = GetRun.Response() - response_message.run.MergeFrom(_get_store().get_run(request_message.run_uuid).to_proto()) + run_id = request_message.run_id or request_message.run_uuid + response_message.run.MergeFrom(_get_store().get_run(run_id).to_proto()) response = Response(mimetype='application/json') response.set_data(message_to_json(response_message)) return response @@ -267,10 +311,16 @@ def _search_runs(): run_view_type = ViewType.ACTIVE_ONLY if request_message.HasField('run_view_type'): run_view_type = ViewType.from_proto(request_message.run_view_type) - run_entities = _get_store().search_runs(request_message.experiment_ids, - request_message.anded_expressions, - run_view_type) + filter_string = request_message.filter + max_results = request_message.max_results + experiment_ids = request_message.experiment_ids + order_by = request_message.order_by + page_token = request_message.page_token + run_entities = _get_store().search_runs(experiment_ids, filter_string, run_view_type, + max_results, order_by, page_token) response_message.runs.extend([r.to_proto() for r in run_entities]) + if run_entities.token: + response_message.next_page_token = run_entities.token response = Response(mimetype='application/json') response.set_data(message_to_json(response_message)) return response @@ -284,7 +334,8 @@ def _list_artifacts(): path = request_message.path else: path = None - run = _get_store().get_run(request_message.run_uuid) + run_id = request_message.run_id or request_message.run_uuid + run = _get_store().get_run(run_id) artifact_entities = _get_artifact_repo(run).list_artifacts(path) response_message.files.extend([a.to_proto() for a in artifact_entities]) response_message.root_uri = _get_artifact_repo(run).artifact_uri @@ -297,7 +348,8 @@ def _list_artifacts(): def _get_metric_history(): request_message = _get_request_message(GetMetricHistory()) response_message = GetMetricHistory.Response() - metric_entites = _get_store().get_metric_history(request_message.run_uuid, + run_id = request_message.run_id or request_message.run_uuid + metric_entites = _get_store().get_metric_history(run_id, request_message.metric_key) response_message.metrics.extend([m.to_proto() for m in metric_entites]) response = Response(mimetype='application/json') @@ -305,28 +357,6 @@ def _get_metric_history(): return response -@catch_mlflow_exception -def _get_metric(): - request_message = _get_request_message(GetMetric()) - response_message = GetMetric.Response() - metric = _get_store().get_metric(request_message.run_uuid, request_message.metric_key) - response_message.metric.MergeFrom(metric.to_proto()) - response = Response(mimetype='application/json') - response.set_data(message_to_json(response_message)) - return response - - -@catch_mlflow_exception -def _get_param(): - request_message = _get_request_message(GetParam()) - response_message = GetParam.Response() - parameter = _get_store().get_param(request_message.run_uuid, request_message.param_name) - response_message.parameter.MergeFrom(parameter.to_proto()) - response = Response(mimetype='application/json') - 
response.set_data(message_to_json(response_message)) - return response - - @catch_mlflow_exception def _list_experiments(): request_message = _get_request_message(ListExperiments()) @@ -340,15 +370,21 @@ def _list_experiments(): @catch_mlflow_exception def _get_artifact_repo(run): - store = _get_store() - if run.info.artifact_uri: - return ArtifactRepository.from_artifact_uri(run.info.artifact_uri, store) + return get_artifact_repository(run.info.artifact_uri) + - # TODO(aaron) Remove this once everyone locally only has runs from after - # the introduction of "artifact_uri". - uri = os.path.join(store.root_directory, str(run.info.experiment_id), - run.info.run_uuid, "artifacts") - return ArtifactRepository.from_artifact_uri(uri, store) +@catch_mlflow_exception +def _log_batch(): + _validate_batch_log_api_req(_get_request_json()) + request_message = _get_request_message(LogBatch()) + metrics = [Metric.from_proto(proto_metric) for proto_metric in request_message.metrics] + params = [Param.from_proto(proto_param) for proto_param in request_message.params] + tags = [RunTag.from_proto(proto_tag) for proto_tag in request_message.tags] + _get_store().log_batch(run_id=request_message.run_id, metrics=metrics, params=params, tags=tags) + response_message = LogBatch.Response() + response = Response(mimetype='application/json') + response.set_data(message_to_json(response_message)) + return response def _get_paths(base_path): @@ -357,7 +393,7 @@ def _get_paths(base_path): We should register paths like /api/2.0/preview/mlflow/experiment and /ajax-api/2.0/preview/mlflow/experiment in the Flask router. """ - return ['/api/2.0{}'.format(base_path), '/ajax-api/2.0{}'.format(base_path)] + return ['/api/2.0{}'.format(base_path), _add_static_prefix('/ajax-api/2.0{}'.format(base_path))] def get_endpoints(): @@ -388,11 +424,11 @@ def get_endpoints(): LogParam: _log_param, LogMetric: _log_metric, SetTag: _set_tag, + DeleteTag: _delete_tag, + LogBatch: _log_batch, GetRun: _get_run, SearchRuns: _search_runs, ListArtifacts: _list_artifacts, GetMetricHistory: _get_metric_history, ListExperiments: _list_experiments, - GetParam: _get_param, - GetMetric: _get_metric, } diff --git a/mlflow/server/js/.eslintrc.js b/mlflow/server/js/.eslintrc.js index b19a8d7e65219..59b49f082bef4 100644 --- a/mlflow/server/js/.eslintrc.js +++ b/mlflow/server/js/.eslintrc.js @@ -14,6 +14,7 @@ module.exports = { 'env': { 'es6': true, 'browser': true, + 'jest': true, }, 'globals': { 'GridStackUI': true, @@ -209,7 +210,7 @@ module.exports = { 'no-param-reassign': 2, 'no-path-concat': 2, 'no-plusplus': 0, - 'no-process-env': 2, + 'no-process-env': 0, 'no-process-exit': 2, 'no-proto': 2, 'no-prototype-builtins': 0, diff --git a/mlflow/server/js/config-overrides.js b/mlflow/server/js/config-overrides.js index 031014c880df6..a9aa2335db2fd 100644 --- a/mlflow/server/js/config-overrides.js +++ b/mlflow/server/js/config-overrides.js @@ -2,6 +2,7 @@ const url = require('url'); const path = require('path'); const fs = require('fs'); const rewirePolyfills = require('react-app-rewire-polyfills'); +const rewireDefinePlugin = require('react-app-rewire-define-plugin') // copied from 'react-dev-utils/WebpackDevServerUtils' function mayProxy(pathname) { @@ -36,7 +37,19 @@ function rewriteCookies(proxyRes) { module.exports = { webpack: function(config, env) { - return rewirePolyfills(config, env); + config = rewirePolyfills(config, env); + config = rewireDefinePlugin(config, env, { + 'process.env': { + 'HIDE_HEADER': process.env.HIDE_HEADER ? 
JSON.stringify('true') : JSON.stringify('false'), + 'HIDE_EXPERIMENT_LIST': + process.env.HIDE_EXPERIMENT_LIST ? JSON.stringify('true') : JSON.stringify('false'), + 'SHOW_GDPR_PURGING_MESSAGES': + process.env.SHOW_GDPR_PURGING_MESSAGES ? JSON.stringify('true') : JSON.stringify('false'), + 'USE_ABSOLUTE_AJAX_URLS': + process.env.USE_ABSOLUTE_AJAX_URLS ? JSON.stringify('true') : JSON.stringify('false'), + } + }); + return config; }, devServer: function(configFunction) { diff --git a/mlflow/server/js/lint.sh b/mlflow/server/js/lint.sh index a46e3a66bfef9..bc603da998d27 100755 --- a/mlflow/server/js/lint.sh +++ b/mlflow/server/js/lint.sh @@ -1,2 +1,2 @@ #!/usr/bin/env bash -./node_modules/.bin/eslint src +./node_modules/.bin/eslint src --fix diff --git a/mlflow/server/js/package-lock.json b/mlflow/server/js/package-lock.json index cfa9e98eb44b2..372ce3ddfa36b 100644 --- a/mlflow/server/js/package-lock.json +++ b/mlflow/server/js/package-lock.json @@ -4,6 +4,43 @@ "lockfileVersion": 1, "requires": true, "dependencies": { + "3d-view": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/3d-view/-/3d-view-2.0.0.tgz", + "integrity": "sha1-gxrpQtdQjFCAHj4G+v4ejFdOF74=", + "requires": { + "matrix-camera-controller": "^2.1.1", + "orbit-camera-controller": "^4.0.0", + "turntable-camera-controller": "^3.0.0" + } + }, + "3d-view-controls": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/3d-view-controls/-/3d-view-controls-2.2.2.tgz", + "integrity": "sha512-WL0u3PN41lEx/4qvKqV6bJlweUYoW18FXMshW/qHb41AVdZxDReLoJNGYsI7x6jf9bYelEF62BJPQmO7yEnG2w==", + "requires": { + "3d-view": "^2.0.0", + "has-passive-events": "^1.0.0", + "mouse-change": "^1.1.1", + "mouse-event-offset": "^3.0.2", + "mouse-wheel": "^1.0.2", + "right-now": "^1.0.0" + } + }, + "@ant-design/icons": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@ant-design/icons/-/icons-1.2.1.tgz", + "integrity": "sha512-gQx3nH6m1xvebOWh5xibhzVK02aoqHY7JUXUS4doAidSDRWsj5iwKC8Gq9DemDZ4T+bW6xO7jJZN1UsbvcW7Uw==" + }, + "@ant-design/icons-react": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ant-design/icons-react/-/icons-react-1.1.2.tgz", + "integrity": "sha512-7Fgt9d8ABgxrhZxsFjHk/VpPcxodQJJhbJO8Lsh7u58pGN4NoxxW++92naeGTXCyqZsbDPBReP+SC0bdBtbsGQ==", + "requires": { + "ant-design-palettes": "^1.1.3", + "babel-runtime": "^6.26.0" + } + }, "@babel/code-frame": { "version": "7.0.0-rc.1", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.0.0-rc.1.tgz", @@ -179,17 +216,128 @@ } } }, + "@choojs/findup": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@choojs/findup/-/findup-0.2.1.tgz", + "integrity": "sha512-YstAqNb0MCN8PjdLCDfRsBcGVRN41f3vgLvaI0IrIcBp4AqILRSS0DeWNGkicC+f/zRIPJLc+9RURVSepwvfBw==", + "requires": { + "commander": "^2.15.1" + } + }, + "@mapbox/geojson-area": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@mapbox/geojson-area/-/geojson-area-0.2.2.tgz", + "integrity": "sha1-GNeBSqNr8j+7zDefjiaiKSfevxA=", + "requires": { + "wgs84": "0.0.0" + } + }, + "@mapbox/gl-matrix": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/@mapbox/gl-matrix/-/gl-matrix-0.0.1.tgz", + "integrity": "sha1-5RJqq01kw2uBx6l9CuDd3eV3PSs=" + }, + "@mapbox/jsonlint-lines-primitives": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@mapbox/jsonlint-lines-primitives/-/jsonlint-lines-primitives-2.0.2.tgz", + "integrity": "sha1-zlblOfg1UrWNENZy6k1vya3HsjQ=" + }, + "@mapbox/mapbox-gl-supported": { + "version": "1.4.0", + 
"resolved": "https://registry.npmjs.org/@mapbox/mapbox-gl-supported/-/mapbox-gl-supported-1.4.0.tgz", + "integrity": "sha512-ZD0Io4XK+/vU/4zpANjOtdWfVszAgnaMPsGR6LKsWh4kLIEv9qoobTVmJPPuwuM+ZI2b3BlZ6DYw1XHVmv6YTA==" + }, + "@mapbox/point-geometry": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@mapbox/point-geometry/-/point-geometry-0.1.0.tgz", + "integrity": "sha1-ioP5M1x4YO/6Lu7KJUMyqgru2PI=" + }, + "@mapbox/shelf-pack": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@mapbox/shelf-pack/-/shelf-pack-3.2.0.tgz", + "integrity": "sha512-dyQxe6ukILV6qaEvxoKCIwhblgRjYp1ZGlClo4xvfbmxzFO5LYu7Tnrg2AZrRgN7VsSragsGcNjzUe9kCdKHYQ==" + }, + "@mapbox/tiny-sdf": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@mapbox/tiny-sdf/-/tiny-sdf-1.1.0.tgz", + "integrity": "sha512-dnhyk8X2BkDRWImgHILYAGgo+kuciNYX30CUKj/Qd5eNjh54OWM/mdOS/PWsPeN+3abtN+QDGYM4G220ynVJKA==" + }, + "@mapbox/unitbezier": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/@mapbox/unitbezier/-/unitbezier-0.0.0.tgz", + "integrity": "sha1-FWUb1VOme4WB+zmIEMmK2Go0Uk4=" + }, + "@mapbox/vector-tile": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@mapbox/vector-tile/-/vector-tile-1.3.1.tgz", + "integrity": "sha512-MCEddb8u44/xfQ3oD+Srl/tNcQoqTw3goGk2oLsrFxOTc3dUp+kAnby3PvAeeBYSMSjSPD1nd1AJA6W49WnoUw==", + "requires": { + "@mapbox/point-geometry": "~0.1.0" + } + }, + "@mapbox/whoots-js": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@mapbox/whoots-js/-/whoots-js-3.1.0.tgz", + "integrity": "sha512-Es6WcD0nO5l+2BOQS4uLfNPYQaNDfbot3X1XUoloz+x0mPDS3eeORZJl06HXjwBG1fOGwCRnzK88LMdxKRrd6Q==" + }, + "@plotly/d3-sankey": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/@plotly/d3-sankey/-/d3-sankey-0.5.1.tgz", + "integrity": "sha512-uMToNGexOSLG0hBm+uAzElfFW0Pt2utgJ//puL5nuerNnPnRTTe3Un7XFVcWqRhvXEViF00Xq/8wGoA8i8eZJA==", + "requires": { + "d3-array": "1", + "d3-collection": "1", + "d3-interpolate": "1" + } + }, "@types/node": { "version": "10.9.2", "resolved": "https://registry.npmjs.org/@types/node/-/node-10.9.2.tgz", "integrity": "sha512-pwZnkVyCGJ3LsQ0/3flQK5lCFao4esIzwUVzzk5NvL9vnkEyDhNf4fhHzUMHvyr56gNZywWTS2MR0euabMSz4A==", "dev": true }, + "@types/prop-types": { + "version": "15.7.0", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.0.tgz", + "integrity": "sha512-eItQyV43bj4rR3JPV0Skpl1SncRCdziTEK9/v8VwXmV6d/qOUO8/EuWeHBbCZcsfSHfzI5UyMJLCSXtxxznyZg==" + }, + "@types/react": { + "version": "16.8.8", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.8.8.tgz", + "integrity": "sha512-xwEvyet96u7WnB96kqY0yY7qxx/pEpU51QeACkKFtrgjjXITQn0oO1iwPEraXVgh10ZFPix7gs1R4OJXF7P5sg==", + "requires": { + "@types/prop-types": "*", + "csstype": "^2.2.0" + } + }, + "@types/react-slick": { + "version": "0.23.3", + "resolved": "https://registry.npmjs.org/@types/react-slick/-/react-slick-0.23.3.tgz", + "integrity": "sha512-B6wU5ynINOolrByhoeJ448qZPjCFPcuhyQI5sjihjG8gQJuoTH6a4YQhuDm4umvbRVielJQANhptc8hmxA85IA==", + "requires": { + "@types/react": "*" + } + }, + "a-big-triangle": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/a-big-triangle/-/a-big-triangle-1.0.3.tgz", + "integrity": "sha1-7v0wsCqPUl6LH3K7a7GwwWdRx5Q=", + "requires": { + "gl-buffer": "^2.1.1", + "gl-vao": "^1.2.0", + "weak-map": "^1.0.5" + } + }, "abab": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/abab/-/abab-1.0.4.tgz", "integrity": "sha1-X6rZwsB/YN12dw9xzwJbYqY8/U4=" }, + "abs-svg-path": { + 
"version": "0.1.1", + "resolved": "https://registry.npmjs.org/abs-svg-path/-/abs-svg-path-0.1.1.tgz", + "integrity": "sha1-32Acjo0roQ1KdtYl4japo5wnI78=" + }, "accepts": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz", @@ -249,11 +397,35 @@ } } }, + "add-dom-event-listener": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/add-dom-event-listener/-/add-dom-event-listener-1.1.0.tgz", + "integrity": "sha512-WCxx1ixHT0GQU9hb0KI/mhgRQhnU+U3GvwY6ZvVjYq8rsihIGoaIOUbY0yMPBxLH5MDtr0kz3fisWGNcbWW7Jw==", + "requires": { + "object-assign": "4.x" + } + }, + "add-line-numbers": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/add-line-numbers/-/add-line-numbers-1.0.1.tgz", + "integrity": "sha1-SNu96kfb0jTer+rGyTzqb3C0t+M=", + "requires": { + "pad-left": "^1.0.2" + } + }, "address": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/address/-/address-1.0.3.tgz", "integrity": "sha512-z55ocwKBRLryBs394Sm3ushTtBeg6VAeuku7utSoSnsJKvKcnXFIyC6vh27n3rXyxSgkJBBCAvyOn7gSUcTYjg==" }, + "affine-hull": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/affine-hull/-/affine-hull-1.0.0.tgz", + "integrity": "sha1-dj/x040GPOt+Jy8X7k17vK+QXF0=", + "requires": { + "robust-orientation": "^1.1.3" + } + }, "ajv": { "version": "5.5.2", "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", @@ -290,6 +462,29 @@ } } }, + "almost-equal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/almost-equal/-/almost-equal-1.1.0.tgz", + "integrity": "sha1-+FHGMROHV5lCdqou++jfowZszN0=" + }, + "alpha-complex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/alpha-complex/-/alpha-complex-1.0.0.tgz", + "integrity": "sha1-kIZYcNawVCrnPAwTHU75iWabctI=", + "requires": { + "circumradius": "^1.0.0", + "delaunay-triangulate": "^1.1.6" + } + }, + "alpha-shape": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/alpha-shape/-/alpha-shape-1.0.0.tgz", + "integrity": "sha1-yDEJkj7P2mZ9IWP+Tyb+JHJvZKk=", + "requires": { + "alpha-complex": "^1.0.0", + "simplicial-complex-boundary": "^1.0.0" + } + }, "alphanum-sort": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", @@ -331,6 +526,130 @@ "color-convert": "^1.9.0" } }, + "ansicolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.2.1.tgz", + "integrity": "sha1-vgiVmQl7dKXJxKhKDNvNtivYeu8=" + }, + "ant-design-palettes": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/ant-design-palettes/-/ant-design-palettes-1.1.3.tgz", + "integrity": "sha512-UpkkTp8egEN21KZNvY7sTcabLlkHvLvS71EVPk4CYi77Z9AaGGCaVn7i72tbOgWDrQp2wjIg8WgMbKBdK7GtWA==", + "requires": { + "tinycolor2": "^1.4.1" + } + }, + "antd": { + "version": "3.15.2", + "resolved": "https://registry.npmjs.org/antd/-/antd-3.15.2.tgz", + "integrity": "sha512-yKN6v5j3znt/JFUJ8LFny179medv1ZlyKmQdtVvbOKuQN/VEnIo79fKVzmduacU+QMS/SJDuJ+oXD9zIuodPQQ==", + "requires": { + "@ant-design/icons": "~1.2.0", + "@ant-design/icons-react": "~1.1.2", + "@types/react-slick": "^0.23.3", + "array-tree-filter": "^2.1.0", + "babel-runtime": "6.x", + "classnames": "~2.2.6", + "copy-to-clipboard": "^3.0.8", + "create-react-class": "^15.6.3", + "create-react-context": "0.2.2", + "css-animation": "^1.5.0", + "dom-closest": "^0.2.0", + "enquire.js": "^2.1.6", + "lodash": "^4.17.11", + "moment": "^2.24.0", + "omit.js": "^1.0.0", + "prop-types": "^15.6.2", + "raf": "^3.4.0", + "rc-animate": "^2.5.4", + "rc-calendar": 
"~9.10.3", + "rc-cascader": "~0.17.0", + "rc-checkbox": "~2.1.5", + "rc-collapse": "~1.11.1", + "rc-dialog": "~7.3.0", + "rc-drawer": "~1.7.6", + "rc-dropdown": "~2.4.1", + "rc-editor-mention": "^1.1.7", + "rc-form": "^2.4.0", + "rc-input-number": "~4.4.0", + "rc-menu": "~7.4.12", + "rc-notification": "~3.3.0", + "rc-pagination": "~1.17.7", + "rc-progress": "~2.3.0", + "rc-rate": "~2.5.0", + "rc-select": "~9.0.0", + "rc-slider": "~8.6.5", + "rc-steps": "~3.3.0", + "rc-switch": "~1.9.0", + "rc-table": "~6.4.0", + "rc-tabs": "~9.6.0", + "rc-time-picker": "~3.6.1", + "rc-tooltip": "~3.7.3", + "rc-tree": "~1.14.6", + "rc-tree-select": "~2.6.0", + "rc-trigger": "^2.6.2", + "rc-upload": "~2.6.0", + "rc-util": "^4.5.1", + "react-lazy-load": "^3.0.13", + "react-lifecycles-compat": "^3.0.4", + "react-slick": "~0.23.2", + "resize-observer-polyfill": "^1.5.0", + "shallowequal": "^1.1.0", + "warning": "~4.0.2" + }, + "dependencies": { + "create-react-context": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/create-react-context/-/create-react-context-0.2.2.tgz", + "integrity": "sha512-KkpaLARMhsTsgp0d2NA/R94F/eDLbhXERdIq3LvX2biCAXcDvHYoOqHfWCHf1+OLj+HKBotLG3KqaOOf+C1C+A==", + "requires": { + "fbjs": "^0.8.0", + "gud": "^1.0.0" + } + }, + "lodash": { + "version": "4.17.11", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", + "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==" + }, + "loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "prop-types": { + "version": "15.7.2", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz", + "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==", + "requires": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.8.1" + } + }, + "react-is": { + "version": "16.8.5", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.8.5.tgz", + "integrity": "sha512-sudt2uq5P/2TznPV4Wtdi+Lnq3yaYW8LfvPKLM9BKD8jJNBkxMVyB0C9/GmVhLw7Jbdmndk/73n7XQGeN9A3QQ==" + }, + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, "anymatch": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", @@ -449,6 +768,11 @@ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=" }, + "array-bounds": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-bounds/-/array-bounds-1.0.1.tgz", + "integrity": "sha512-8wdW3ZGk6UjMPJx/glyEt0sLzzwAE1bhToPsO1W2pbpR2gULyxe3BjSiuJFheP50T/GgODVPz2fuMUmIywt8cQ==" + }, "array-equal": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/array-equal/-/array-equal-1.0.0.tgz", @@ -488,11 +812,34 @@ "resolved": 
"https://registry.npmjs.org/array-map/-/array-map-0.0.0.tgz", "integrity": "sha1-iKK6tz0c97zVwbEYoAP2b2ZfpmI=" }, + "array-normalize": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/array-normalize/-/array-normalize-1.1.3.tgz", + "integrity": "sha1-c/uDf0gW7BkVHTxejYU6RZDOAb0=", + "requires": { + "array-bounds": "^1.0.0" + } + }, + "array-range": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-range/-/array-range-1.0.1.tgz", + "integrity": "sha1-9W5GWRhDYRxqVvd+8C7afFAIm/w=" + }, + "array-rearrange": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/array-rearrange/-/array-rearrange-2.2.2.tgz", + "integrity": "sha512-UfobP5N12Qm4Qu4fwLDIi2v6+wZsSf6snYSxAMeKhrh37YGnNWZPRmVEKc/2wfms53TLQnzfpG8wCx2Y/6NG1w==" + }, "array-reduce": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/array-reduce/-/array-reduce-0.0.0.tgz", "integrity": "sha1-FziZ0//Rx9k4PkR5Ul2+J4yrXys=" }, + "array-tree-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-tree-filter/-/array-tree-filter-2.1.0.tgz", + "integrity": "sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw==" + }, "array-union": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", @@ -583,6 +930,14 @@ "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.1.tgz", "integrity": "sha1-GdOGodntxufByF04iu28xW0zYC0=" }, + "async-validator": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/async-validator/-/async-validator-1.8.5.tgz", + "integrity": "sha512-tXBM+1m056MAX0E8TL2iCjg8WvSyXu0Zc8LNtYqrVeyoL3+esHRZ4SieE9fKQyyU09uONjnMEjrNBMqT0mbvmA==", + "requires": { + "babel-runtime": "6.x" + } + }, "asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", @@ -593,6 +948,11 @@ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.1.tgz", "integrity": "sha1-ri1acpR38onWDdf5amMUoi3Wwio=" }, + "atob-lite": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/atob-lite/-/atob-lite-1.0.0.tgz", + "integrity": "sha1-uI3KYAaSK5YglPdVaCa6sxxKKWs=" + }, "autoprefixer": { "version": "7.1.6", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-7.1.6.tgz", @@ -1472,6 +1832,14 @@ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" }, + "barycentric": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/barycentric/-/barycentric-1.0.1.tgz", + "integrity": "sha1-8VYruJGyb0/sRjqC7to2V4AOxog=", + "requires": { + "robust-linear-solve": "^1.0.0" + } + }, "base": { "version": "0.11.2", "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", @@ -1541,6 +1909,16 @@ "tweetnacl": "^0.14.3" } }, + "big-rat": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/big-rat/-/big-rat-1.0.4.tgz", + "integrity": "sha1-do0JO7V5MN0Y7Vdcf8on3FORreo=", + "requires": { + "bit-twiddle": "^1.0.2", + "bn.js": "^4.11.6", + "double-bits": "^1.1.1" + } + }, "big.js": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", @@ -1556,6 +1934,33 @@ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.11.0.tgz", "integrity": "sha1-RqoXUftqL5PuXmibsQh9SxTGwgU=" }, + "binary-search-bounds": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-1.0.0.tgz", + "integrity": 
"sha1-MjyjF+PypA9CRMclX1OEpbIHu2k=" + }, + "bit-twiddle": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bit-twiddle/-/bit-twiddle-1.0.2.tgz", + "integrity": "sha1-DGwfq+KyPRcXPZpht7cJPrnhdp4=" + }, + "bitmap-sdf": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bitmap-sdf/-/bitmap-sdf-1.0.3.tgz", + "integrity": "sha512-ojYySSvWTx21cbgntR942zgEgqj38wHctN64vr4vYRFf3GKVmI23YlA94meWGkFslidwLwGCsMy2laJ3g/94Sg==", + "requires": { + "clamp": "^1.0.1" + } + }, + "bl": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.2.tgz", + "integrity": "sha512-e8tQYnZodmebYDWGH7KMRvtzKXaJHx3BbilrgZCfvyLUYdKpK1t5PSPmpkny/SgiTSCnjfLW7v5rlONXVFkQEA==", + "requires": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, "bluebird": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz", @@ -1621,11 +2026,28 @@ "hoek": "4.x.x" } }, + "boundary-cells": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/boundary-cells/-/boundary-cells-2.0.1.tgz", + "integrity": "sha1-6QWo0UGc9Hyza+Pb9SXbXiTeAEI=", + "requires": { + "tape": "^4.0.0" + } + }, "bowser": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/bowser/-/bowser-1.9.3.tgz", "integrity": "sha512-/gp96UlcFw5DbV2KQPCqTqi0Mb9gZRyDAHiDsGEH+4B/KOQjeoE5lM1PxlVX8DQDvfEfitmC1rW2Oy8fk/XBDg==" }, + "box-intersect": { + "version": "1.0.1", + "resolved": "http://registry.npmjs.org/box-intersect/-/box-intersect-1.0.1.tgz", + "integrity": "sha1-tyilnj8aPHPCJJM8JmC5J6oTeQI=", + "requires": { + "bit-twiddle": "^1.0.2", + "typedarray-pool": "^1.1.0" + } + }, "boxen": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/boxen/-/boxen-1.3.0.tgz", @@ -1693,6 +2115,77 @@ } } }, + "brfs": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/brfs/-/brfs-1.6.1.tgz", + "integrity": "sha512-OfZpABRQQf+Xsmju8XE9bDjs+uU4vLREGolP7bDgcpsI17QREyZ4Bl+2KLxxx1kCgA0fAIhKQBaBYh+PEcCqYQ==", + "requires": { + "quote-stream": "^1.0.1", + "resolve": "^1.1.5", + "static-module": "^2.2.0", + "through2": "^2.0.0" + }, + "dependencies": { + "duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "requires": { + "readable-stream": "^2.0.2" + } + }, + "minimist": { + "version": "1.2.0", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + }, + "object-inspect": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.4.1.tgz", + "integrity": "sha512-wqdhLpfCUbEsoEwl3FXwGyv8ief1k/1aUdIPCqVnupM6e8l63BEJdiF/0swtn04/8p05tG/T0FrpTlfwvljOdw==" + }, + "quote-stream": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/quote-stream/-/quote-stream-1.0.2.tgz", + "integrity": "sha1-hJY/jJwmuULhU/7rU6rnRlK34LI=", + "requires": { + "buffer-equal": "0.0.1", + "minimist": "^1.1.3", + "through2": "^2.0.0" + } + }, + "static-module": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/static-module/-/static-module-2.2.5.tgz", + "integrity": "sha512-D8vv82E/Kpmz3TXHKG8PPsCPg+RAX6cbCOyvjM6x04qZtQ47EtJFVwRsdov3n5d6/6ynrOY9XB4JkaZwB2xoRQ==", + "requires": { + "concat-stream": "~1.6.0", + "convert-source-map": "^1.5.1", + "duplexer2": "~0.1.4", + "escodegen": "~1.9.0", + "falafel": "^2.1.0", + "has": "^1.0.1", + "magic-string": "^0.22.4", + "merge-source-map": "1.0.4", + "object-inspect": "~1.4.0", + "quote-stream": 
"~1.0.2", + "readable-stream": "~2.3.3", + "shallow-copy": "~0.0.1", + "static-eval": "^2.0.0", + "through2": "~2.0.3" + } + }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + } + } + }, "brorand": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", @@ -1794,6 +2287,93 @@ "node-int64": "^0.4.0" } }, + "buble": { + "version": "0.19.6", + "resolved": "https://registry.npmjs.org/buble/-/buble-0.19.6.tgz", + "integrity": "sha512-9kViM6nJA1Q548Jrd06x0geh+BG2ru2+RMDkIHHgJY/8AcyCs34lTHwra9BX7YdPrZXd5aarkpr/SY8bmPgPdg==", + "requires": { + "chalk": "^2.4.1", + "magic-string": "^0.25.1", + "minimist": "^1.2.0", + "os-homedir": "^1.0.1", + "regexpu-core": "^4.2.0", + "vlq": "^1.0.0" + }, + "dependencies": { + "chalk": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", + "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "jsesc": { + "version": "0.5.0", + "resolved": "http://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=" + }, + "magic-string": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.1.tgz", + "integrity": "sha512-sCuTz6pYom8Rlt4ISPFn6wuFodbKMIHUMv4Qko9P17dpxb7s52KJTmRuZZqHdGmLCK9AOcDare039nRIcfdkEg==", + "requires": { + "sourcemap-codec": "^1.4.1" + } + }, + "minimist": { + "version": "1.2.0", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + }, + "regenerate": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", + "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" + }, + "regexpu-core": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.2.0.tgz", + "integrity": "sha512-Z835VSnJJ46CNBttalHD/dB+Sj2ezmY6Xp38npwU87peK6mqOzOpV8eYktdkLTEkzzD+JsTcxd84ozd8I14+rw==", + "requires": { + "regenerate": "^1.4.0", + "regenerate-unicode-properties": "^7.0.0", + "regjsgen": "^0.4.0", + "regjsparser": "^0.3.0", + "unicode-match-property-ecmascript": "^1.0.4", + "unicode-match-property-value-ecmascript": "^1.0.2" + } + }, + "regjsgen": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.4.0.tgz", + "integrity": "sha512-X51Lte1gCYUdlwhF28+2YMO0U6WeN0GLpgpA7LK7mbdDnkQYiwvEpmpe0F/cv5L14EbxgrdayAG3JETBv0dbXA==" + }, + "regjsparser": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.3.0.tgz", + "integrity": "sha512-zza72oZBBHzt64G7DxdqrOo/30bhHkwMUoT0WqfGu98XLd7N+1tsy5MJ96Bk4MD0y74n629RhmrGW6XlnLLwCA==", + "requires": { + "jsesc": "~0.5.0" + } + }, + "vlq": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/vlq/-/vlq-1.0.0.tgz", + "integrity": "sha512-o3WmXySo+oI5thgqr7Qy8uBkT/v9Zr+sRyrh1lr8aWPUkgDWdWt4Nae2WKBrLsocgE8BuWWD0jLc+VW8LeU+2g==" + } + } + }, + "bubleify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/bubleify/-/bubleify-1.2.0.tgz", + "integrity": 
"sha512-SJnUsR+f8WeDw0K2l1S+VuYI33Cu5Gfghe5jTow/fpJueNtnwyoECyfCGsDuFoQt4QGhjpV3LYPpN0hxy90LgA==", + "requires": { + "buble": "^0.19.3" + } + }, "buffer": { "version": "4.9.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz", @@ -1804,6 +2384,11 @@ "isarray": "^1.0.0" } }, + "buffer-equal": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-0.0.1.tgz", + "integrity": "sha1-kbx0sR6kBbyRa8aqkI+q+ltKrEs=" + }, "buffer-from": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", @@ -1925,11 +2510,28 @@ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000839.tgz", "integrity": "sha512-gJZIfmkuy84agOeAZc7WJOexZhisZaBSFk96gkGM6TkH7+1mBfr/MSPnXC8lO0g7guh/ucbswYjruvDbzc6i0g==" }, + "canvas-fit": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/canvas-fit/-/canvas-fit-1.5.0.tgz", + "integrity": "sha1-rhO+Zq3kL1vg5IfjRfzjCl5bXl8=", + "requires": { + "element-size": "^1.1.1" + } + }, "capture-stack-trace": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz", "integrity": "sha1-Sm+gc5nCa7pH8LJJa00PtAjFVQ0=" }, + "cardinal": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-0.4.4.tgz", + "integrity": "sha1-ylu2iltRG5D+k7ms6km97lwyv+I=", + "requires": { + "ansicolors": "~0.2.1", + "redeyed": "~0.4.0" + } + }, "case-sensitive-paths-webpack-plugin": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.1.1.tgz", @@ -1940,14 +2542,36 @@ "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" }, - "center-align": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz", - "integrity": "sha1-qg0yYptu6XIgBBHL1EYckHvCt60=", + "cdt2d": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/cdt2d/-/cdt2d-1.0.0.tgz", + "integrity": "sha1-TyEkNLzWe9s9aLj+9KzcLFRBUUE=", "requires": { - "align-text": "^0.1.3", - "lazy-cache": "^1.0.3" - } + "binary-search-bounds": "^2.0.3", + "robust-in-sphere": "^1.1.3", + "robust-orientation": "^1.1.3" + }, + "dependencies": { + "binary-search-bounds": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.4.tgz", + "integrity": "sha512-2hg5kgdKql5ClF2ErBcSx0U5bnl5hgS4v7wMnLFodyR47yMtj2w+UAZB+0CiqyHct2q543i7Bi4/aMIegorCCg==" + } + } + }, + "cell-orientation": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cell-orientation/-/cell-orientation-1.0.1.tgz", + "integrity": "sha1-tQStlqZq0obZ7dmFoiU9A7gNKFA=" + }, + "center-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz", + "integrity": "sha1-qg0yYptu6XIgBBHL1EYckHvCt60=", + "requires": { + "align-text": "^0.1.3", + "lazy-cache": "^1.0.3" + } }, "chalk": { "version": "1.1.3", @@ -2107,6 +2731,28 @@ "resolved": "https://registry.npmjs.org/circular-json/-/circular-json-0.3.3.tgz", "integrity": "sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A==" }, + "circumcenter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/circumcenter/-/circumcenter-1.0.0.tgz", + "integrity": "sha1-INeqE7F/usUvUtpPVMasi5Bu5Sk=", + "requires": { + "dup": "^1.0.0", + "robust-linear-solve": "^1.0.0" + } + }, + 
"circumradius": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/circumradius/-/circumradius-1.0.0.tgz", + "integrity": "sha1-cGxEfj5VzR7T0RvRM+N8JSzDBbU=", + "requires": { + "circumcenter": "^1.0.0" + } + }, + "clamp": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/clamp/-/clamp-1.0.1.tgz", + "integrity": "sha1-ZqDmQBGBbjcZaCj9yMjBRzEshjQ=" + }, "clap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/clap/-/clap-1.2.3.tgz", @@ -2156,6 +2802,20 @@ } } }, + "clean-pslg": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/clean-pslg/-/clean-pslg-1.1.2.tgz", + "integrity": "sha1-vTXHRgt+irWp92Gl7VF5aqPIbBE=", + "requires": { + "big-rat": "^1.0.3", + "box-intersect": "^1.0.1", + "nextafter": "^1.0.0", + "rat-vec": "^1.1.1", + "robust-segment-intersect": "^1.0.1", + "union-find": "^1.0.2", + "uniq": "^1.0.1" + } + }, "cli-boxes": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-1.0.0.tgz", @@ -2233,6 +2893,14 @@ "color-string": "^0.3.0" } }, + "color-alpha": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/color-alpha/-/color-alpha-1.0.3.tgz", + "integrity": "sha512-ap5UCPpnpsSQu09ccl/5cNQDJlSFvkuXHMBY1+1vu6iKj6H9zw7Sz852snsETFsrYlPUnvTByCFAnYVynKJb9A==", + "requires": { + "color-parse": "^1.2.0" + } + }, "color-convert": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.1.tgz", @@ -2241,11 +2909,58 @@ "color-name": "^1.1.1" } }, + "color-id": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/color-id/-/color-id-1.1.0.tgz", + "integrity": "sha512-2iRtAn6dC/6/G7bBIo0uupVrIne1NsQJvJxZOBCzQOfk7jRq97feaDZ3RdzuHakRXXnHGNwglto3pqtRx1sX0g==", + "requires": { + "clamp": "^1.0.1" + } + }, "color-name": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, + "color-normalize": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/color-normalize/-/color-normalize-1.3.0.tgz", + "integrity": "sha512-BfOC/x9Q7bmrR1t/Mflfr9c4ZEbr3B+Sz3pWNG6xkcB8mFtF8z32MStJK0NSBmFVhHtFlfXQKOYC/ADbqmxHzg==", + "requires": { + "clamp": "^1.0.1", + "color-rgba": "^2.1.0", + "dtype": "^2.0.0" + } + }, + "color-parse": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/color-parse/-/color-parse-1.3.7.tgz", + "integrity": "sha512-8G6rPfyTZhWYKU7D2hwywTjA4YlqX/Z7ClqTEzh5ENc5QkLOff0u8EuyNZR6xScEBhWpAyiDrrVGNUE/Btg2LA==", + "requires": { + "color-name": "^1.0.0", + "defined": "^1.0.0", + "is-plain-obj": "^1.1.0" + } + }, + "color-rgba": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/color-rgba/-/color-rgba-2.1.0.tgz", + "integrity": "sha512-yAmMouVOLRAtYJwP52qymiscIMpw2g7VO82pkW+a88BpW1AZ+O6JDxAAojLljGO0pQkkvZLLN9oQNTEgT+RFiw==", + "requires": { + "clamp": "^1.0.1", + "color-parse": "^1.3.7", + "color-space": "^1.14.6" + } + }, + "color-space": { + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/color-space/-/color-space-1.16.0.tgz", + "integrity": "sha512-A6WMiFzunQ8KEPFmj02OnnoUnqhmSaHaZ/0LVFcPTdlvm8+3aMJ5x1HRHy3bDHPkovkf4sS0f4wsVvwk71fKkg==", + "requires": { + "hsluv": "^0.0.3", + "mumath": "^3.3.4" + } + }, "color-string": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/color-string/-/color-string-0.3.0.tgz", @@ -2254,6 +2969,14 @@ "color-name": "^1.0.0" } }, + "colormap": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/colormap/-/colormap-2.3.0.tgz", + "integrity": 
"sha512-Mkk6mQUMbCleXEeStFm2xLwv5zbRakZMUFB1T1+iNEv58VKBByfPwYIjMQDwSRmXNM1gvo5y3WTYAhmdMn/rbg==", + "requires": { + "lerp": "^1.0.3" + } + }, "colormin": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/colormin/-/colormin-1.1.2.tgz", @@ -2287,16 +3010,55 @@ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" }, + "compare-angle": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/compare-angle/-/compare-angle-1.0.1.tgz", + "integrity": "sha1-pOtjQW6jx0f8a9bItjZotN5PoSk=", + "requires": { + "robust-orientation": "^1.0.2", + "robust-product": "^1.0.0", + "robust-sum": "^1.0.0", + "signum": "^0.0.0", + "two-sum": "^1.0.0" + } + }, + "compare-cell": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/compare-cell/-/compare-cell-1.0.0.tgz", + "integrity": "sha1-qetwj24OQa73qlZrEw8ZaNyeGqo=" + }, + "compare-oriented-cell": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/compare-oriented-cell/-/compare-oriented-cell-1.0.1.tgz", + "integrity": "sha1-ahSf7vnfxPj8YjWOUd1C7/u9w54=", + "requires": { + "cell-orientation": "^1.0.1", + "compare-cell": "^1.0.0" + } + }, "compare-versions": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/compare-versions/-/compare-versions-3.1.0.tgz", "integrity": "sha512-4hAxDSBypT/yp2ySFD346So6Ragw5xmBn/e/agIGl3bZr6DLUqnoRZPusxKrXdYRZpgexO9daejmIenlq/wrIQ==" }, + "component-classes": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/component-classes/-/component-classes-1.2.6.tgz", + "integrity": "sha1-xkI5TDYYpNiwuJGe/Mu9kw5c1pE=", + "requires": { + "component-indexof": "0.0.3" + } + }, "component-emitter": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" }, + "component-indexof": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/component-indexof/-/component-indexof-0.0.3.tgz", + "integrity": "sha1-EdCRMSI5648yyPJa6csAL/6NPCQ=" + }, "compressible": { "version": "2.0.13", "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.13.tgz", @@ -2398,6 +3160,16 @@ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.5.1.tgz", "integrity": "sha1-uCeAl7m8IpNl3lxiz1/K7YtVmeU=" }, + "convex-hull": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/convex-hull/-/convex-hull-1.0.3.tgz", + "integrity": "sha1-IKOqbOh/St6i/30XlxyfwcZ+H/8=", + "requires": { + "affine-hull": "^1.0.0", + "incremental-convex-hull": "^1.0.1", + "monotone-convex-hull-2d": "^1.0.1" + } + }, "cookie": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", @@ -2413,6 +3185,14 @@ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=" }, + "copy-to-clipboard": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.0.8.tgz", + "integrity": "sha512-c3GdeY8qxCHGezVb1EFQfHYK/8NZRemgcTIzPq7PuxjHAf/raKibn2QdhHPb/y6q74PMgH6yizaDZlRmw6QyKw==", + "requires": { + "toggle-selection": "^1.0.3" + } + }, "core-js": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/core-js/-/core-js-1.2.7.tgz", @@ -2444,6 +3224,11 @@ } } }, + "country-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/country-regex/-/country-regex-1.1.0.tgz", + "integrity": "sha1-UcMz3N8Sknt+XuucEKyBEqYSCJY=" + }, 
"create-ecdh": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", @@ -2486,6 +3271,16 @@ "sha.js": "^2.4.8" } }, + "create-react-class": { + "version": "15.6.3", + "resolved": "https://registry.npmjs.org/create-react-class/-/create-react-class-15.6.3.tgz", + "integrity": "sha512-M+/3Q6E6DLO6Yx3OwrWjwHBnvfXXYA7W+dFjt/ZDBemHO1DDZhsalX/NUtnTYclN6GfnBDRh4qRHjcDHmlJBJg==", + "requires": { + "fbjs": "^0.8.9", + "loose-envify": "^1.3.1", + "object-assign": "^4.1.1" + } + }, "create-react-context": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/create-react-context/-/create-react-context-0.2.3.tgz", @@ -2546,11 +3341,61 @@ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-1.0.0.tgz", "integrity": "sha1-ojD2T1aDEOFJgAmUB5DsmVRbyn4=" }, + "css-animation": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/css-animation/-/css-animation-1.5.0.tgz", + "integrity": "sha512-hWYoWiOZ7Vr20etzLh3kpWgtC454tW5vn4I6rLANDgpzNSkO7UfOqyCEeaoBSG9CYWQpRkFWTWbWW8o3uZrNLw==", + "requires": { + "babel-runtime": "6.x", + "component-classes": "^1.2.5" + } + }, "css-color-names": { "version": "0.0.4", "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=" }, + "css-font": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/css-font/-/css-font-1.2.0.tgz", + "integrity": "sha512-V4U4Wps4dPDACJ4WpgofJ2RT5Yqwe1lEH6wlOOaIxMi0gTjdIijsc5FmxQlZ7ZZyKQkkutqqvULOp07l9c7ssA==", + "requires": { + "css-font-size-keywords": "^1.0.0", + "css-font-stretch-keywords": "^1.0.1", + "css-font-style-keywords": "^1.0.1", + "css-font-weight-keywords": "^1.0.0", + "css-global-keywords": "^1.0.1", + "css-system-font-keywords": "^1.0.0", + "pick-by-alias": "^1.2.0", + "string-split-by": "^1.0.0", + "unquote": "^1.1.0" + } + }, + "css-font-size-keywords": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/css-font-size-keywords/-/css-font-size-keywords-1.0.0.tgz", + "integrity": "sha1-hUh1rOmspqjS7g00WkSq6btttss=" + }, + "css-font-stretch-keywords": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/css-font-stretch-keywords/-/css-font-stretch-keywords-1.0.1.tgz", + "integrity": "sha1-UM7puboDH7XJUtRyMTnx4Qe1SxA=" + }, + "css-font-style-keywords": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/css-font-style-keywords/-/css-font-style-keywords-1.0.1.tgz", + "integrity": "sha1-XDUygT9jtKHelU0TzqhqtDM0CeQ=" + }, + "css-font-weight-keywords": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/css-font-weight-keywords/-/css-font-weight-keywords-1.0.0.tgz", + "integrity": "sha1-m8BGcayFvHJLV07106yWsNYE/Zc=" + }, + "css-global-keywords": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/css-global-keywords/-/css-global-keywords-1.0.1.tgz", + "integrity": "sha1-cqmupyeW0Bmx0qMlLeTlqqN+Smk=" + }, "css-loader": { "version": "0.28.7", "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-0.28.7.tgz", @@ -2636,11 +3481,21 @@ } } }, + "css-system-font-keywords": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/css-system-font-keywords/-/css-system-font-keywords-1.0.0.tgz", + "integrity": "sha1-hcbwhquk6zLFcaMIav/ENLhII+0=" + }, "css-what": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.0.tgz", "integrity": "sha1-lGfQMsOM+u+58teVASUwYvh/ob0=" }, + "csscolorparser": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/csscolorparser/-/csscolorparser-1.0.3.tgz", + "integrity": "sha1-s085HupNqPPpgjHizNjfnAQfFxs=" + }, "cssesc": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-0.1.0.tgz", @@ -2767,6 +3622,16 @@ "cssom": "0.3.x" } }, + "csstype": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.3.tgz", + "integrity": "sha512-rINUZXOkcBmoHWEyu7JdHu5JMzkGRoMX4ov9830WNgxf5UYxcBUO0QTKAqeJ5EZfSdlrcJYkC8WwfVW7JYi4yg==" + }, + "cubic-hermite": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/cubic-hermite/-/cubic-hermite-1.0.0.tgz", + "integrity": "sha1-hOOy8nKzFFToOTuZu2rtRRaMFOU=" + }, "currently-unhandled": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", @@ -2775,6 +3640,69 @@ "array-find-index": "^1.0.1" } }, + "cwise": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/cwise/-/cwise-1.0.10.tgz", + "integrity": "sha1-JO7mBy69/WuMb12tsXCQtkmxK+8=", + "requires": { + "cwise-compiler": "^1.1.1", + "cwise-parser": "^1.0.0", + "static-module": "^1.0.0", + "uglify-js": "^2.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=" + }, + "uglify-js": { + "version": "2.8.29", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.8.29.tgz", + "integrity": "sha1-KcVzMUgFe7Th913zW3qcty5qWd0=", + "requires": { + "source-map": "~0.5.1", + "uglify-to-browserify": "~1.0.0", + "yargs": "~3.10.0" + } + }, + "yargs": { + "version": "3.10.0", + "resolved": "http://registry.npmjs.org/yargs/-/yargs-3.10.0.tgz", + "integrity": "sha1-9+572FfdfB0tOMDnTvvWgdFDH9E=", + "requires": { + "camelcase": "^1.0.2", + "cliui": "^2.1.0", + "decamelize": "^1.0.0", + "window-size": "0.1.0" + } + } + } + }, + "cwise-compiler": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/cwise-compiler/-/cwise-compiler-1.1.3.tgz", + "integrity": "sha1-9NZnQQ6FDToxOn0tt7HlBbsDTMU=", + "requires": { + "uniq": "^1.0.0" + } + }, + "cwise-parser": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cwise-parser/-/cwise-parser-1.0.3.tgz", + "integrity": "sha1-jkk8F9VPl8sDCp6YVLyGyd+zVP4=", + "requires": { + "esprima": "^1.0.3", + "uniq": "^1.0.0" + }, + "dependencies": { + "esprima": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.5.tgz", + "integrity": "sha1-CZNQL+r2aBODJXVvMPmlH+7sEek=" + } + } + }, "d": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/d/-/d-1.0.0.tgz", @@ -2783,6 +3711,11 @@ "es5-ext": "^0.10.9" } }, + "d3": { + "version": "3.5.17", + "resolved": "https://registry.npmjs.org/d3/-/d3-3.5.17.tgz", + "integrity": "sha1-vEZ0gAQ3iyGjYMn8fPUjF5B2L7g=" + }, "d3-array": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.1.tgz", @@ -2798,6 +3731,22 @@ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.2.0.tgz", "integrity": "sha512-dmL9Zr/v39aSSMnLOTd58in2RbregCg4UtGyUArvEKTTN6S3HKEy+ziBWVYo9PTzRyVW+pUBHUtRKz0HYX+SQg==" }, + "d3-dispatch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-1.0.5.tgz", + "integrity": "sha512-vwKx+lAqB1UuCeklr6Jh1bvC4SZgbSqbkGBLClItFBIYH4vqDJCA7qfoy14lXmJdnBOdxndAMxjCbImJYW7e6g==" + }, + "d3-force": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-1.1.2.tgz", + "integrity": 
"sha512-p1vcHAUF1qH7yR+e8ip7Bs61AHjLeKkIn8Z2gzwU2lwEf2wkSpWdjXG0axudTHsVFnYGlMkFaEsVy2l8tAg1Gw==", + "requires": { + "d3-collection": "1", + "d3-dispatch": "1", + "d3-quadtree": "1", + "d3-timer": "1" + } + }, "d3-format": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.3.0.tgz", @@ -2816,6 +3765,11 @@ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.5.tgz", "integrity": "sha1-JB6xhJvZ6egCHA0KeZ+KDo5EF2Q=" }, + "d3-quadtree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-1.0.5.tgz", + "integrity": "sha512-U2tjwDFbZ75JRAg8A+cqMvqPg1G3BE7UTJn3h8DHjY/pnsAfWdbJKgyfcy7zKjqGtLAmI0q8aDSeG1TVIKRaHQ==" + }, "d3-scale": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-1.0.6.tgz", @@ -2851,6 +3805,11 @@ "d3-time": "1" } }, + "d3-timer": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-1.0.9.tgz", + "integrity": "sha512-rT34J5HnQUHhcLvhSB9GjCkN0Ddd5Y8nCwDBG2u6wQEeYxT/Lf51fTFFkldeib/sE/J0clIe0pnCfs6g/lRbyg==" + }, "damerau-levenshtein": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.4.tgz", @@ -2985,6 +3944,15 @@ "rimraf": "^2.2.8" } }, + "delaunay-triangulate": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/delaunay-triangulate/-/delaunay-triangulate-1.1.6.tgz", + "integrity": "sha1-W7yiGweBmNS8PHV5ajXLuYwllUw=", + "requires": { + "incremental-convex-hull": "^1.0.1", + "uniq": "^1.0.1" + } + }, "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -3017,6 +3985,11 @@ "repeating": "^2.0.0" } }, + "detect-kerning": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-kerning/-/detect-kerning-2.1.2.tgz", + "integrity": "sha512-I3JIbrnKPAntNLl1I6TpSQQdQ4AutYzv/sKMFKbepawV/hlH0GmYKhUoOEMd4xqaUHT+Bm0f4127lh5qs1m1tw==" + }, "detect-node": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.3.tgz", @@ -3082,6 +4055,19 @@ "esutils": "^2.0.2" } }, + "dom-align": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/dom-align/-/dom-align-1.8.2.tgz", + "integrity": "sha512-17vInOylbB7H4qua7QRsmQT05FFTZemO8BhnOPgF9BPqjAPDyQr/9V8fmJbn05vQ31m2gu3EJSSYN2u94szUZg==" + }, + "dom-closest": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-closest/-/dom-closest-0.2.0.tgz", + "integrity": "sha1-69n5HRvyLo1vR3h2u80+yQIWwM8=", + "requires": { + "dom-matches": ">=1.0.1" + } + }, "dom-converter": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.1.4.tgz", @@ -3102,6 +4088,16 @@ "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-3.3.1.tgz", "integrity": "sha512-2Sm+JaYn74OiTM2wHvxJOo3roiq/h25Yi69Fqk269cNUwIXsCvATB6CRSFC9Am/20G2b28hGv/+7NiWydIrPvg==" }, + "dom-matches": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-matches/-/dom-matches-2.0.0.tgz", + "integrity": "sha1-0nKLQWqHUzmA6wibhI0lPPI6dYw=" + }, + "dom-scroll-into-view": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/dom-scroll-into-view/-/dom-scroll-into-view-1.2.1.tgz", + "integrity": "sha1-6PNnMt0ImwIBqI14Fdw/iObWbH4=" + }, "dom-serializer": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.0.tgz", @@ -3171,6 +4167,11 @@ "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-4.2.0.tgz", "integrity": 
"sha1-3vHxyl1gWdJKdm5YeULCEQbOEnU=" }, + "double-bits": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/double-bits/-/double-bits-1.1.1.tgz", + "integrity": "sha1-WKu6RUlNpND6Nrc60RoobJGEscY=" + }, "draft-js": { "version": "0.10.5", "resolved": "https://registry.npmjs.org/draft-js/-/draft-js-0.10.5.tgz", @@ -3188,16 +4189,82 @@ } } }, + "draw-svg-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/draw-svg-path/-/draw-svg-path-1.0.0.tgz", + "integrity": "sha1-bxFtli3TFLmepTTW9Y3WbNvWk3k=", + "requires": { + "abs-svg-path": "~0.1.1", + "normalize-svg-path": "~0.1.0" + } + }, + "dtype": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dtype/-/dtype-2.0.0.tgz", + "integrity": "sha1-zQUjI84GFETs0uj1dI9popvihDQ=" + }, + "dup": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dup/-/dup-1.0.0.tgz", + "integrity": "sha1-UfxaxoX4GWRp3wuQXpNLIK9bQCk=" + }, "duplexer": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.1.tgz", "integrity": "sha1-rOb/gIwc5mtX0ev5eXessCM0z8E=" }, + "duplexer2": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.0.2.tgz", + "integrity": "sha1-xhTc9n4vsUmVqRcR5aYX6KYKMds=", + "requires": { + "readable-stream": "~1.1.9" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + } + } + }, "duplexer3": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" }, + "duplexify": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.6.1.tgz", + "integrity": "sha512-vM58DwdnKmty+FSPzT14K9JXb90H+j5emaR4KYbr2KTIz00WHGbWOe5ghQTx233ZCLZtrGDALzKwcjEtSt35mA==", + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "earcut": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/earcut/-/earcut-2.1.3.tgz", + "integrity": "sha512-AxdCdWUk1zzK/NuZ7e1ljj6IGC+VAdC3Qb7QQDsXpfNrc5IM8tL9nNXUmEGE6jRHTfZ10zhzRhtDmWVsR5pd3A==" + }, "ecc-jsbn": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz", @@ -3207,6 +4274,14 @@ "jsbn": "~0.1.0" } }, + "edges-to-adjacency-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/edges-to-adjacency-list/-/edges-to-adjacency-list-1.0.0.tgz", + "integrity": "sha1-wUbS4ISt37p0pRKTxuAZmkn3V/E=", + "requires": { + "uniq": "^1.0.0" + } + }, "ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", @@ -3217,6 +4292,11 @@ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.45.tgz", "integrity": "sha1-RYrBscXHYM6IEaFtK/vZfsMLr7g=" }, + "element-size": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/element-size/-/element-size-1.1.1.tgz", + "integrity": "sha1-ZOXxWdlxIWMYRby67K8nnDm1404=" + }, "elliptic": { "version": "6.4.0", "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.4.0.tgz", @@ -3254,6 +4334,14 @@ "iconv-lite": "~0.4.13" } }, + "end-of-stream": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.1.tgz", + "integrity": "sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q==", + "requires": { + "once": "^1.4.0" + } + }, "enhanced-resolve": { "version": "3.4.1", "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-3.4.1.tgz", @@ -3265,6 +4353,11 @@ "tapable": "^0.2.7" } }, + "enquire.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/enquire.js/-/enquire.js-2.1.6.tgz", + "integrity": "sha1-PoeAybi4NQhMP2DhZtvDwqPImBQ=" + }, "entities": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.1.tgz", @@ -3980,6 +5073,11 @@ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-3.1.0.tgz", "integrity": "sha512-ivIvhpq/Y0uSjcHDcOIccjmYjGLcP09MFGE7ysAwkAvkXfpZlC985pH2/ui64DKazbTW/4kN3yqozUxlXzI6cA==" }, + "eventlistener": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/eventlistener/-/eventlistener-0.0.1.tgz", + "integrity": "sha1-7Suqu4UiJ68rz4iRUscsY8pTLrg=" + }, "events": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", @@ -4115,6 +5213,11 @@ "homedir-polyfill": "^1.0.1" } }, + "expect.js": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/expect.js/-/expect.js-0.2.0.tgz", + "integrity": "sha1-EChTPSwcNj90pnlv9X7AUg3tK+E=" + }, "express": { "version": "4.16.3", "resolved": "https://registry.npmjs.org/express/-/express-4.16.3.tgz", @@ -4267,6 +5370,11 @@ } } }, + "extract-frustum-planes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/extract-frustum-planes/-/extract-frustum-planes-1.0.0.tgz", + "integrity": "sha1-l9VwP/BWTIw8aDjKxF+ee8UsnvU=" + }, "extract-text-webpack-plugin": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/extract-text-webpack-plugin/-/extract-text-webpack-plugin-3.0.2.tgz", @@ -4283,14 +5391,40 @@ "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" }, + "falafel": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/falafel/-/falafel-2.1.0.tgz", + "integrity": "sha1-lrsXdh2rqU9G0AFzizzt86Z/4Gw=", + "requires": { + "acorn": "^5.0.0", + "foreach": "^2.0.5", + "isarray": "0.0.1", + "object-keys": "^1.0.6" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + } + } + }, "fast-deep-equal": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz", "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=" }, - "fast-json-stable-stringify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "fast-isnumeric": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/fast-isnumeric/-/fast-isnumeric-1.1.2.tgz", + "integrity": "sha512-D7zJht1+NZBBv4759yXn/CJFUNJpILdgdosPFN1AjqQn9TfQJqSeCZfu0SY4bwIlXuDhzkxKoQ8BOqdiXpVzvA==", + "requires": { + "is-string-blank": "^1.0.1" + } + }, + 
"fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=" }, "fast-levenshtein": { @@ -4404,6 +5538,15 @@ } } }, + "filtered-vector": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/filtered-vector/-/filtered-vector-1.2.4.tgz", + "integrity": "sha1-VkU8A030MC0pPKjs3qw/kKvGeNM=", + "requires": { + "binary-search-bounds": "^1.0.0", + "cubic-hermite": "^1.0.0" + } + }, "finalhandler": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", @@ -4458,6 +5601,14 @@ "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.2.tgz", "integrity": "sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=" }, + "flatten-vertex-data": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/flatten-vertex-data/-/flatten-vertex-data-1.0.2.tgz", + "integrity": "sha512-BvCBFK2NZqerFTdMDgqfHBwxYWnxeCkwONsw6PvBMcUXqo8U/KDWwmXhqx1x2kLIg7DqIsJfOaJFOmlua3Lxuw==", + "requires": { + "dtype": "^2.0.0" + } + }, "fn-name": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/fn-name/-/fn-name-2.0.1.tgz", @@ -4481,6 +5632,39 @@ } } }, + "font-atlas": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/font-atlas/-/font-atlas-2.1.0.tgz", + "integrity": "sha512-kP3AmvX+HJpW4w3d+PiPR2X6E1yvsBXt2yhuCw+yReO9F1WYhvZwx3c95DGZGwg9xYzDGrgJYa885xmVA+28Cg==", + "requires": { + "css-font": "^1.0.0" + } + }, + "font-atlas-sdf": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/font-atlas-sdf/-/font-atlas-sdf-1.3.3.tgz", + "integrity": "sha512-GxUpcdkdoHgC3UrpMuA7JmG1Ty/MY0BhfmV8r7ZSv3bkqBY5vmRIjcj7Pg8iqj20B03vlU6fUhdpyIgEo/Z35w==", + "requires": { + "optical-properties": "^1.0.0", + "tiny-sdf": "^1.0.2" + } + }, + "font-measure": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/font-measure/-/font-measure-1.2.2.tgz", + "integrity": "sha512-mRLEpdrWzKe9hbfaF3Qpr06TAjquuBVP5cHy4b3hyeNdjc9i0PO6HniGsX5vjL5OWv7+Bd++NiooNpT/s8BvIA==", + "requires": { + "css-font": "^1.2.0" + } + }, + "for-each": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", + "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "requires": { + "is-callable": "^1.1.3" + } + }, "for-in": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", @@ -4555,6 +5739,15 @@ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" }, + "from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, "fs-extra": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-3.0.1.tgz", @@ -4589,7 +5782,8 @@ "ansi-regex": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=" + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "optional": true }, "aproba": { "version": "1.2.0", @@ -4610,12 +5804,14 @@ "balanced-match": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "optional": true 
}, "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "optional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -4630,17 +5826,20 @@ "code-point-at": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "optional": true }, "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "optional": true }, "console-control-strings": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=" + "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", + "optional": true }, "core-util-is": { "version": "1.0.2", @@ -4757,7 +5956,8 @@ "inherits": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "optional": true }, "ini": { "version": "1.3.5", @@ -4769,6 +5969,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "optional": true, "requires": { "number-is-nan": "^1.0.0" } @@ -4783,6 +5984,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "optional": true, "requires": { "brace-expansion": "^1.1.7" } @@ -4790,12 +5992,14 @@ "minimist": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", - "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "optional": true }, "minipass": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/minipass/-/minipass-2.2.4.tgz", "integrity": "sha512-hzXIWWet/BzWhYs2b+u7dRHlruXhwdgvlTMDKC6Cb1U7ps6Ac6yQlR39xsbjWJE377YTCtKwIXIpJ5oP+j5y8g==", + "optional": true, "requires": { "safe-buffer": "^5.1.1", "yallist": "^3.0.0" @@ -4814,6 +6018,7 @@ "version": "0.5.1", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "optional": true, "requires": { "minimist": "0.0.8" } @@ -4894,7 +6099,8 @@ "number-is-nan": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "optional": true }, "object-assign": { "version": "4.1.1", @@ -4906,6 +6112,7 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "optional": true, "requires": { "wrappy": "1" } @@ -4991,7 +6198,8 @@ "safe-buffer": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", - "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" + "integrity": 
"sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==", + "optional": true }, "safer-buffer": { "version": "2.1.2", @@ -5027,6 +6235,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "optional": true, "requires": { "code-point-at": "^1.0.0", "is-fullwidth-code-point": "^1.0.0", @@ -5046,6 +6255,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "optional": true, "requires": { "ansi-regex": "^2.0.0" } @@ -5089,12 +6299,14 @@ "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "optional": true }, "yallist": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.2.tgz", - "integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=" + "integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=", + "optional": true } } }, @@ -5119,11 +6331,44 @@ "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=" }, + "gamma": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/gamma/-/gamma-0.1.0.tgz", + "integrity": "sha1-MxVkNAO/J5BsqAqzfDbs6UQO8zA=" + }, + "geojson-rewind": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/geojson-rewind/-/geojson-rewind-0.3.1.tgz", + "integrity": "sha1-IiQHl8hHzC8MHTE+SqDJFa+n8p0=", + "requires": { + "@mapbox/geojson-area": "0.2.2", + "concat-stream": "~1.6.0", + "minimist": "1.2.0", + "sharkdown": "^0.1.0" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + } + } + }, + "geojson-vt": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/geojson-vt/-/geojson-vt-3.2.1.tgz", + "integrity": "sha512-EvGQQi/zPrDA6zr6BnJD/YhwAkBP8nnJ9emh3EnHQKVMfg/MRVtPbMYdgVy/IaEmn4UfagD2a6fafPDL5hbtwg==" + }, "get-caller-file": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-1.0.2.tgz", "integrity": "sha1-9wLmMSfn4jHBYKgMFVSstw1QR+U=" }, + "get-canvas-context": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-canvas-context/-/get-canvas-context-1.0.2.tgz", + "integrity": "sha1-1ue1C8TkyGNXzTnyJkeoS3NgHpM=" + }, "get-stdin": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", @@ -5147,6 +6392,423 @@ "assert-plus": "^1.0.0" } }, + "gl-axes3d": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/gl-axes3d/-/gl-axes3d-1.4.0.tgz", + "integrity": "sha512-aakup65ywK7Bo0k/2IAq8AdvtZYHJANskePJpElcmuC1vm0l+4sRKmXevdR9AYBDNh5KEULFSnTe9RHVPvBtxQ==", + "requires": { + "bit-twiddle": "^1.0.0", + "dup": "^1.0.0", + "extract-frustum-planes": "^1.0.0", + "gl-buffer": "^2.0.3", + "gl-mat4": "^1.0.1", + "gl-shader": "^4.0.4", + "gl-state": "^1.0.0", + "gl-vao": "^1.1.1", + "gl-vec4": "^1.0.1", + "glslify": "^6.1.0", + "robust-orientation": "^1.1.3", + "split-polygon": "^1.0.0", + "vectorize-text": "^3.2.0" + } + }, + "gl-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/gl-buffer/-/gl-buffer-2.1.2.tgz", + "integrity": "sha1-LbjZwaVSf7oM25EonCBuiCuInNs=", + "requires": { + "ndarray": 
"^1.0.15", + "ndarray-ops": "^1.1.0", + "typedarray-pool": "^1.0.0" + } + }, + "gl-cone3d": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/gl-cone3d/-/gl-cone3d-1.2.1.tgz", + "integrity": "sha512-6Hc/l2qHnQXtVWzE/9i3ZmCVrLaBUYO0VTTH3g46jdkBuNlbALr9bo8ZCtHMIkeZhvzfKzfNylQGLiJL7zqdxw==", + "requires": { + "gl-shader": "^4.2.1", + "gl-vec3": "^1.0.0", + "glsl-inverse": "^1.0.0", + "glsl-out-of-range": "^1.0.3", + "glslify": "^6.1.0" + } + }, + "gl-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gl-constants/-/gl-constants-1.0.0.tgz", + "integrity": "sha1-WXpQTjZHUP9QJTqjX43qevSl0jM=" + }, + "gl-contour2d": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/gl-contour2d/-/gl-contour2d-1.1.4.tgz", + "integrity": "sha512-deoY6k5ZcQfh5brlF3nXKs8FqhMNejlxIqWcK+bKenLcThJF94OR7DtQDwLwNXsYAZlsoDt+G01efXid6Modkg==", + "requires": { + "binary-search-bounds": "^2.0.0", + "cdt2d": "^1.0.0", + "clean-pslg": "^1.1.0", + "gl-buffer": "^2.1.2", + "gl-shader": "^4.0.5", + "glslify": "^6.1.0", + "iota-array": "^1.0.0", + "ndarray": "^1.0.18", + "surface-nets": "^1.0.2" + }, + "dependencies": { + "binary-search-bounds": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.4.tgz", + "integrity": "sha512-2hg5kgdKql5ClF2ErBcSx0U5bnl5hgS4v7wMnLFodyR47yMtj2w+UAZB+0CiqyHct2q543i7Bi4/aMIegorCCg==" + } + } + }, + "gl-error3d": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/gl-error3d/-/gl-error3d-1.0.9.tgz", + "integrity": "sha512-YGwUzfPx8CqYDFD20+jaQTSi0K96s0DA+a/FO6d8OxrLnCyTvrRiglx2bdekAHxjgEAOep0CRaIe7iLvItbiyw==", + "requires": { + "gl-buffer": "^2.1.2", + "gl-shader": "^4.2.1", + "gl-vao": "^1.3.0", + "glsl-out-of-range": "^1.0.3", + "glslify": "^6.0.2" + } + }, + "gl-fbo": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/gl-fbo/-/gl-fbo-2.0.5.tgz", + "integrity": "sha1-D6daSXz3h2lVMGkcjwSrtvtV+iI=", + "requires": { + "gl-texture2d": "^2.0.0" + } + }, + "gl-format-compiler-error": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/gl-format-compiler-error/-/gl-format-compiler-error-1.0.3.tgz", + "integrity": "sha1-DHmxdRiZzpcy6GJA8JCqQemEcag=", + "requires": { + "add-line-numbers": "^1.0.1", + "gl-constants": "^1.0.0", + "glsl-shader-name": "^1.0.0", + "sprintf-js": "^1.0.3" + } + }, + "gl-heatmap2d": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/gl-heatmap2d/-/gl-heatmap2d-1.0.4.tgz", + "integrity": "sha512-AWJykMTbCM0ZT20jiFaauRVmLv9dxtNNuTS1NQlKD8yBD0iZ62mgWLeYLUMjil6XN8K3P9EpUCBolvcx1Wf0kA==", + "requires": { + "binary-search-bounds": "^2.0.3", + "gl-buffer": "^2.1.2", + "gl-shader": "^4.0.5", + "glslify": "^6.1.0", + "iota-array": "^1.0.0", + "typedarray-pool": "^1.1.0" + }, + "dependencies": { + "binary-search-bounds": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.4.tgz", + "integrity": "sha512-2hg5kgdKql5ClF2ErBcSx0U5bnl5hgS4v7wMnLFodyR47yMtj2w+UAZB+0CiqyHct2q543i7Bi4/aMIegorCCg==" + } + } + }, + "gl-line3d": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/gl-line3d/-/gl-line3d-1.1.6.tgz", + "integrity": "sha512-22DcHvezFTJ0BK1lYyV9FRV4Z2moey0RAiFynGEIrvbUq3EBd7e+Sftv1/A6kxNUqdp5SIWmMdGznoAPD9P8FQ==", + "requires": { + "binary-search-bounds": "^1.0.0", + "gl-buffer": "^2.0.8", + "gl-shader": "^4.2.1", + "gl-texture2d": "^2.0.2", + "gl-vao": "^1.1.3", + "glsl-out-of-range": "^1.0.3", + "glsl-read-float": "^1.0.0", + "glslify": 
"^6.1.0", + "ndarray": "^1.0.16" + } + }, + "gl-mat2": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gl-mat2/-/gl-mat2-1.0.1.tgz", + "integrity": "sha1-FCUFcwpcL+Hp8l2ezj0NbMJxCjA=" + }, + "gl-mat3": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gl-mat3/-/gl-mat3-1.0.0.tgz", + "integrity": "sha1-iWMyGcpCk3mha5GF2V1BcTRTuRI=" + }, + "gl-mat4": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gl-mat4/-/gl-mat4-1.2.0.tgz", + "integrity": "sha512-sT5C0pwB1/e9G9AvAoLsoaJtbMGjfd/jfxo8jMCKqYYEnjZuFvqV5rehqar0538EmssjdDeiEWnKyBSTw7quoA==" + }, + "gl-matrix-invert": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gl-matrix-invert/-/gl-matrix-invert-1.0.0.tgz", + "integrity": "sha1-o2173jZUxFkKEn7nxo9uE/6oxj0=", + "requires": { + "gl-mat2": "^1.0.0", + "gl-mat3": "^1.0.0", + "gl-mat4": "^1.0.0" + } + }, + "gl-mesh3d": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/gl-mesh3d/-/gl-mesh3d-2.0.2.tgz", + "integrity": "sha512-gKkeEDBVP1rp6iDzz/aomAMsDkkoieihsXJccampo0zhfi9To6xhadEDt6axdUpv5rNjM8l02IPp/wuLDuLJOg==", + "requires": { + "barycentric": "^1.0.1", + "colormap": "^2.1.0", + "gl-buffer": "^2.0.8", + "gl-mat4": "^1.0.0", + "gl-shader": "^4.2.1", + "gl-texture2d": "^2.0.8", + "gl-vao": "^1.1.3", + "glsl-face-normal": "^1.0.2", + "glsl-out-of-range": "^1.0.3", + "glsl-specular-cook-torrance": "^2.0.1", + "glslify": "^6.1.0", + "ndarray": "^1.0.15", + "normals": "^1.0.1", + "polytope-closest-point": "^1.0.0", + "simplicial-complex-contour": "^1.0.0", + "typedarray-pool": "^1.1.0" + } + }, + "gl-plot2d": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/gl-plot2d/-/gl-plot2d-1.4.0.tgz", + "integrity": "sha512-cO1R6TSMHZKxpsxT2jSxxZ/sN6KdkPLvpzp1t5W5qB5xUs4RiTmAw1jd9s1ogdZYBqYJVIrj6ktCrua3Ligc+Q==", + "requires": { + "binary-search-bounds": "^2.0.3", + "gl-buffer": "^2.1.2", + "gl-select-static": "^2.0.2", + "gl-shader": "^4.2.1", + "glsl-inverse": "^1.0.0", + "glslify": "^6.1.0", + "text-cache": "^4.2.0" + }, + "dependencies": { + "binary-search-bounds": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.4.tgz", + "integrity": "sha512-2hg5kgdKql5ClF2ErBcSx0U5bnl5hgS4v7wMnLFodyR47yMtj2w+UAZB+0CiqyHct2q543i7Bi4/aMIegorCCg==" + } + } + }, + "gl-plot3d": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/gl-plot3d/-/gl-plot3d-1.6.0.tgz", + "integrity": "sha512-SWUXVuWlBE+GIQWysB5HmoqBDkkaCydT8JJl5CWyApau3bTtHzEEafMEBBfkc4THmk/3YXgmjmSlXF5vefTo/g==", + "requires": { + "3d-view-controls": "^2.2.0", + "a-big-triangle": "^1.0.0", + "gl-axes3d": "^1.4.0", + "gl-fbo": "^2.0.3", + "gl-mat4": "^1.1.2", + "gl-select-static": "^2.0.2", + "gl-shader": "^4.2.1", + "gl-spikes3d": "^1.0.3", + "glslify": "^6.1.0", + "is-mobile": "^2.0.0", + "mouse-change": "^1.1.1", + "ndarray": "^1.0.16" + } + }, + "gl-pointcloud2d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gl-pointcloud2d/-/gl-pointcloud2d-1.0.1.tgz", + "integrity": "sha512-bCNaPSrZjBiKRrlbhHdipnmTc5xteubksevbPrmdlk2R6PTwQlQ38TDxuRYan02j0uDtem9wEp8etYYMjZFMhA==", + "requires": { + "gl-buffer": "^2.1.2", + "gl-shader": "^4.2.1", + "glslify": "^6.1.0", + "typedarray-pool": "^1.1.0" + } + }, + "gl-quat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gl-quat/-/gl-quat-1.0.0.tgz", + "integrity": "sha1-CUXskjOG9FMpvl3DV7HIwtR1hsU=", + "requires": { + "gl-mat3": "^1.0.0", + "gl-vec3": "^1.0.3", + "gl-vec4": "^1.0.0" + } + }, + "gl-scatter3d": { + 
"version": "1.1.0", + "resolved": "https://registry.npmjs.org/gl-scatter3d/-/gl-scatter3d-1.1.0.tgz", + "integrity": "sha512-8O/YXxRZloG0LPkmd5hr50IMmgbqdvQZ1axH+E90CpBrqez6D24WFJg74vPka2YJf89DIms8i6kElDlSFHCrCA==", + "requires": { + "gl-buffer": "^2.0.6", + "gl-mat4": "^1.0.0", + "gl-shader": "^4.2.0", + "gl-vao": "^1.1.2", + "glsl-out-of-range": "^1.0.3", + "glslify": "^6.1.0", + "is-string-blank": "^1.0.1", + "typedarray-pool": "^1.0.2", + "vectorize-text": "^3.2.0" + } + }, + "gl-select-box": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/gl-select-box/-/gl-select-box-1.0.2.tgz", + "integrity": "sha512-QCheTcyHiamTgOQ92P9swHgJoR25T8GGRCANASRtjdMXndlAbQG4qxBP15MRJx7RFWlOVvEeUzCvPn7r116orA==", + "requires": { + "gl-buffer": "^2.1.2", + "gl-shader": "^4.0.5", + "glslify": "^6.1.0" + } + }, + "gl-select-static": { + "version": "2.0.2", + "resolved": "http://registry.npmjs.org/gl-select-static/-/gl-select-static-2.0.2.tgz", + "integrity": "sha1-8+GQHfAxgdUy55WFMjBnnUr1fuk=", + "requires": { + "bit-twiddle": "^1.0.2", + "cwise": "^1.0.3", + "gl-fbo": "^2.0.3", + "ndarray": "^1.0.15", + "typedarray-pool": "^1.1.0" + } + }, + "gl-shader": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/gl-shader/-/gl-shader-4.2.1.tgz", + "integrity": "sha1-vJuAjpKTxRtmjojeYVsMETcI3C8=", + "requires": { + "gl-format-compiler-error": "^1.0.2", + "weakmap-shim": "^1.1.0" + } + }, + "gl-spikes2d": { + "version": "1.0.1", + "resolved": "http://registry.npmjs.org/gl-spikes2d/-/gl-spikes2d-1.0.1.tgz", + "integrity": "sha1-ys2y09vNICuFNFLoUAqLB3lJzAM=" + }, + "gl-spikes3d": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/gl-spikes3d/-/gl-spikes3d-1.0.6.tgz", + "integrity": "sha512-mXRG+3iCs4bDH7if2aOr1G5UpbNqKxfWpy7GR/afOHDSNsrq2ZjnWAwPmIJG7KdClPNPgiK30cVo7XisLt8PCQ==", + "requires": { + "gl-buffer": "^2.1.2", + "gl-shader": "^4.0.4", + "gl-vao": "^1.2.1", + "glslify": "^6.1.0" + } + }, + "gl-state": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gl-state/-/gl-state-1.0.0.tgz", + "integrity": "sha1-Ji+qdYNbC5xTLBLzitxCXR0wzRc=", + "requires": { + "uniq": "^1.0.0" + } + }, + "gl-streamtube3d": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/gl-streamtube3d/-/gl-streamtube3d-1.1.1.tgz", + "integrity": "sha512-6UKZ4C9RQVTuVFYhEE/k0vgFvXCm5G0mmw8p+s6vaR+pwAxwU+bTQXLyW6n+gOIy7/F6DiViy1vIq0pc6MZxSw==", + "requires": { + "gl-vec3": "^1.0.0", + "glsl-inverse": "^1.0.0", + "glsl-out-of-range": "^1.0.3", + "glslify": "^6.1.1" + } + }, + "gl-surface3d": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/gl-surface3d/-/gl-surface3d-1.3.7.tgz", + "integrity": "sha512-Q8230JPRBqYb8yDR7ORDZfK3uRY0k0gmqlujPIL36SQdZ1utKSCn/dNIe9SKiqyE7ycfdIBp0Z1otZM23Nn6bA==", + "requires": { + "binary-search-bounds": "^1.0.0", + "bit-twiddle": "^1.0.2", + "colormap": "^2.1.0", + "dup": "^1.0.0", + "gl-buffer": "^2.0.3", + "gl-mat4": "^1.0.0", + "gl-shader": "^4.2.0", + "gl-texture2d": "^2.0.0", + "gl-vao": "^1.1.1", + "glsl-out-of-range": "^1.0.3", + "glsl-specular-beckmann": "^1.1.2", + "glslify": "^6.1.0", + "ndarray": "^1.0.16", + "ndarray-gradient": "^1.0.0", + "ndarray-ops": "^1.2.1", + "ndarray-pack": "^1.0.1", + "ndarray-scratch": "^1.1.1", + "surface-nets": "^1.0.2", + "typedarray-pool": "^1.0.0" + } + }, + "gl-text": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/gl-text/-/gl-text-1.1.6.tgz", + "integrity": "sha512-OB+Nc5JKO1gyYYqBOJrYvCvRXIecfVpIKP7AviQNY63jrWPM9hUFSwZG7sH/paVnR1yCZBVirqOPfiFeF1Qo4g==", + "requires": 
{ + "bit-twiddle": "^1.0.2", + "color-normalize": "^1.1.0", + "css-font": "^1.2.0", + "detect-kerning": "^2.1.2", + "es6-weak-map": "^2.0.2", + "flatten-vertex-data": "^1.0.2", + "font-atlas": "^2.1.0", + "font-measure": "^1.2.2", + "gl-util": "^3.0.7", + "is-plain-obj": "^1.1.0", + "object-assign": "^4.1.1", + "parse-rect": "^1.2.0", + "parse-unit": "^1.0.1", + "pick-by-alias": "^1.2.0", + "regl": "^1.3.6", + "to-px": "^1.0.1", + "typedarray-pool": "^1.1.0" + } + }, + "gl-texture2d": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/gl-texture2d/-/gl-texture2d-2.1.0.tgz", + "integrity": "sha1-/2gk5+fDGoum/c2+nlxpXX4hh8c=", + "requires": { + "ndarray": "^1.0.15", + "ndarray-ops": "^1.2.2", + "typedarray-pool": "^1.1.0" + } + }, + "gl-util": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/gl-util/-/gl-util-3.1.0.tgz", + "integrity": "sha512-r/krwAgz7KWsp4A5XhUhSozmbjLaicoaiX1hJhgpUv/V5B7TCiEaRCBN20z/A4SR+u52HUjcAOW21lDg4CPZrA==", + "requires": { + "is-browser": "^2.0.1", + "is-firefox": "^1.0.3", + "is-plain-obj": "^1.1.0", + "number-is-integer": "^1.0.1", + "object-assign": "^4.1.0", + "pick-by-alias": "^1.2.0", + "weak-map": "^1.0.5" + } + }, + "gl-vao": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/gl-vao/-/gl-vao-1.3.0.tgz", + "integrity": "sha1-6ekqqVWIyrnVwvBLaTRAw99pGSM=" + }, + "gl-vec3": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/gl-vec3/-/gl-vec3-1.1.3.tgz", + "integrity": "sha512-jduKUqT0SGH02l8Yl+mV1yVsDfYgQAJyXGxkJQGyxPLHRiW25DwVIRPt6uvhrEMHftJfqhqKthRcyZqNEl9Xdw==" + }, + "gl-vec4": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gl-vec4/-/gl-vec4-1.0.1.tgz", + "integrity": "sha1-l9loeCgbFLUyy84QF4Xf0cs0CWQ=" + }, "glob": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", @@ -5225,56 +6887,312 @@ "pinkie-promise": "^2.0.0" } }, - "got": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/got/-/got-6.7.1.tgz", - "integrity": "sha1-JAzQV4WpoY5WHcG0S0HHY+8ejbA=", + "glsl-face-normal": { + "version": "1.0.2", + "resolved": "http://registry.npmjs.org/glsl-face-normal/-/glsl-face-normal-1.0.2.tgz", + "integrity": "sha1-fud12Rmk8u6S9Xu2mOh8x12/Eog=" + }, + "glsl-inject-defines": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/glsl-inject-defines/-/glsl-inject-defines-1.0.3.tgz", + "integrity": "sha1-3RqswsF/yyvT/DJBHGYz0Ne2D9Q=", "requires": { - "create-error-class": "^3.0.0", - "duplexer3": "^0.1.4", - "get-stream": "^3.0.0", - "is-redirect": "^1.0.0", - "is-retry-allowed": "^1.0.0", - "is-stream": "^1.0.0", - "lowercase-keys": "^1.0.0", - "safe-buffer": "^5.0.1", - "timed-out": "^4.0.0", - "unzip-response": "^2.0.1", - "url-parse-lax": "^1.0.0" + "glsl-token-inject-block": "^1.0.0", + "glsl-token-string": "^1.0.1", + "glsl-tokenizer": "^2.0.2" } }, - "graceful-fs": { - "version": "4.1.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", - "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=" + "glsl-inverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/glsl-inverse/-/glsl-inverse-1.0.0.tgz", + "integrity": "sha1-EsCx0GX1WERNHm/q95td34qRiuY=" }, - "growly": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/growly/-/growly-1.3.0.tgz", - "integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=" + "glsl-out-of-range": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/glsl-out-of-range/-/glsl-out-of-range-1.0.3.tgz", + "integrity": 
"sha512-3uSoD4aX4TjHx3uRJnJbUpegePR0tRPf9VWLS7EjDMbHHV+qrKjl8ov93ifG3kqzcxIOmaSXDK248EmM5uoQ/g==" }, - "gud": { + "glsl-read-float": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/glsl-read-float/-/glsl-read-float-1.1.0.tgz", + "integrity": "sha1-37CIsBYtz8xW/E7d0vhuGMrDLyY=" + }, + "glsl-resolve": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/glsl-resolve/-/glsl-resolve-0.0.1.tgz", + "integrity": "sha1-iUvvc5ENeSyBtRQxgANdCnivdtM=", + "requires": { + "resolve": "^0.6.1", + "xtend": "^2.1.2" + }, + "dependencies": { + "resolve": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-0.6.3.tgz", + "integrity": "sha1-3ZV5gufnNt699TtYpN2RdUV13UY=" + }, + "xtend": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-2.2.0.tgz", + "integrity": "sha1-7vax8ZjByN6vrYsXZaBNrUoBxak=" + } + } + }, + "glsl-shader-name": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/gud/-/gud-1.0.0.tgz", - "integrity": "sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw==" + "resolved": "https://registry.npmjs.org/glsl-shader-name/-/glsl-shader-name-1.0.0.tgz", + "integrity": "sha1-osMLO6c0mb77DMcYTXx3M91LSH0=", + "requires": { + "atob-lite": "^1.0.0", + "glsl-tokenizer": "^2.0.2" + } }, - "gzip-size": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-3.0.0.tgz", - "integrity": "sha1-VGGI6b3DN/Zzdy+BZgRks4nc5SA=", + "glsl-specular-beckmann": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/glsl-specular-beckmann/-/glsl-specular-beckmann-1.1.2.tgz", + "integrity": "sha1-/OkFaTPs3yRWJ4N2pU0IKJPndfE=" + }, + "glsl-specular-cook-torrance": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/glsl-specular-cook-torrance/-/glsl-specular-cook-torrance-2.0.1.tgz", + "integrity": "sha1-qJHMBsjHtPRyhwK0gk/ay7ln148=", "requires": { - "duplexer": "^0.1.1" + "glsl-specular-beckmann": "^1.1.1" } }, - "handle-thing": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-1.2.5.tgz", - "integrity": "sha1-/Xqtcmvxpf0W38KbL3pmAdJxOcQ=" + "glsl-token-assignments": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/glsl-token-assignments/-/glsl-token-assignments-2.0.2.tgz", + "integrity": "sha1-pdgqt4SZwuimuDy2lJXm5mXOAZ8=" }, - "handlebars": { - "version": "4.0.11", - "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.11.tgz", - "integrity": "sha1-Ywo13+ApS8KB7a5v/F0yn8eYLcw=", + "glsl-token-defines": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/glsl-token-defines/-/glsl-token-defines-1.0.0.tgz", + "integrity": "sha1-y4kqqVmTYjFyhHDU90AySJaX+p0=", + "requires": { + "glsl-tokenizer": "^2.0.0" + } + }, + "glsl-token-depth": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/glsl-token-depth/-/glsl-token-depth-1.1.2.tgz", + "integrity": "sha1-I8XjDuK9JViEtKKLyFC495HpXYQ=" + }, + "glsl-token-descope": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/glsl-token-descope/-/glsl-token-descope-1.0.2.tgz", + "integrity": "sha1-D8kKsyYYa4L1l7LnfcniHvzTIHY=", + "requires": { + "glsl-token-assignments": "^2.0.0", + "glsl-token-depth": "^1.1.0", + "glsl-token-properties": "^1.0.0", + "glsl-token-scope": "^1.1.0" + } + }, + "glsl-token-inject-block": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/glsl-token-inject-block/-/glsl-token-inject-block-1.1.0.tgz", + "integrity": 
"sha1-4QFfWYDBCRgkraomJfHf3ovQADQ=" + }, + "glsl-token-properties": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/glsl-token-properties/-/glsl-token-properties-1.0.1.tgz", + "integrity": "sha1-SD3D2Dnw1LXGFx0VkfJJvlPCip4=" + }, + "glsl-token-scope": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/glsl-token-scope/-/glsl-token-scope-1.1.2.tgz", + "integrity": "sha1-oXKOeN8kRE+cuT/RjvD3VQOmQ7E=" + }, + "glsl-token-string": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/glsl-token-string/-/glsl-token-string-1.0.1.tgz", + "integrity": "sha1-WUQdL4V958NEnJRWZgIezjWOSOw=" + }, + "glsl-token-whitespace-trim": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/glsl-token-whitespace-trim/-/glsl-token-whitespace-trim-1.0.0.tgz", + "integrity": "sha1-RtHf6Yx1vX1QTAXX0RsbPpzJOxA=" + }, + "glsl-tokenizer": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/glsl-tokenizer/-/glsl-tokenizer-2.1.5.tgz", + "integrity": "sha512-XSZEJ/i4dmz3Pmbnpsy3cKh7cotvFlBiZnDOwnj/05EwNp2XrhQ4XKJxT7/pDt4kp4YcpRSKz8eTV7S+mwV6MA==", + "requires": { + "through2": "^0.6.3" + } + }, + "glslify": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/glslify/-/glslify-6.4.1.tgz", + "integrity": "sha512-YDQ1Lei4Mj0TjJqjbf/llIJ1c10vsUTf6OQZ9N058PnVwOmIZyTmtr5Pgh9i99nxvP4M4sRWA5+IucQuOUnV5w==", + "requires": { + "bl": "^1.0.0", + "concat-stream": "^1.5.2", + "duplexify": "^3.4.5", + "falafel": "^2.1.0", + "from2": "^2.3.0", + "glsl-resolve": "0.0.1", + "glsl-token-whitespace-trim": "^1.0.0", + "glslify-bundle": "^5.0.0", + "glslify-deps": "^1.2.5", + "minimist": "^1.2.0", + "resolve": "^1.1.5", + "stack-trace": "0.0.9", + "static-eval": "^2.0.0", + "tape": "^4.6.0", + "through2": "^2.0.1", + "xtend": "^4.0.0" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + } + } + }, + "glslify-bundle": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/glslify-bundle/-/glslify-bundle-5.1.1.tgz", + "integrity": "sha512-plaAOQPv62M1r3OsWf2UbjN0hUYAB7Aph5bfH58VxJZJhloRNbxOL9tl/7H71K7OLJoSJ2ZqWOKk3ttQ6wy24A==", + "requires": { + "glsl-inject-defines": "^1.0.1", + "glsl-token-defines": "^1.0.0", + "glsl-token-depth": "^1.1.1", + "glsl-token-descope": "^1.0.2", + "glsl-token-scope": "^1.1.1", + "glsl-token-string": "^1.0.1", + "glsl-token-whitespace-trim": "^1.0.0", + "glsl-tokenizer": "^2.0.2", + "murmurhash-js": "^1.0.0", + "shallow-copy": "0.0.1" + } + }, + "glslify-deps": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/glslify-deps/-/glslify-deps-1.3.1.tgz", + "integrity": "sha512-Ogm179MCazwIRyEqs3g3EOY4Y3XIAa0yl8J5RE9rJC6QH1w8weVOp2RZu0mvnYy/2xIas1w166YR2eZdDkWQxg==", + "requires": { + "@choojs/findup": "^0.2.0", + "events": "^1.0.2", + "glsl-resolve": "0.0.1", + "glsl-tokenizer": "^2.0.0", + "graceful-fs": "^4.1.2", + "inherits": "^2.0.1", + "map-limit": "0.0.1", + "resolve": "^1.0.0" + } + }, + "got": { + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/got/-/got-6.7.1.tgz", + "integrity": "sha1-JAzQV4WpoY5WHcG0S0HHY+8ejbA=", + "requires": { + "create-error-class": 
"^3.0.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "unzip-response": "^2.0.1", + "url-parse-lax": "^1.0.0" + } + }, + "graceful-fs": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", + "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=" + }, + "gray-matter": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-3.1.1.tgz", + "integrity": "sha512-nZ1qjLmayEv0/wt3sHig7I0s3/sJO0dkAaKYQ5YAOApUtYEOonXSFdWvL1khvnZMTvov4UufkqlFsilPnejEXA==", + "requires": { + "extend-shallow": "^2.0.1", + "js-yaml": "^3.10.0", + "kind-of": "^5.0.2", + "strip-bom-string": "^1.0.0" + }, + "dependencies": { + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + }, + "js-yaml": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", + "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + } + } + }, + "grid-index": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/grid-index/-/grid-index-1.0.0.tgz", + "integrity": "sha1-rSxdVM5bNUN/r/HXCprrPR0mERA=" + }, + "growly": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/growly/-/growly-1.3.0.tgz", + "integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=" + }, + "gud": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gud/-/gud-1.0.0.tgz", + "integrity": "sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw==" + }, + "gzip-size": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-3.0.0.tgz", + "integrity": "sha1-VGGI6b3DN/Zzdy+BZgRks4nc5SA=", + "requires": { + "duplexer": "^0.1.1" + } + }, + "hammerjs": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/hammerjs/-/hammerjs-2.0.8.tgz", + "integrity": "sha1-BO93hiz/K7edMPdpIJWTAiK/YPE=" + }, + "handle-thing": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-1.2.5.tgz", + "integrity": "sha1-/Xqtcmvxpf0W38KbL3pmAdJxOcQ=" + }, + "handlebars": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.11.tgz", + "integrity": "sha1-Ywo13+ApS8KB7a5v/F0yn8eYLcw=", "requires": { "async": "^1.4.0", "optimist": "^0.6.1", @@ -5363,6 +7281,22 @@ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" }, + "has-hover": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-hover/-/has-hover-1.0.1.tgz", + "integrity": "sha1-PZdDeusZnGK4rAisvcU9O8UsF/c=", + "requires": { + "is-browser": "^2.0.1" + } + }, 
+ "has-passive-events": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-passive-events/-/has-passive-events-1.0.0.tgz", + "integrity": "sha512-2vSj6IeIsgvsRMyeQ0JaCX5Q3lX4zMn5HpoVc7MEhQ6pv8Iq9rsXjsp+E5ZwaT7T0xhMT0KmU8gtt1EFVdbJiw==", + "requires": { + "is-browser": "^2.0.1" + } + }, "has-symbols": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.0.tgz", @@ -5497,6 +7431,11 @@ "wbuf": "^1.1.0" } }, + "hsluv": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/hsluv/-/hsluv-0.0.3.tgz", + "integrity": "sha1-gpEH2vtKn4tSoYCe0C4JHq3mdUw=" + }, "html-comment-regex": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/html-comment-regex/-/html-comment-regex-1.1.1.tgz", @@ -5818,6 +7757,15 @@ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=" }, + "incremental-convex-hull": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/incremental-convex-hull/-/incremental-convex-hull-1.0.1.tgz", + "integrity": "sha1-UUKMFMudmmFEv+abKFH7N3M0vh4=", + "requires": { + "robust-orientation": "^1.1.2", + "simplicial-complex": "^1.0.0" + } + }, "indent-string": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", @@ -5923,6 +7871,14 @@ "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.1.0.tgz", "integrity": "sha1-ftGxQQxqDg94z5XTuEQMY/eLhhQ=" }, + "interval-tree-1d": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/interval-tree-1d/-/interval-tree-1d-1.0.3.tgz", + "integrity": "sha1-j9veArayx9verWNry+2OCHENhcE=", + "requires": { + "binary-search-bounds": "^1.0.0" + } + }, "invariant": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", @@ -5936,6 +7892,16 @@ "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=" }, + "invert-permutation": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/invert-permutation/-/invert-permutation-1.0.0.tgz", + "integrity": "sha1-oKeAQurbNrwXVR54fv0UOa3VSTM=" + }, + "iota-array": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/iota-array/-/iota-array-1.0.0.tgz", + "integrity": "sha1-ge9X/l0FgUzVjCSDYyqZwwoOgIc=" + }, "ip": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", @@ -6006,6 +7972,11 @@ "integrity": "sha1-mPiygDBoQhmpXzdc+9iM40Bd/5M=", "dev": true }, + "is-browser": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-browser/-/is-browser-2.1.0.tgz", + "integrity": "sha512-F5rTJxDQ2sW81fcfOR1GnCXT6sVJC104fCyfj+mjpwNEwaPYSn5fte5jiHmBg3DHsIoL/l8Kvw5VN5SsTRcRFQ==" + }, "is-buffer": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", @@ -6108,6 +8079,11 @@ "number-is-nan": "^1.0.0" } }, + "is-firefox": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-firefox/-/is-firefox-1.0.3.tgz", + "integrity": "sha1-KioVZ3g6QX9uFYMjEI84YbCRhWI=" + }, "is-fullwidth-code-point": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", @@ -6121,6 +8097,11 @@ "is-extglob": "^1.0.0" } }, + "is-iexplorer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-iexplorer/-/is-iexplorer-1.0.0.tgz", + "integrity": "sha1-HXK8ZtP+Iur2Fw3ajPEJQySM/HY=" + }, "is-installed-globally": { "version": "0.1.0", "resolved": 
"https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.1.0.tgz", @@ -6130,6 +8111,16 @@ "is-path-inside": "^1.0.0" } }, + "is-mobile": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-2.0.0.tgz", + "integrity": "sha512-k2+p7BBCzhqHMdYJwGUNNo+6zegGiMIVbM6bEPzxWXpQV6BUzV892UW0oDFgqxT6DygO7LdxRbwC0xmOhJdbew==" + }, + "is-negative-zero": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.0.tgz", + "integrity": "sha1-lVOxIbD6wohp2p7UWeIMdUN4hGE=" + }, "is-npm": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-1.0.0.tgz", @@ -6276,6 +8267,11 @@ "integrity": "sha1-zDqbaYV9Yh6WNyWiTK7shzuCbmQ=", "dev": true }, + "is-string-blank": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-string-blank/-/is-string-blank-1.0.1.tgz", + "integrity": "sha512-9H+ZBCVs3L9OYqv8nuUAzpcT9OTgMD1yAWrG7ihlnibdkbtB850heAmYWxHuXc4CHy4lKeK69tN+ny1K7gBIrw==" + }, "is-subset": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/is-subset/-/is-subset-0.1.1.tgz", @@ -6290,6 +8286,11 @@ "html-comment-regex": "^1.1.0" } }, + "is-svg-path": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-svg-path/-/is-svg-path-1.0.2.tgz", + "integrity": "sha1-d6tZDBKz0gNI5cehPQBAyHeE3aA=" + }, "is-symbol": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.1.tgz", @@ -6334,6 +8335,11 @@ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" }, + "ismobilejs": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/ismobilejs/-/ismobilejs-0.5.1.tgz", + "integrity": "sha512-QX4STsOcBYqlTjVGuAdP1MiRVxtiUbRHOKH0v7Gn1EvfUVIQnrSdgCM4zB4VCZuIejnb2NUMUx0Bwd3EIG6yyA==" + }, "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", @@ -6766,6 +8772,12 @@ "p-map": "^1.1.1" } }, + "jest-localstorage-mock": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/jest-localstorage-mock/-/jest-localstorage-mock-2.3.0.tgz", + "integrity": "sha512-Lk+awEPuIz0PSERHtnsXyMVLvf/4mZ3sZBEjKG5sJHvey2/i2JfQmmb/NHhialMbHXZILBORzuH64YXhWGlLsQ==", + "dev": true + }, "jest-matcher-utils": { "version": "20.0.3", "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-20.0.3.tgz", @@ -7136,6 +9148,14 @@ "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" }, + "json2mq": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/json2mq/-/json2mq-0.2.0.tgz", + "integrity": "sha1-tje9O6nqvhIsg+lyBIOusQ0skEo=", + "requires": { + "string-convert": "^0.2.0" + } + }, "json3": { "version": "3.3.2", "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.2.tgz", @@ -7175,6 +9195,11 @@ "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-1.4.1.tgz", "integrity": "sha1-OGchPo3Xm/Ho8jAMDPwe+xgsDfE=" }, + "kdbush": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/kdbush/-/kdbush-1.0.1.tgz", + "integrity": "sha1-PL0D6d6tnA9vZszblkUOXOzGQOA=" + }, "keycode": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/keycode/-/keycode-2.2.0.tgz", @@ -7219,6 +9244,16 @@ "invert-kv": "^1.0.0" } }, + "left-pad": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/left-pad/-/left-pad-1.3.0.tgz", + "integrity": 
"sha512-XI5MPzVNApjAyhQzphX8BkmKsKUxD4LdyK24iZeQGinBN9yTQT3bFlCBy/aVx2HrNcqQGsdot8ghrjyrvMCoEA==" + }, + "lerp": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/lerp/-/lerp-1.0.3.tgz", + "integrity": "sha1-oYyJaPkXiW3hXM/MKNVaa3Med24=" + }, "leven": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/leven/-/leven-2.1.0.tgz", @@ -7316,9 +9351,9 @@ } }, "lodash": { - "version": "4.17.10", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==" + "version": "4.17.11", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", + "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==" }, "lodash-es": { "version": "4.17.10", @@ -7365,6 +9400,11 @@ "resolved": "https://registry.npmjs.org/lodash.cond/-/lodash.cond-4.5.2.tgz", "integrity": "sha1-9HGh2khr5g9quVXRcRVSPdHSVdU=" }, + "lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" + }, "lodash.defaults": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz", @@ -7460,6 +9500,11 @@ "lodash._reinterpolate": "~3.0.0" } }, + "lodash.throttle": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=" + }, "lodash.topath": { "version": "4.5.2", "resolved": "https://registry.npmjs.org/lodash.topath/-/lodash.topath-4.5.2.tgz", @@ -7521,6 +9566,14 @@ "resolved": "https://registry.npmjs.org/macaddress/-/macaddress-0.2.8.tgz", "integrity": "sha1-WQTcU3w57G2+/q6QIycTX6hRHxI=" }, + "magic-string": { + "version": "0.22.5", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.22.5.tgz", + "integrity": "sha512-oreip9rJZkzvA8Qzk9HFs8fZGF/u7H/gtrE8EN6RjKJ9kh2HlC+yQ2QezifqTZfGyiuAV0dRv5a+y/8gBb1m9w==", + "requires": { + "vlq": "^0.2.2" + } + }, "make-dir": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", @@ -7549,6 +9602,24 @@ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=" }, + "map-limit": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/map-limit/-/map-limit-0.0.1.tgz", + "integrity": "sha1-63lhAxwPDo0AG/LVb6toXViCLzg=", + "requires": { + "once": "~1.3.0" + }, + "dependencies": { + "once": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/once/-/once-1.3.3.tgz", + "integrity": "sha1-suJhVXzkwxTsgwTz+oJmPkKXyiA=", + "requires": { + "wrappy": "1" + } + } + } + }, "map-obj": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", @@ -7562,16 +9633,113 @@ "object-visit": "^1.0.0" } }, + "mapbox-gl": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/mapbox-gl/-/mapbox-gl-0.45.0.tgz", + "integrity": "sha1-r3HMgk8NflHM1cUF6q5BG8CRDM0=", + "requires": { + "@mapbox/gl-matrix": "^0.0.1", + "@mapbox/jsonlint-lines-primitives": "^2.0.1", + "@mapbox/mapbox-gl-supported": "^1.3.1", + "@mapbox/point-geometry": "^0.1.0", + "@mapbox/shelf-pack": "^3.1.0", + "@mapbox/tiny-sdf": "^1.1.0", + "@mapbox/unitbezier": "^0.0.0", + "@mapbox/vector-tile": "^1.3.1", + "@mapbox/whoots-js": "^3.0.0", + "brfs": "^1.4.4", + "csscolorparser": "~1.0.2", + "earcut": 
"^2.1.3", + "geojson-rewind": "^0.3.0", + "geojson-vt": "^3.1.0", + "gray-matter": "^3.0.8", + "grid-index": "^1.0.0", + "minimist": "0.0.8", + "pbf": "^3.0.5", + "quickselect": "^1.0.0", + "rw": "^1.3.3", + "shuffle-seed": "^1.1.6", + "sort-object": "^0.3.2", + "supercluster": "^2.3.0", + "through2": "^2.0.3", + "tinyqueue": "^1.1.0", + "vt-pbf": "^3.0.1" + }, + "dependencies": { + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + } + } + }, + "marching-simplex-table": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/marching-simplex-table/-/marching-simplex-table-1.0.0.tgz", + "integrity": "sha1-vBYlbg+Pm1WKqbKHL4gy2UM/Uuo=", + "requires": { + "convex-hull": "^1.0.3" + } + }, + "mat4-decompose": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mat4-decompose/-/mat4-decompose-1.0.4.tgz", + "integrity": "sha1-ZetP451wh496RE60Yk1S9+frL68=", + "requires": { + "gl-mat4": "^1.0.1", + "gl-vec3": "^1.0.2" + } + }, + "mat4-interpolate": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mat4-interpolate/-/mat4-interpolate-1.0.4.tgz", + "integrity": "sha1-Vf/p6zw1KV4sDVqfdyXZBoqJ/3Q=", + "requires": { + "gl-mat4": "^1.0.1", + "gl-vec3": "^1.0.2", + "mat4-decompose": "^1.0.3", + "mat4-recompose": "^1.0.3", + "quat-slerp": "^1.0.0" + } + }, + "mat4-recompose": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mat4-recompose/-/mat4-recompose-1.0.4.tgz", + "integrity": "sha1-OVPCMP8kc9x3LuAUpSySXPgbDk0=", + "requires": { + "gl-mat4": "^1.0.1" + } + }, "math-expression-evaluator": { "version": "1.2.17", "resolved": "https://registry.npmjs.org/math-expression-evaluator/-/math-expression-evaluator-1.2.17.tgz", "integrity": "sha1-3oGf282E3M2PrlnGrreWFbnSZqw=" }, + "math-log2": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/math-log2/-/math-log2-1.0.1.tgz", + "integrity": "sha1-+4lBvl9evol55xjmJzsXjlhpRWU=" + }, "math-random": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz", "integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w=" }, + "matrix-camera-controller": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/matrix-camera-controller/-/matrix-camera-controller-2.1.3.tgz", + "integrity": "sha1-NeUmDMHNVQliunmfLY1OlLGjk3A=", + "requires": { + "binary-search-bounds": "^1.0.0", + "gl-mat4": "^1.1.2", + "gl-vec3": "^1.0.3", + "mat4-interpolate": "^1.0.3" + } + }, "md5.js": { "version": "1.3.4", "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.4.tgz", @@ -7628,15 +9796,30 @@ } }, "merge": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/merge/-/merge-1.2.0.tgz", - "integrity": "sha1-dTHjnUlJwoGma4xabgJl6LBYlNo=" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/merge/-/merge-1.2.1.tgz", + "integrity": "sha512-VjFo4P5Whtj4vsLzsYBu5ayHhoHJ0UqNm7ibvShmbmoz7tGi0vXaoJbGdB+GmDMLUdg8DpQXEIeVDAe8MaABvQ==" }, "merge-descriptors": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" }, + "merge-source-map": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.0.4.tgz", + "integrity": "sha1-pd5GU42uhNQRTMXqArR3KmNGcB8=", + "requires": { + 
"source-map": "^0.5.6" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=" + } + } + }, "methods": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", @@ -7694,6 +9877,24 @@ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==" }, + "mini-store": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mini-store/-/mini-store-2.0.0.tgz", + "integrity": "sha512-EG0CuwpQmX+XL4QVS0kxNwHW5ftSbhygu1qxQH0pipugjnPkbvkalCdQbEihMwtQY6d3MTN+MS0q+aurs+RfLQ==", + "requires": { + "hoist-non-react-statics": "^2.3.1", + "prop-types": "^15.6.0", + "react-lifecycles-compat": "^3.0.4", + "shallowequal": "^1.0.2" + }, + "dependencies": { + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + } + } + }, "minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", @@ -7744,12 +9945,60 @@ "minimist": "0.0.8" } }, + "moment": { + "version": "2.24.0", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.24.0.tgz", + "integrity": "sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg==" + }, + "monotone-convex-hull-2d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/monotone-convex-hull-2d/-/monotone-convex-hull-2d-1.0.1.tgz", + "integrity": "sha1-R/Xa6t88Sv03dkuqGqh4ekDu4Iw=", + "requires": { + "robust-orientation": "^1.1.3" + } + }, "moo": { "version": "0.4.3", "resolved": "https://registry.npmjs.org/moo/-/moo-0.4.3.tgz", "integrity": "sha512-gFD2xGCl8YFgGHsqJ9NKRVdwlioeW3mI1iqfLNYQOv0+6JRwG58Zk9DIGQgyIaffSYaO1xsKnMaYzzNr1KyIAw==", "dev": true }, + "mouse-change": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/mouse-change/-/mouse-change-1.4.0.tgz", + "integrity": "sha1-wrd+W/o0pDzhRFyBV6Tk3JiVwU8=", + "requires": { + "mouse-event": "^1.0.0" + } + }, + "mouse-event": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/mouse-event/-/mouse-event-1.0.5.tgz", + "integrity": "sha1-s3ie23EJmX1aky0dAdqhVDpQFzI=" + }, + "mouse-event-offset": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mouse-event-offset/-/mouse-event-offset-3.0.2.tgz", + "integrity": "sha1-39hqbiSMa6jK1TuQXVA3ogY+mYQ=" + }, + "mouse-wheel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mouse-wheel/-/mouse-wheel-1.2.0.tgz", + "integrity": "sha1-bSkDseqPtI5h8bU7kDZ3PwQs21w=", + "requires": { + "right-now": "^1.0.0", + "signum": "^1.0.0", + "to-px": "^1.0.1" + }, + "dependencies": { + "signum": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/signum/-/signum-1.0.0.tgz", + "integrity": "sha1-dKfSvyogtA66FqkrFSEk8dVZ+nc=" + } + } + }, "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", @@ -7769,6 +10018,24 @@ "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz", "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" }, + "mumath": { + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/mumath/-/mumath-3.3.4.tgz", + "integrity": 
"sha1-SNSg8P2MrU57Mglu6JsWGmPTC78=", + "requires": { + "almost-equal": "^1.1.0" + } + }, + "murmurhash-js": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/murmurhash-js/-/murmurhash-js-1.0.0.tgz", + "integrity": "sha1-sGJ44h/Gw3+lMTcysEEry2rhX1E=" + }, + "mutationobserver-shim": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/mutationobserver-shim/-/mutationobserver-shim-0.3.3.tgz", + "integrity": "sha512-gciOLNN8Vsf7YzcqRjKzlAJ6y7e+B86u7i3KXes0xfxx/nfLmozlW1Vn+Sc9x3tPIePFgc1AeIFhtRgkqTjzDQ==" + }, "mute-stream": { "version": "0.0.7", "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz", @@ -7804,6 +10071,98 @@ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=" }, + "ndarray": { + "version": "1.0.18", + "resolved": "https://registry.npmjs.org/ndarray/-/ndarray-1.0.18.tgz", + "integrity": "sha1-tg06cyJOxVXQ+qeXEeUCRI/T95M=", + "requires": { + "iota-array": "^1.0.0", + "is-buffer": "^1.0.2" + } + }, + "ndarray-extract-contour": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ndarray-extract-contour/-/ndarray-extract-contour-1.0.1.tgz", + "integrity": "sha1-Cu4ROjozsia5DEiIz4d79HUTBeQ=", + "requires": { + "typedarray-pool": "^1.0.0" + } + }, + "ndarray-fill": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/ndarray-fill/-/ndarray-fill-1.0.2.tgz", + "integrity": "sha1-owpg9xiODJWC/N1YiWrNy1IqHtY=", + "requires": { + "cwise": "^1.0.10" + } + }, + "ndarray-gradient": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/ndarray-gradient/-/ndarray-gradient-1.0.0.tgz", + "integrity": "sha1-t0kaUVxqZJ8ZpiMk//byf8jCU5M=", + "requires": { + "cwise-compiler": "^1.0.0", + "dup": "^1.0.0" + } + }, + "ndarray-homography": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/ndarray-homography/-/ndarray-homography-1.0.0.tgz", + "integrity": "sha1-w1UW6oa8KGK06ASiNqJwcwn+KWs=", + "requires": { + "gl-matrix-invert": "^1.0.0", + "ndarray-warp": "^1.0.0" + } + }, + "ndarray-linear-interpolate": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/ndarray-linear-interpolate/-/ndarray-linear-interpolate-1.0.0.tgz", + "integrity": "sha1-eLySuFuavBW25n7mWCj54hN65ys=" + }, + "ndarray-ops": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/ndarray-ops/-/ndarray-ops-1.2.2.tgz", + "integrity": "sha1-WeiNLDKn7ryxvGkPrhQVeVV6YU4=", + "requires": { + "cwise-compiler": "^1.0.0" + } + }, + "ndarray-pack": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ndarray-pack/-/ndarray-pack-1.2.1.tgz", + "integrity": "sha1-jK6+qqJNXs9w/4YCBjeXfajuWFo=", + "requires": { + "cwise-compiler": "^1.1.2", + "ndarray": "^1.0.13" + } + }, + "ndarray-scratch": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/ndarray-scratch/-/ndarray-scratch-1.2.0.tgz", + "integrity": "sha1-YwRjbWLrqT20cnrBPGkzQdulDgE=", + "requires": { + "ndarray": "^1.0.14", + "ndarray-ops": "^1.2.1", + "typedarray-pool": "^1.0.2" + } + }, + "ndarray-sort": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ndarray-sort/-/ndarray-sort-1.0.1.tgz", + "integrity": "sha1-/qBbTLg0x/TgIWo1TzynUTAN/Wo=", + "requires": { + "typedarray-pool": "^1.0.0" + } + }, + "ndarray-warp": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ndarray-warp/-/ndarray-warp-1.0.1.tgz", + "integrity": "sha1-qKElqqu6C+v5O9bKg+ar1oIqNOA=", + "requires": { + "cwise": "^1.0.4", + "ndarray-linear-interpolate": 
"^1.0.0" + } + }, "nearley": { "version": "2.15.1", "resolved": "https://registry.npmjs.org/nearley/-/nearley-2.15.1.tgz", @@ -7832,6 +10191,14 @@ "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" }, + "nextafter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/nextafter/-/nextafter-1.0.0.tgz", + "integrity": "sha1-t9d7U1MQ4+CX5gJauwqQNHfsGjo=", + "requires": { + "double-bits": "^1.1.0" + } + }, "no-case": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/no-case/-/no-case-2.3.2.tgz", @@ -7949,6 +10316,11 @@ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=" }, + "normalize-svg-path": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/normalize-svg-path/-/normalize-svg-path-0.1.0.tgz", + "integrity": "sha1-RWNg5g7Odfvve11+FgSA5//Rb+U=" + }, "normalize-url": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-1.9.1.tgz", @@ -7976,6 +10348,11 @@ } } }, + "normals": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/normals/-/normals-1.1.0.tgz", + "integrity": "sha1-MltZXtNK/kZ6bFWhT9kIV4f/WcA=" + }, "npm": { "version": "6.4.1", "resolved": "https://registry.npmjs.org/npm/-/npm-6.4.1.tgz", @@ -11079,11 +13456,24 @@ "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=" }, + "number-is-integer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-integer/-/number-is-integer-1.0.1.tgz", + "integrity": "sha1-5ZvKFy/+0nMY55x862y3LAlbIVI=", + "requires": { + "is-finite": "^1.0.1" + } + }, "number-is-nan": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" }, + "numeric": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/numeric/-/numeric-1.2.6.tgz", + "integrity": "sha1-dlsCvvl5iPz4gNTrPza4D6MTNao=" + }, "nwmatcher": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/nwmatcher/-/nwmatcher-1.4.4.tgz", @@ -11135,8 +13525,7 @@ "object-inspect": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.6.0.tgz", - "integrity": "sha512-GJzfBZ6DgDAmnuaM3104jR4s1Myxr3Y3zfIyN4z3UdqN69oSRacNK8UhnobDdC+7J2AHCjGwxQubNJfE70SXXQ==", - "dev": true + "integrity": "sha512-GJzfBZ6DgDAmnuaM3104jR4s1Myxr3Y3zfIyN4z3UdqN69oSRacNK8UhnobDdC+7J2AHCjGwxQubNJfE70SXXQ==" }, "object-is": { "version": "1.0.1", @@ -11215,6 +13604,14 @@ "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" }, + "omit.js": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/omit.js/-/omit.js-1.0.0.tgz", + "integrity": "sha512-O1rwbvEfAdhtonTv+v6IQeMOKTi/wlHcXpI3hehyPDlujkjSBQC6Vtzg0mdy+v2KVDmuPf7hAbHlTBM6q1bUHQ==", + "requires": { + "babel-runtime": "^6.23.0" + } + }, "on-finished": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", @@ -11252,6 +13649,11 @@ "is-wsl": "^1.1.0" } }, + "optical-properties": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/optical-properties/-/optical-properties-1.0.0.tgz", + "integrity": "sha512-XnBQYbIIzDVr7U3L7d3xyAEqp1W+HTkqmw/G4L/Ae/+dq57bT1jqW2uDwV0wCUzO8gsTDIZhGQsGrMb17VSkEA==" + }, "optimist": { 
"version": "0.6.1", "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", @@ -11281,6 +13683,15 @@ "wordwrap": "~1.0.0" } }, + "orbit-camera-controller": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/orbit-camera-controller/-/orbit-camera-controller-4.0.0.tgz", + "integrity": "sha1-bis28OeHhmPDMPUNqbfOaGwncAU=", + "requires": { + "filtered-vector": "^1.2.1", + "gl-mat4": "^1.0.3" + } + }, "original": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/original/-/original-1.0.0.tgz", @@ -11365,6 +13776,14 @@ "semver": "^5.1.0" } }, + "pad-left": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/pad-left/-/pad-left-1.0.2.tgz", + "integrity": "sha1-GeVzXqmDlaJs7carkm6tEPMQDUw=", + "requires": { + "repeat-string": "^1.3.0" + } + }, "pako": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.6.tgz", @@ -11378,6 +13797,11 @@ "no-case": "^2.2.0" } }, + "parenthesis": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/parenthesis/-/parenthesis-3.1.5.tgz", + "integrity": "sha512-9KbfUp3+gD0MIl4AGfLBwVNvcPf1fokUJtYxql511chVNnS8DrYFazqBfZDqD4GV76XUhQbbxmZJPPOsV4GIbw==" + }, "parse-asn1": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.1.tgz", @@ -11414,6 +13838,24 @@ "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz", "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=" }, + "parse-rect": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/parse-rect/-/parse-rect-1.2.0.tgz", + "integrity": "sha512-4QZ6KYbnE6RTwg9E0HpLchUM9EZt6DnDxajFZZDSV4p/12ZJEvPO702DZpGvRYEPo00yKDys7jASi+/w7aO8LA==", + "requires": { + "pick-by-alias": "^1.2.0" + } + }, + "parse-svg-path": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/parse-svg-path/-/parse-svg-path-0.1.2.tgz", + "integrity": "sha1-en7A0esG+lMlx9PgCbhZoJtdSes=" + }, + "parse-unit": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parse-unit/-/parse-unit-1.0.1.tgz", + "integrity": "sha1-fhu21b7zh0wo45JSaiVBFwKR7s8=" + }, "parse5": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/parse5/-/parse5-1.5.1.tgz", @@ -11489,6 +13931,15 @@ "pinkie-promise": "^2.0.0" } }, + "pbf": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pbf/-/pbf-3.1.0.tgz", + "integrity": "sha512-/hYJmIsTmh7fMkHAWWXJ5b8IKLWdjdlAFb3IHkRBn1XUhIYBChVGfVwmHEAV3UfXTxsP/AKfYTXTS/dCPxJd5w==", + "requires": { + "ieee754": "^1.1.6", + "resolve-protobuf-schema": "^2.0.0" + } + }, "pbkdf2": { "version": "3.0.16", "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.16.tgz", @@ -11506,6 +13957,28 @@ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, + "permutation-parity": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/permutation-parity/-/permutation-parity-1.0.0.tgz", + "integrity": "sha1-AXTVH8pwSxG5pLFSsj1Tf9xrXvQ=", + "requires": { + "typedarray-pool": "^1.0.0" + } + }, + "permutation-rank": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/permutation-rank/-/permutation-rank-1.0.0.tgz", + "integrity": "sha1-n9mLvOzwj79ZlLXq3JSmLmeUg7U=", + "requires": { + "invert-permutation": "^1.0.0", + "typedarray-pool": "^1.0.0" + } + }, + "pick-by-alias": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pick-by-alias/-/pick-by-alias-1.2.0.tgz", + "integrity": "sha1-X3yysfIabh6ISgyHhVqko3NhEHs=" + }, "pify": { "version": 
"2.3.0", "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", @@ -11532,11 +14005,155 @@ "find-up": "^2.1.0" } }, + "planar-dual": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/planar-dual/-/planar-dual-1.0.2.tgz", + "integrity": "sha1-tqQjVSOxsMt55fkm+OozXdmC1WM=", + "requires": { + "compare-angle": "^1.0.0", + "dup": "^1.0.0" + } + }, + "planar-graph-to-polyline": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/planar-graph-to-polyline/-/planar-graph-to-polyline-1.0.5.tgz", + "integrity": "sha1-iCuGBRmbqIv9RkyVUzA1VsUrmIo=", + "requires": { + "edges-to-adjacency-list": "^1.0.0", + "planar-dual": "^1.0.0", + "point-in-big-polygon": "^2.0.0", + "robust-orientation": "^1.0.1", + "robust-sum": "^1.0.0", + "two-product": "^1.0.0", + "uniq": "^1.0.0" + } + }, + "plotly.js": { + "version": "1.42.5", + "resolved": "https://registry.npmjs.org/plotly.js/-/plotly.js-1.42.5.tgz", + "integrity": "sha512-8CNLc4KGOv6Vjwm4tKVTSVNbBb2P2j7wqCrkSFNtKUA7iFkharaheY63lyg+/dZH4apucHYje/Yrh6eUPlF3GA==", + "requires": { + "3d-view": "^2.0.0", + "@plotly/d3-sankey": "^0.5.1", + "alpha-shape": "^1.0.0", + "array-range": "^1.0.1", + "canvas-fit": "^1.5.0", + "color-normalize": "^1.3.0", + "convex-hull": "^1.0.3", + "country-regex": "^1.1.0", + "d3": "^3.5.12", + "d3-force": "^1.0.6", + "delaunay-triangulate": "^1.1.6", + "es6-promise": "^3.0.2", + "fast-isnumeric": "^1.1.2", + "font-atlas-sdf": "^1.3.3", + "gl-cone3d": "^1.2.0", + "gl-contour2d": "^1.1.4", + "gl-error3d": "^1.0.8", + "gl-heatmap2d": "^1.0.4", + "gl-line3d": "^1.1.5", + "gl-mat4": "^1.2.0", + "gl-mesh3d": "^2.0.1", + "gl-plot2d": "^1.3.1", + "gl-plot3d": "^1.5.10", + "gl-pointcloud2d": "^1.0.1", + "gl-scatter3d": "^1.0.14", + "gl-select-box": "^1.0.2", + "gl-spikes2d": "^1.0.1", + "gl-streamtube3d": "^1.1.0", + "gl-surface3d": "^1.3.6", + "gl-text": "^1.1.6", + "glslify": "^6.3.1", + "has-hover": "^1.0.1", + "has-passive-events": "^1.0.0", + "mapbox-gl": "0.45.0", + "matrix-camera-controller": "^2.1.3", + "mouse-change": "^1.4.0", + "mouse-event-offset": "^3.0.2", + "mouse-wheel": "^1.0.2", + "ndarray": "^1.0.18", + "ndarray-fill": "^1.0.2", + "ndarray-homography": "^1.0.0", + "ndarray-ops": "^1.2.2", + "point-cluster": "^3.1.4", + "polybooljs": "^1.2.0", + "regl": "^1.3.7", + "regl-error2d": "^2.0.5", + "regl-line2d": "^3.0.12", + "regl-scatter2d": "^3.0.6", + "regl-splom": "^1.0.4", + "right-now": "^1.0.0", + "robust-orientation": "^1.1.3", + "sane-topojson": "^2.0.0", + "strongly-connected-components": "^1.0.1", + "superscript-text": "^1.0.0", + "svg-path-sdf": "^1.1.2", + "tinycolor2": "^1.3.0", + "topojson-client": "^2.1.0", + "webgl-context": "^2.2.0", + "world-calendars": "^1.0.3" + }, + "dependencies": { + "es6-promise": { + "version": "3.3.1", + "resolved": "http://registry.npmjs.org/es6-promise/-/es6-promise-3.3.1.tgz", + "integrity": "sha1-oIzd6EzNvzTQJ6FFG8kdS80ophM=" + } + } + }, "pluralize": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-7.0.0.tgz", "integrity": "sha512-ARhBOdzS3e41FbkW/XWrTEtukqqLoK5+Z/4UeDaLuSW+39JPeFgs4gCGqsrJHVZX0fUrx//4OF0K1CUGwlIFow==" }, + "point-cluster": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/point-cluster/-/point-cluster-3.1.4.tgz", + "integrity": "sha512-jVjzC1vYoZlvcLWi170i41he5LhJTncOgFPaZx1uoqNn+8q+24xjLS9yG68XfN6/U1F52kliD6a3oXjJduerTQ==", + "requires": { + "array-bounds": "^1.0.1", + "array-normalize": "^1.1.3", + "binary-search-bounds": "^2.0.4", + "bubleify": "^1.1.0", + "clamp": "^1.0.1", 
+ "dtype": "^2.0.0", + "flatten-vertex-data": "^1.0.0", + "is-obj": "^1.0.1", + "math-log2": "^1.0.1", + "parse-rect": "^1.2.0" + }, + "dependencies": { + "binary-search-bounds": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.4.tgz", + "integrity": "sha512-2hg5kgdKql5ClF2ErBcSx0U5bnl5hgS4v7wMnLFodyR47yMtj2w+UAZB+0CiqyHct2q543i7Bi4/aMIegorCCg==" + } + } + }, + "point-in-big-polygon": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/point-in-big-polygon/-/point-in-big-polygon-2.0.0.tgz", + "integrity": "sha1-ObYT6mzxfWtD4Yj3fzTETGszulU=", + "requires": { + "binary-search-bounds": "^1.0.0", + "interval-tree-1d": "^1.0.1", + "robust-orientation": "^1.1.3", + "slab-decomposition": "^1.0.1" + } + }, + "polybooljs": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/polybooljs/-/polybooljs-1.2.0.tgz", + "integrity": "sha1-tDkMLgedTCYtOyUExiiNlbp6R1g=" + }, + "polytope-closest-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/polytope-closest-point/-/polytope-closest-point-1.0.0.tgz", + "integrity": "sha1-5uV/QIGrXox3i4Ee8G4sSK4zjD8=", + "requires": { + "numeric": "^1.2.6" + } + }, "portfinder": { "version": "1.0.13", "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.13.tgz", @@ -12719,6 +15336,11 @@ "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=" }, + "prettier": { + "version": "1.16.4", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.16.4.tgz", + "integrity": "sha512-ZzWuos7TI5CKUeQAtFd6Zhm2s6EpAD/ZLApIhsF9pRvRtM1RFo61dM/4MSRUA0SuLugA/zgrZD8m0BaY46Og7g==" + }, "pretty-bytes": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-4.0.2.tgz", @@ -12794,6 +15416,11 @@ "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-1.5.1.tgz", "integrity": "sha512-CGuc0VUTGthpJXL36ydB6jnbyOf/rAHFvmVrJlH+Rg0DqqLFQGAP6hIaxD/G0OAmBJPhXDHuEJigrp0e0wFV6g==" }, + "protocol-buffers-schema": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/protocol-buffers-schema/-/protocol-buffers-schema-3.3.2.tgz", + "integrity": "sha512-Xdayp8sB/mU+sUV4G7ws8xtYMGdQnxbeIfLjyO9TZZRJdztBGhlmbI5x1qcY4TG5hBkIKGnc28i7nXxaugu88w==" + }, "proxy-addr": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.3.tgz", @@ -12840,6 +15467,14 @@ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" }, + "quat-slerp": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/quat-slerp/-/quat-slerp-1.0.1.tgz", + "integrity": "sha1-K6oVzjprvcMkHZcusXKDE57Wnyk=", + "requires": { + "gl-quat": "^1.0.0" + } + }, "querystring": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", @@ -12855,6 +15490,65 @@ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-0.0.4.tgz", "integrity": "sha1-DPf4T5Rj/wrlHExLFC2VvjdyTZw=" }, + "quickselect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/quickselect/-/quickselect-1.1.1.tgz", + "integrity": "sha512-qN0Gqdw4c4KGPsBOQafj6yj/PA6c/L63f6CaZ/DCF/xF4Esu3jVmKLUDYxghFx8Kb/O7y9tI7x2RjTSXwdK1iQ==" + }, + "quote-stream": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/quote-stream/-/quote-stream-0.0.0.tgz", + "integrity": "sha1-zeKelMQJsW4Z3HCYuJtmWPlyHTs=", + 
"requires": { + "minimist": "0.0.8", + "through2": "~0.4.1" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "object-keys": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-0.4.0.tgz", + "integrity": "sha1-KKaq50KN0sOpLz2V8hM13SBOAzY=" + }, + "readable-stream": { + "version": "1.0.34", + "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + }, + "through2": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/through2/-/through2-0.4.2.tgz", + "integrity": "sha1-2/WGYDEVHsg1K7bE22SiKSqEC5s=", + "requires": { + "readable-stream": "~1.0.17", + "xtend": "~2.1.1" + } + }, + "xtend": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-2.1.2.tgz", + "integrity": "sha1-bv7MKk2tjmlixJAbM3znuoe10os=", + "requires": { + "object-keys": "~0.4.0" + } + } + } + }, "radium": { "version": "0.19.6", "resolved": "https://registry.npmjs.org/radium/-/radium-0.19.6.tgz", @@ -12929,6 +15623,14 @@ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=" }, + "rat-vec": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/rat-vec/-/rat-vec-1.1.1.tgz", + "integrity": "sha1-Dd4rZrezS7G80qI4BerIBth/0X8=", + "requires": { + "big-rat": "^1.0.3" + } + }, "raw-body": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz", @@ -12968,33 +15670,655 @@ } } }, - "rc": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.7.tgz", - "integrity": "sha512-LdLD8xD4zzLsAT5xyushXDNscEjB7+2ulnl8+r1pnESlYtlJtVSoCMBGr30eDRJ3+2Gq89jK9P9e4tCEH1+ywA==", + "rc": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.7.tgz", + "integrity": "sha512-LdLD8xD4zzLsAT5xyushXDNscEjB7+2ulnl8+r1pnESlYtlJtVSoCMBGr30eDRJ3+2Gq89jK9P9e4tCEH1+ywA==", + "requires": { + "deep-extend": "^0.5.1", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + } + } + }, + "rc-align": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/rc-align/-/rc-align-2.4.5.tgz", + "integrity": "sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw==", + "requires": { + "babel-runtime": "^6.26.0", + "dom-align": "^1.7.0", + "prop-types": "^15.5.8", + "rc-util": "^4.0.4" + } + }, + "rc-animate": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/rc-animate/-/rc-animate-2.6.0.tgz", + "integrity": "sha512-JXDycchgbOI+7T/VKmFWnAIn042LLScK1fNkmNunb0jz5q5aPGCAybx2bTo7X5t31Jkj9OsxKNb/vZPDPWufCg==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.6", + "css-animation": "^1.3.2", + "prop-types": "15.x", + "raf": "^3.4.0", + "react-lifecycles-compat": "^3.0.4" + } + }, + "rc-calendar": { + "version": "9.10.10", + 
"resolved": "https://registry.npmjs.org/rc-calendar/-/rc-calendar-9.10.10.tgz", + "integrity": "sha512-WFnxpXGzIt2cPCJjFmrju/w2jZHAO9jW3JSDZovaJuBtVciu1p8brL6PSjWCo4flD3jVurL9LO8tJwgajELj2w==", + "requires": { + "babel-runtime": "6.x", + "classnames": "2.x", + "moment": "2.x", + "prop-types": "^15.5.8", + "rc-trigger": "^2.2.0", + "rc-util": "^4.1.1", + "react-lifecycles-compat": "^3.0.4" + } + }, + "rc-cascader": { + "version": "0.17.1", + "resolved": "https://registry.npmjs.org/rc-cascader/-/rc-cascader-0.17.1.tgz", + "integrity": "sha512-JED1iOLpj1+uob+0Asd4zwhhMRp3gLs2iYOY2/0OsdEsPc8Qj6TUwj8+isVtqyXiwGWG3vo8XgO6KCM/i7ZFqQ==", + "requires": { + "array-tree-filter": "^2.1.0", + "prop-types": "^15.5.8", + "rc-trigger": "^2.2.0", + "rc-util": "^4.0.4", + "react-lifecycles-compat": "^3.0.4", + "shallow-equal": "^1.0.0", + "warning": "^4.0.1" + }, + "dependencies": { + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "rc-checkbox": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/rc-checkbox/-/rc-checkbox-2.1.6.tgz", + "integrity": "sha512-+VxQbt2Cwe1PxCvwosrAYXT6EQeGwrbLJB2K+IPGCSRPCKnk9zcub/0eW8A4kxjyyfh60PkwsAUZ7qmB31OmRA==", + "requires": { + "babel-runtime": "^6.23.0", + "classnames": "2.x", + "prop-types": "15.x", + "rc-util": "^4.0.4" + } + }, + "rc-collapse": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/rc-collapse/-/rc-collapse-1.11.1.tgz", + "integrity": "sha512-9HA8f7aWE0yabnzfE2v/7IyMb6dTmj052A9cyEMB0aT1sdLESpetMAzT3FkLcPT5fl7YNRkyVZ3zwkC5qMmzmA==", + "requires": { + "classnames": "2.x", + "css-animation": "1.x", + "prop-types": "^15.5.6", + "rc-animate": "2.x", + "react-is": "^16.7.0", + "shallowequal": "^1.1.0" + }, + "dependencies": { + "react-is": { + "version": "16.8.5", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.8.5.tgz", + "integrity": "sha512-sudt2uq5P/2TznPV4Wtdi+Lnq3yaYW8LfvPKLM9BKD8jJNBkxMVyB0C9/GmVhLw7Jbdmndk/73n7XQGeN9A3QQ==" + }, + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + } + } + }, + "rc-dialog": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/rc-dialog/-/rc-dialog-7.3.0.tgz", + "integrity": "sha512-YLQHqZuU0cO02LUwhCsCCtvSw24SKLrT4DkNHCNGGcH9YpZP/IOFaH4zVUmXGEQiwyt0D1f3volHthMCKzLzMg==", + "requires": { + "babel-runtime": "6.x", + "rc-animate": "2.x", + "rc-util": "^4.4.0" + } + }, + "rc-drawer": { + "version": "1.7.7", + "resolved": "https://registry.npmjs.org/rc-drawer/-/rc-drawer-1.7.7.tgz", + "integrity": "sha512-7dESNkClYdWGSdBdwcfeOz6DUCqzrW44QT013fsTBJIiWNLSLgDV5KoHKXG8VTJWU4mBn7M5Lqgyr94CRZcxGA==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5", + "prop-types": "^15.5.0", + "rc-util": "^4.5.1" + } + }, + "rc-dropdown": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/rc-dropdown/-/rc-dropdown-2.4.1.tgz", + "integrity": "sha512-p0XYn0wrOpAZ2fUGE6YJ6U8JBNc5ASijznZ6dkojdaEfQJAeZtV9KMEewhxkVlxGSbbdXe10ptjBlTEW9vEwEg==", + "requires": { + "babel-runtime": "^6.26.0", + "classnames": "^2.2.6", + "prop-types": "^15.5.8", + "rc-trigger": "^2.5.1", + "react-lifecycles-compat": "^3.0.2" + } + }, + "rc-editor-core": { + "version": 
"0.8.9", + "resolved": "https://registry.npmjs.org/rc-editor-core/-/rc-editor-core-0.8.9.tgz", + "integrity": "sha512-fGTkTm96Kil/i9n5a3JwAzJcl2TkfjO1r1WBWf6NIOxXiJXpC3Lajkf3j6E5K7iz5AW0QRaSGnNQFBrwvXKKWA==", + "requires": { + "babel-runtime": "^6.26.0", + "classnames": "^2.2.5", + "draft-js": "^0.10.0", + "immutable": "^3.7.4", + "lodash": "^4.16.5", + "prop-types": "^15.5.8", + "setimmediate": "^1.0.5" + } + }, + "rc-editor-mention": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/rc-editor-mention/-/rc-editor-mention-1.1.12.tgz", + "integrity": "sha512-cPm2rQ7P+hXaKMsO0ajVv08QlTDcSPVtw8/lVr9D+QzQKRPChCqLw9rVGOa4YGYTeS3gVe8lBfLr8a9JKFk3gA==", + "requires": { + "babel-runtime": "^6.23.0", + "classnames": "^2.2.5", + "dom-scroll-into-view": "^1.2.0", + "draft-js": "~0.10.0", + "immutable": "^3.7.4", + "prop-types": "^15.5.8", + "rc-animate": "^2.3.0", + "rc-editor-core": "~0.8.3" + } + }, + "rc-form": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/rc-form/-/rc-form-2.4.3.tgz", + "integrity": "sha512-59KeQat5TU4YzpfXYpFlyQ1/5uFXm0SV7VokRr+i8bPMhimpKpZl5gt0J7dNiKLTsGnkCqBLSL88d9ufPJ+EQQ==", + "requires": { + "async-validator": "~1.8.5", + "babel-runtime": "6.x", + "create-react-class": "^15.5.3", + "dom-scroll-into-view": "1.x", + "hoist-non-react-statics": "^3.3.0", + "lodash": "^4.17.4", + "warning": "^4.0.3" + }, + "dependencies": { + "hoist-non-react-statics": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.0.tgz", + "integrity": "sha512-0XsbTXxgiaCDYDIWFcwkmerZPSwywfUqYmwT4jzewKTQSWoE6FCMoUVOeBJWK3E/CrWbxRG3m5GzY4lnIwGRBA==", + "requires": { + "react-is": "^16.7.0" + } + }, + "react-is": { + "version": "16.8.5", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.8.5.tgz", + "integrity": "sha512-sudt2uq5P/2TznPV4Wtdi+Lnq3yaYW8LfvPKLM9BKD8jJNBkxMVyB0C9/GmVhLw7Jbdmndk/73n7XQGeN9A3QQ==" + }, + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "rc-hammerjs": { + "version": "0.6.9", + "resolved": "https://registry.npmjs.org/rc-hammerjs/-/rc-hammerjs-0.6.9.tgz", + "integrity": "sha512-4llgWO3RgLyVbEqUdGsDfzUDqklRlQW5VEhE3x35IvhV+w//VPRG34SBavK3D2mD/UaLKaohgU41V4agiftC8g==", + "requires": { + "babel-runtime": "6.x", + "hammerjs": "^2.0.8", + "prop-types": "^15.5.9" + } + }, + "rc-input-number": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/rc-input-number/-/rc-input-number-4.4.0.tgz", + "integrity": "sha512-AsXLVaQZ7rCU71B8zzP3nviL8/CkFGDcp5kIlpMzBdGIHoLyRnXcxei3itH9PfFSgMBixEnb5hFVoTikFbNWSQ==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.0", + "is-negative-zero": "^2.0.0", + "prop-types": "^15.5.7", + "rc-util": "^4.5.1", + "rmc-feedback": "^2.0.0" + } + }, + "rc-menu": { + "version": "7.4.21", + "resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-7.4.21.tgz", + "integrity": "sha512-TfcwybKLuw2WhEkplYH7iFMGlDbH6KhPcd+gv5J2oLQcgiGeUECzyOWSVaFRRlkpB7g2eNzXbha/AXN/Xyzvnw==", + "requires": { + "babel-runtime": "6.x", + "classnames": "2.x", + "dom-scroll-into-view": "1.x", + "ismobilejs": "^0.5.1", + "mini-store": "^2.0.0", + "mutationobserver-shim": "^0.3.2", + "prop-types": "^15.5.6", + "rc-animate": "2.x", + "rc-trigger": "^2.3.0", + "rc-util": "^4.1.0", + "resize-observer-polyfill": "^1.5.0" + } + 
}, + "rc-notification": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/rc-notification/-/rc-notification-3.3.1.tgz", + "integrity": "sha512-U5+f4BmBVfMSf3OHSLyRagsJ74yKwlrQAtbbL5ijoA0F2C60BufwnOcHG18tVprd7iaIjzZt1TKMmQSYSvgrig==", + "requires": { + "babel-runtime": "6.x", + "classnames": "2.x", + "prop-types": "^15.5.8", + "rc-animate": "2.x", + "rc-util": "^4.0.4" + } + }, + "rc-pagination": { + "version": "1.17.11", + "resolved": "https://registry.npmjs.org/rc-pagination/-/rc-pagination-1.17.11.tgz", + "integrity": "sha512-pwtRmEgoPkS6goSd0QXGai541+hmToMYAnlKYa/Z/0rKmrPN9NJiLsYzFVH8rypDyA/KVpC9xU8Xqm5zeSIaww==", + "requires": { + "babel-runtime": "6.x", + "prop-types": "^15.5.7", + "react-lifecycles-compat": "^3.0.4" + } + }, + "rc-progress": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/rc-progress/-/rc-progress-2.3.0.tgz", + "integrity": "sha512-hYBKFSsNgD7jsF8j+ZC1J8y5UIC2X/ktCYI/OQhQNSX6mGV1IXnUCjAd9gbLmzmpChPvKyymRNfckScUNiTpFQ==", + "requires": { + "babel-runtime": "6.x", + "prop-types": "^15.5.8" + } + }, + "rc-rate": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/rc-rate/-/rc-rate-2.5.0.tgz", + "integrity": "sha512-aXX5klRqbVZxvLghcKnLqqo7LvLVCHswEDteWsm5Gb7NBIPa1YKTcAbvb5SZ4Z4i4EeRoZaPwygRAWsQgGtbKw==", + "requires": { + "classnames": "^2.2.5", + "prop-types": "^15.5.8", + "rc-util": "^4.3.0", + "react-lifecycles-compat": "^3.0.4" + } + }, + "rc-select": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/rc-select/-/rc-select-9.0.2.tgz", + "integrity": "sha512-lwFz/aINmbznQmKvq/jFipc922h+RhA+iKCicxAglTqC4qmXg2REKWzviT5Tk0kqVe4mHcfNX8PyvMEHSmkaLA==", + "requires": { + "babel-runtime": "^6.23.0", + "classnames": "2.x", + "component-classes": "1.x", + "dom-scroll-into-view": "1.x", + "prop-types": "^15.5.8", + "raf": "^3.4.0", + "rc-animate": "2.x", + "rc-menu": "^7.3.0", + "rc-trigger": "^2.5.4", + "rc-util": "^4.0.4", + "react-lifecycles-compat": "^3.0.2", + "warning": "^4.0.2" + }, + "dependencies": { + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "rc-slider": { + "version": "8.6.7", + "resolved": "https://registry.npmjs.org/rc-slider/-/rc-slider-8.6.7.tgz", + "integrity": "sha512-QIFWMnK1VLc4TtJSZJgjhI6UOhN8eg53EM2La+eRa8rSPZwJT3rIWfZnTZs7OV7zXG/AiLWN4G+oGxuMcEFpsg==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5", + "prop-types": "^15.5.4", + "rc-tooltip": "^3.7.0", + "rc-util": "^4.0.4", + "shallowequal": "^1.0.1", + "warning": "^4.0.3" + }, + "dependencies": { + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "rc-steps": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/rc-steps/-/rc-steps-3.3.1.tgz", + "integrity": "sha512-LGzmPYS9ETePo+6YbHlFukCdcKppeBZXO49ZxewaC7Cba00q0zrMXlexquZ4fm+9iz0IkpzwgmenvjsVWCmGOw==", + "requires": { + "babel-runtime": "^6.23.0", + 
"classnames": "^2.2.3", + "lodash": "^4.17.5", + "prop-types": "^15.5.7" + } + }, + "rc-switch": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/rc-switch/-/rc-switch-1.9.0.tgz", + "integrity": "sha512-Isas+egaK6qSk64jaEw4GgPStY4umYDbT7ZY93bZF1Af+b/JEsKsJdNOU2qG3WI0Z6tXo2DDq0kJCv8Yhu0zww==", + "requires": { + "classnames": "^2.2.1", + "prop-types": "^15.5.6", + "react-lifecycles-compat": "^3.0.4" + } + }, + "rc-table": { + "version": "6.4.3", + "resolved": "https://registry.npmjs.org/rc-table/-/rc-table-6.4.3.tgz", + "integrity": "sha512-4/f7mS87EtNxM2vhIaA7I1J8hPZ5OiOQwmjac7RJTmGOFVA8PJDGwEipeyU/eC9RM7f3v4Lc+a05KCfIbRU4tg==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5", + "component-classes": "^1.2.6", + "lodash": "^4.17.5", + "mini-store": "^2.0.0", + "prop-types": "^15.5.8", + "rc-util": "^4.0.4", + "react-lifecycles-compat": "^3.0.2", + "shallowequal": "^1.0.2", + "warning": "^3.0.0" + }, + "dependencies": { + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + } + } + }, + "rc-tabs": { + "version": "9.6.3", + "resolved": "https://registry.npmjs.org/rc-tabs/-/rc-tabs-9.6.3.tgz", + "integrity": "sha512-f4GotOvzfzY4fqj/Y9Npt3pxyyHceyj06yss2uhNlAb+PW25tn22LxgGhhFVn2RyUXrt5WT26HPgtHx9R9sN3Q==", + "requires": { + "babel-runtime": "6.x", + "classnames": "2.x", + "create-react-context": "0.2.2", + "lodash": "^4.17.5", + "prop-types": "15.x", + "raf": "^3.4.1", + "rc-hammerjs": "~0.6.0", + "rc-util": "^4.0.4", + "resize-observer-polyfill": "^1.5.1", + "warning": "^3.0.0" + }, + "dependencies": { + "create-react-context": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/create-react-context/-/create-react-context-0.2.2.tgz", + "integrity": "sha512-KkpaLARMhsTsgp0d2NA/R94F/eDLbhXERdIq3LvX2biCAXcDvHYoOqHfWCHf1+OLj+HKBotLG3KqaOOf+C1C+A==", + "requires": { + "fbjs": "^0.8.0", + "gud": "^1.0.0" + } + }, + "raf": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz", + "integrity": "sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==", + "requires": { + "performance-now": "^2.1.0" + } + } + } + }, + "rc-time-picker": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/rc-time-picker/-/rc-time-picker-3.6.2.tgz", + "integrity": "sha512-SyGEVXO0ImeG2mz+7fkVmDoVM0+OrX6uYGpKYijNr/lAah7c5p310ZR6fVrblXOl4TpqVnfWR67RMJ3twAyM7w==", + "requires": { + "classnames": "2.x", + "moment": "2.x", + "prop-types": "^15.5.8", + "rc-trigger": "^2.2.0" + } + }, + "rc-tooltip": { + "version": "3.7.3", + "resolved": "https://registry.npmjs.org/rc-tooltip/-/rc-tooltip-3.7.3.tgz", + "integrity": "sha512-dE2ibukxxkrde7wH9W8ozHKUO4aQnPZ6qBHtrTH9LoO836PjDdiaWO73fgPB05VfJs9FbZdmGPVEbXCeOP99Ww==", + "requires": { + "babel-runtime": "6.x", + "prop-types": "^15.5.8", + "rc-trigger": "^2.2.2" + } + }, + "rc-tree": { + "version": "1.14.10", + "resolved": "https://registry.npmjs.org/rc-tree/-/rc-tree-1.14.10.tgz", + "integrity": "sha512-iOn7+SpWzM4OQoF/7wJeFiuRpBGJ3ndTe6YVGnfIhsWqDd7S6a7z0anDQcBpPsW/PvisjNDXr4zKchZvx+0iCA==", + "requires": { + "babel-runtime": "^6.23.0", + "classnames": "2.x", + "prop-types": "^15.5.8", + "rc-animate": "^3.0.0-rc.5", + "rc-util": "^4.5.1", + "react-lifecycles-compat": "^3.0.4", + "warning": "^3.0.0" + }, + "dependencies": { + "rc-animate": { + "version": "3.0.0-rc.6", + 
"resolved": "https://registry.npmjs.org/rc-animate/-/rc-animate-3.0.0-rc.6.tgz", + "integrity": "sha512-oBLPpiT6Q4t6YvD/pkLcmofBP1p01TX0Otse8Q4+Mxt8J+VSDflLZGIgf62EwkvRwsQUkLPjZVFBsldnPKLzjg==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5", + "component-classes": "^1.2.6", + "fbjs": "^0.8.16", + "prop-types": "15.x", + "raf": "^3.4.0", + "rc-util": "^4.5.0", + "react-lifecycles-compat": "^3.0.4" + } + } + } + }, + "rc-tree-select": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/rc-tree-select/-/rc-tree-select-2.6.1.tgz", + "integrity": "sha512-ZNGZMKIIwikgqvpbC8YJlv33OeGWlHiVbr42IXYTmVORoO3QpJuZPW95sF/Yhpjk86DgjuuM3HreoLPAOZCVQQ==", + "requires": { + "classnames": "^2.2.1", + "dom-scroll-into-view": "^1.2.1", + "prop-types": "^15.5.8", + "raf": "^3.4.0", + "rc-animate": "^3.0.0-rc.4", + "rc-tree": "~1.15.0", + "rc-trigger": "^3.0.0-rc.2", + "rc-util": "^4.5.0", + "react-lifecycles-compat": "^3.0.4", + "shallowequal": "^1.0.2", + "warning": "^4.0.1" + }, + "dependencies": { + "rc-animate": { + "version": "3.0.0-rc.6", + "resolved": "https://registry.npmjs.org/rc-animate/-/rc-animate-3.0.0-rc.6.tgz", + "integrity": "sha512-oBLPpiT6Q4t6YvD/pkLcmofBP1p01TX0Otse8Q4+Mxt8J+VSDflLZGIgf62EwkvRwsQUkLPjZVFBsldnPKLzjg==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5", + "component-classes": "^1.2.6", + "fbjs": "^0.8.16", + "prop-types": "15.x", + "raf": "^3.4.0", + "rc-util": "^4.5.0", + "react-lifecycles-compat": "^3.0.4" + } + }, + "rc-tree": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/rc-tree/-/rc-tree-1.15.2.tgz", + "integrity": "sha512-VPXLA/GdV6U9N8evpl4rmjRsBkw5BoweqWjcVBVwYGzBtonNIFpdc+bnb7TDmd6S3mKOM7mXPbiSr2GKYdj4hA==", + "requires": { + "babel-runtime": "^6.23.0", + "classnames": "2.x", + "prop-types": "^15.5.8", + "rc-animate": "^3.0.0-rc.5", + "rc-util": "^4.5.1", + "react-lifecycles-compat": "^3.0.4", + "warning": "^3.0.0" + }, + "dependencies": { + "warning": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/warning/-/warning-3.0.0.tgz", + "integrity": "sha1-MuU3fLVy3kqwR1O9+IIcAe1gW3w=", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "rc-trigger": { + "version": "3.0.0-rc.3", + "resolved": "https://registry.npmjs.org/rc-trigger/-/rc-trigger-3.0.0-rc.3.tgz", + "integrity": "sha512-4vB6cpxcUdm2qO5VtB9q1TZz0MoWm9BzFLvGknulphGrl1qI6uxUsPDCvqnmujdpDdAKGGfjxntFpA7RtAwkFQ==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.6", + "prop-types": "15.x", + "raf": "^3.4.0", + "rc-align": "^2.4.1", + "rc-animate": "^3.0.0-rc.1", + "rc-util": "^4.4.0" + } + }, + "shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } + } + }, + "rc-trigger": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/rc-trigger/-/rc-trigger-2.6.2.tgz", + "integrity": "sha512-op4xCu95/gdHVaysyxxiYxbY+Z+UcIBSUY9nQfLqm1FlitdtnAN+owD5iMPfnnsRXntgcQ5+RdYKNUFQT5DjzA==", "requires": { - "deep-extend": "^0.5.1", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" + "babel-runtime": "6.x", + "classnames": "^2.2.6", 
+ "prop-types": "15.x", + "rc-align": "^2.4.0", + "rc-animate": "2.x", + "rc-util": "^4.4.0" + } + }, + "rc-upload": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rc-upload/-/rc-upload-2.6.3.tgz", + "integrity": "sha512-wM57UH/EEqW2/EcWz5nwnU07d4LHDHjBgxRin2Q56TW9JcFVnaQVq/JHycVFumsgSFV5CZfNW8PBROsKT9VFMw==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5", + "prop-types": "^15.5.7", + "warning": "4.x" }, "dependencies": { - "minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } } } }, + "rc-util": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/rc-util/-/rc-util-4.6.0.tgz", + "integrity": "sha512-rbgrzm1/i8mgfwOI4t1CwWK7wGe+OwX+dNa7PVMgxZYPBADGh86eD4OcJO1UKGeajIMDUUKMluaZxvgraQIOmw==", + "requires": { + "add-dom-event-listener": "^1.1.0", + "babel-runtime": "6.x", + "prop-types": "^15.5.10", + "shallowequal": "^0.2.2" + } + }, "react": { - "version": "16.3.2", - "resolved": "https://registry.npmjs.org/react/-/react-16.3.2.tgz", - "integrity": "sha512-o5GPdkhciQ3cEph6qgvYB7LTOHw/GB0qRI6ZFNugj49qJCFfgHwVNjZ5u+b7nif4vOeMIOuYj3CeYe2IBD74lg==", + "version": "16.5.2", + "resolved": "https://registry.npmjs.org/react/-/react-16.5.2.tgz", + "integrity": "sha512-FDCSVd3DjVTmbEAjUNX6FgfAmQ+ypJfHUsqUJOYNCBUp1h8lqmtC+0mXJ+JjsWx4KAVTkk1vKd1hLQPvEviSuw==", "requires": { - "fbjs": "^0.8.16", "loose-envify": "^1.1.0", "object-assign": "^4.1.1", - "prop-types": "^15.6.0" + "prop-types": "^15.6.2", + "schedule": "^0.5.0" + }, + "dependencies": { + "prop-types": { + "version": "15.6.2", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.6.2.tgz", + "integrity": "sha512-3pboPvLiWD7dkI3qf3KbUe6hKFKa52w+AE0VCqECtf+QHAKgOL37tTaNCnuX1nAAQ4ZhyP+kYVKf8rLmJ/feDQ==", + "requires": { + "loose-envify": "^1.3.1", + "object-assign": "^4.1.1" + } + } + } + }, + "react-app-rewire-define-plugin": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/react-app-rewire-define-plugin/-/react-app-rewire-define-plugin-1.0.0.tgz", + "integrity": "sha512-n916cvzs/CVADBGzDHULQvT7MAbqFm1QB3zkxrfVvHzaskzAJeGhNyordddwoJ2elGszqw4YDQJ/YWtTU0Ji+g==", + "dev": true, + "requires": { + "webpack": "^3.6.0" } }, "react-app-rewire-polyfills": { @@ -13085,6 +16409,17 @@ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.3.2.tgz", "integrity": "sha512-ybEM7YOr4yBgFd6w8dJqwxegqZGJNBZl6U27HnGKuTZmDvVrD5quWOK/wAnMywiZzW+Qsk+l4X2c70+thp/A8Q==" }, + "react-lazy-load": { + "version": "3.0.13", + "resolved": "https://registry.npmjs.org/react-lazy-load/-/react-lazy-load-3.0.13.tgz", + "integrity": "sha1-OwqS0zbUPT8Nc8vm81sXBQsIuCQ=", + "requires": { + "eventlistener": "0.0.1", + "lodash.debounce": "^4.0.0", + "lodash.throttle": "^4.0.0", + "prop-types": "^15.5.8" + } + }, "react-lifecycles-compat": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz", @@ -13123,6 +16458,14 @@ "warning": "^3.0.0" } }, + "react-plotly.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/react-plotly.js/-/react-plotly.js-2.2.0.tgz", + "integrity": 
"sha512-+OWgUDZCMWJsZDhWupTvxvjZu/Q7Wbnaikq0PtdX2QQ4s/HuRj2uYydx3HfoDrW4Mc97aZsMnEVAqUxAEaf6FA==", + "requires": { + "prop-types": "^15.5.10" + } + }, "react-prop-types": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/react-prop-types/-/react-prop-types-0.4.0.tgz", @@ -13347,6 +16690,19 @@ } } }, + "react-slick": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/react-slick/-/react-slick-0.23.2.tgz", + "integrity": "sha512-fM6DXX7+22eOcYE9cgaXUfioZL/Zw6fwS6aPMDBt0kLHl4H4fFNEbp4JsJQdEWMLUNFtUytNcvd9KRml22Tp5w==", + "requires": { + "classnames": "^2.2.5", + "enquire.js": "^2.1.6", + "json2mq": "^0.2.0", + "lodash.debounce": "^4.0.8", + "prettier": "^1.14.3", + "resize-observer-polyfill": "^1.5.0" + } + }, "react-smooth": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/react-smooth/-/react-smooth-1.0.0.tgz", @@ -13411,23 +16767,16 @@ } }, "react-virtualized": { - "version": "9.19.0", - "resolved": "https://registry.npmjs.org/react-virtualized/-/react-virtualized-9.19.0.tgz", - "integrity": "sha512-aeOGF964AnR7rcKtl2mQF8Ci2s3OJI2a4lmcCTTj1tNBk0V3xKjlhQETrnHs1xU66yNx2+CMTgS4CV82Pf/oNQ==", + "version": "9.21.0", + "resolved": "https://registry.npmjs.org/react-virtualized/-/react-virtualized-9.21.0.tgz", + "integrity": "sha512-duKD2HvO33mqld4EtQKm9H9H0p+xce1c++2D5xn59Ma7P8VT7CprfAe5hwjd1OGkyhqzOZiTMlTal7LxjH5yBQ==", "requires": { "babel-runtime": "^6.26.0", "classnames": "^2.2.3", "dom-helpers": "^2.4.0 || ^3.0.0", "loose-envify": "^1.3.0", "prop-types": "^15.6.0", - "react-lifecycles-compat": "^1.0.2" - }, - "dependencies": { - "react-lifecycles-compat": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-1.1.4.tgz", - "integrity": "sha512-g3pdexIqkn+CVvSpYIoyON8zUbF9kgfhp672gyz7wQ7PQyXVmJtah+GDYqpHpOrdwex3F77iv+alq79iux9HZw==" - } + "react-lifecycles-compat": "^3.0.4" } }, "read-pkg": { @@ -13555,6 +16904,21 @@ "strip-indent": "^1.0.1" } }, + "redeyed": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-0.4.4.tgz", + "integrity": "sha1-N+mQpvKyGyoRwuakj9QTVpjLqX8=", + "requires": { + "esprima": "~1.0.4" + }, + "dependencies": { + "esprima": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.0.4.tgz", + "integrity": "sha1-n1V+CPw7TSbs6d00+Pv0drYlha0=" + } + } + }, "reduce-css-calc": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-1.3.0.tgz", @@ -13587,6 +16951,16 @@ } } }, + "reduce-simplicial-complex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/reduce-simplicial-complex/-/reduce-simplicial-complex-1.0.0.tgz", + "integrity": "sha1-dNaWovg196bc2SBl/YxRgfLt+Lw=", + "requires": { + "cell-orientation": "^1.0.1", + "compare-cell": "^1.0.0", + "compare-oriented-cell": "^1.0.1" + } + }, "redux": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/redux/-/redux-3.7.2.tgz", @@ -13613,6 +16987,21 @@ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.3.3.tgz", "integrity": "sha512-jVpo1GadrDAK59t/0jRx5VxYWQEDkkEKi6+HjE3joFVLfDOh9Xrdh0dF1eSq+BI/SwvTQ44gSscJ8N5zYL61sg==" }, + "regenerate-unicode-properties": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-7.0.0.tgz", + "integrity": "sha512-s5NGghCE4itSlUS+0WUj88G6cfMVMmH8boTPNvABf8od+2dhT9WDlWu8n01raQAJZMOK8Ch6jSexaRO7swd6aw==", + "requires": { + "regenerate": "^1.4.0" + }, + "dependencies": { + "regenerate": { 
+ "version": "1.4.0", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", + "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" + } + } + }, "regenerator-runtime": { "version": "0.11.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", @@ -13692,6 +17081,105 @@ } } }, + "regl": { + "version": "1.3.9", + "resolved": "https://registry.npmjs.org/regl/-/regl-1.3.9.tgz", + "integrity": "sha512-CungQSUBsZNYZJWJlb2sPe4iwBjmxrgl1Yxt91HN3VuuEL7lJ5k03O3T1xEXVOCMN1q8wncddwJsxozuyzzmrA==" + }, + "regl-error2d": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/regl-error2d/-/regl-error2d-2.0.5.tgz", + "integrity": "sha512-hBxGSY0F9S3+JsobYiQBKdZ+0oWNpM6k8zeRxVDyv5rbZ2HNclVInrT82em+JPZ+GEh0OLmZdlS4BbPIuYAk2w==", + "requires": { + "array-bounds": "^1.0.1", + "bubleify": "^1.0.0", + "color-normalize": "^1.0.3", + "flatten-vertex-data": "^1.0.0", + "object-assign": "^4.1.1", + "pick-by-alias": "^1.1.1", + "to-float32": "^1.0.0", + "update-diff": "^1.0.2" + } + }, + "regl-line2d": { + "version": "3.0.12", + "resolved": "https://registry.npmjs.org/regl-line2d/-/regl-line2d-3.0.12.tgz", + "integrity": "sha512-6KV6ZbVWeoMZDqkVdqbWpvzrQR1BFOOUHMoyi1HDkZ3TXuS88s1/vQghTJjaLDRBVV5krZfIMpBrePY7OMxDIQ==", + "requires": { + "array-bounds": "^1.0.0", + "array-normalize": "^1.1.3", + "bubleify": "^1.0.0", + "color-normalize": "^1.0.0", + "earcut": "^2.1.1", + "es6-weak-map": "^2.0.2", + "flatten-vertex-data": "^1.0.0", + "glslify": "^6.3.1", + "object-assign": "^4.1.1", + "parse-rect": "^1.2.0", + "pick-by-alias": "^1.1.0", + "to-float32": "^1.0.0" + } + }, + "regl-scatter2d": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/regl-scatter2d/-/regl-scatter2d-3.0.6.tgz", + "integrity": "sha512-l2/OcCRKTxsCtrGtb2TKUKYnDHzI07qOm2eK2kiRYKyDwiWiGyiLC6p3SlOxDoqhQ/8gbIue9BABPXuCJ0lpRQ==", + "requires": { + "array-range": "^1.0.1", + "array-rearrange": "^2.2.2", + "bubleify": "^1.0.0", + "clamp": "^1.0.1", + "color-id": "^1.1.0", + "color-normalize": "^1.0.3", + "flatten-vertex-data": "^1.0.0", + "glslify": "^6.1.1", + "is-iexplorer": "^1.0.0", + "object-assign": "^4.1.1", + "parse-rect": "^1.1.0", + "pick-by-alias": "^1.0.0", + "point-cluster": "^3.1.2", + "to-float32": "^1.0.0", + "update-diff": "^1.1.0" + } + }, + "regl-splom": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/regl-splom/-/regl-splom-1.0.4.tgz", + "integrity": "sha512-+iq/RJAJdHCp48wPbEGQ5qw29OXFVF/m7CzcuLZxwptjdkB/FHGKiMuyqclOSNQcEKFxQTvRRJMJJ6brd8VvrA==", + "requires": { + "array-bounds": "^1.0.1", + "array-range": "^1.0.1", + "bubleify": "^1.1.0", + "color-alpha": "^1.0.2", + "defined": "^1.0.0", + "flatten-vertex-data": "^1.0.2", + "left-pad": "^1.2.0", + "parse-rect": "^1.2.0", + "pick-by-alias": "^1.2.0", + "point-cluster": "^1.0.2", + "raf": "^3.4.0", + "regl-scatter2d": "^3.0.6" + }, + "dependencies": { + "binary-search-bounds": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/binary-search-bounds/-/binary-search-bounds-2.0.4.tgz", + "integrity": "sha512-2hg5kgdKql5ClF2ErBcSx0U5bnl5hgS4v7wMnLFodyR47yMtj2w+UAZB+0CiqyHct2q543i7Bi4/aMIegorCCg==" + }, + "point-cluster": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/point-cluster/-/point-cluster-1.0.2.tgz", + "integrity": "sha512-pau5Py38SKgEJZ8pvD/bfXrz2TmQy6BEtMFZZSpjsQ2EmAe4CRO+HLhHw1gmgHVFaY/9KqhrfSeUPIsBOw8tDA==", + "requires": { + "array-bounds": "^1.0.1", + "array-normalize": "^1.1.3", 
+ "binary-search-bounds": "^2.0.4", + "clamp": "^1.0.1", + "parse-rect": "^1.1.1" + } + } + } + }, "relateurl": { "version": "0.2.7", "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", @@ -13797,6 +17285,11 @@ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" }, + "resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" + }, "resolve": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.6.0.tgz", @@ -13839,6 +17332,14 @@ "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-2.2.0.tgz", "integrity": "sha512-bAFz9ld18RzJfddgrO2e/0S2O81710++chRMUxHjXOYKF6jTAMrUNZrEZ1PvV0zlhfjidm08iRPdTLPno1FuRg==" }, + "resolve-protobuf-schema": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/resolve-protobuf-schema/-/resolve-protobuf-schema-2.1.0.tgz", + "integrity": "sha512-kI5ffTiZWmJaS/huM8wZfEMer1eRd7oJQhDuxeCLe3t7N7mX3z94CN0xPxBQxFYQTSNz9T0i+v6inKqSdK8xrQ==", + "requires": { + "protocol-buffers-schema": "^3.3.1" + } + }, "resolve-url": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", @@ -13853,6 +17354,14 @@ "signal-exit": "^3.0.2" } }, + "resumer": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/resumer/-/resumer-0.0.0.tgz", + "integrity": "sha1-8ej0YeQGS6Oegq883CqMiT0HZ1k=", + "requires": { + "through": "~2.3.4" + } + }, "ret": { "version": "0.1.15", "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", @@ -13866,6 +17375,11 @@ "align-text": "^0.1.1" } }, + "right-now": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/right-now/-/right-now-1.0.0.tgz", + "integrity": "sha1-bolgne69fc2vja7Mmuo5z1haCRg=" + }, "rimraf": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", @@ -13883,6 +17397,106 @@ "inherits": "^2.0.1" } }, + "rmc-feedback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/rmc-feedback/-/rmc-feedback-2.0.0.tgz", + "integrity": "sha512-5PWOGOW7VXks/l3JzlOU9NIxRpuaSS8d9zA3UULUCuTKnpwBHNvv1jSJzxgbbCQeYzROWUpgKI4za3X4C/mKmQ==", + "requires": { + "babel-runtime": "6.x", + "classnames": "^2.2.5" + } + }, + "robust-compress": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/robust-compress/-/robust-compress-1.0.0.tgz", + "integrity": "sha1-TPYsSzGNgwhRYBK7jBF1Lzkymxs=" + }, + "robust-determinant": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/robust-determinant/-/robust-determinant-1.1.0.tgz", + "integrity": "sha1-jsrnm3nKqz509t6+IjflORon6cc=", + "requires": { + "robust-compress": "^1.0.0", + "robust-scale": "^1.0.0", + "robust-sum": "^1.0.0", + "two-product": "^1.0.0" + } + }, + "robust-dot-product": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/robust-dot-product/-/robust-dot-product-1.0.0.tgz", + "integrity": "sha1-yboBeL0sMEv9cl9Y6Inx2UYARVM=", + "requires": { + "robust-sum": "^1.0.0", + "two-product": "^1.0.0" + } + }, + "robust-in-sphere": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/robust-in-sphere/-/robust-in-sphere-1.1.3.tgz", + "integrity": "sha1-HFiD0WpOkjkpR27zSBmFe/Kpz3U=", + "requires": { + "robust-scale": "^1.0.0", + "robust-subtract": "^1.0.0", + "robust-sum": "^1.0.0", + 
"two-product": "^1.0.0" + } + }, + "robust-linear-solve": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/robust-linear-solve/-/robust-linear-solve-1.0.0.tgz", + "integrity": "sha1-DNasUEBpGm8qo81jEdcokFyjofE=", + "requires": { + "robust-determinant": "^1.1.0" + } + }, + "robust-orientation": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/robust-orientation/-/robust-orientation-1.1.3.tgz", + "integrity": "sha1-2v9bANO+TmByLw6cAVbvln8cIEk=", + "requires": { + "robust-scale": "^1.0.2", + "robust-subtract": "^1.0.0", + "robust-sum": "^1.0.0", + "two-product": "^1.0.2" + } + }, + "robust-product": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/robust-product/-/robust-product-1.0.0.tgz", + "integrity": "sha1-aFJQAHzbunzx3nW/9tKScBEJir4=", + "requires": { + "robust-scale": "^1.0.0", + "robust-sum": "^1.0.0" + } + }, + "robust-scale": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/robust-scale/-/robust-scale-1.0.2.tgz", + "integrity": "sha1-d1Ey7QlULQKOWLLMecBikLz3jDI=", + "requires": { + "two-product": "^1.0.2", + "two-sum": "^1.0.0" + } + }, + "robust-segment-intersect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/robust-segment-intersect/-/robust-segment-intersect-1.0.1.tgz", + "integrity": "sha1-MlK2oPwboUreaRXMvgnLzpqrHBw=", + "requires": { + "robust-orientation": "^1.1.3" + } + }, + "robust-subtract": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/robust-subtract/-/robust-subtract-1.0.0.tgz", + "integrity": "sha1-4LFk4e2LpOOl3aRaEgODSNvtPpo=" + }, + "robust-sum": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/robust-sum/-/robust-sum-1.0.0.tgz", + "integrity": "sha1-FmRuUlKStNJdgnV6KGlV4Lv6U9k=" + }, "rst-selector-parser": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz", @@ -13901,6 +17515,11 @@ "is-promise": "^2.1.0" } }, + "rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q=" + }, "rx-lite": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/rx-lite/-/rx-lite-4.0.8.tgz", @@ -13969,6 +17588,11 @@ } } }, + "sane-topojson": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/sane-topojson/-/sane-topojson-2.0.0.tgz", + "integrity": "sha1-QOJXNqKMTM6qojP0W7hjc6J4W4Q=" + }, "sanitize-html": { "version": "1.18.5", "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-1.18.5.tgz", @@ -14024,6 +17648,14 @@ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" }, + "schedule": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/schedule/-/schedule-0.5.0.tgz", + "integrity": "sha512-HUcJicG5Ou8xfR//c2rPT0lPIRR09vVvN81T9fqfVgBmhERUbDEQoYKjpBxbueJnCPpSu2ujXzOnRQt6x9o/jw==", + "requires": { + "object-assign": "^4.1.1" + } + }, "schema-utils": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-0.3.0.tgz", @@ -14032,6 +17664,11 @@ "ajv": "^5.0.0" } }, + "seedrandom": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.4.tgz", + "integrity": "sha512-9A+PDmgm+2du77B5i0Ip2cxOqqHjgNxnBgglxLcX78A2D6c2rTo61z4jnVABpF4cKeDMDG+cmXXvdnqse2VqMA==" + }, "select-hose": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", @@ -14165,6 
+17802,16 @@ "safe-buffer": "^5.0.1" } }, + "shallow-copy": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/shallow-copy/-/shallow-copy-0.0.1.tgz", + "integrity": "sha1-QV9CcC1z2BAzApLMXuhurhoRoXA=" + }, + "shallow-equal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallow-equal/-/shallow-equal-1.1.0.tgz", + "integrity": "sha512-0SW1nWo1hnabO62SEeHsl8nmTVVEzguVWZCj5gaQrgWAxz/BaCja4OWdJBWLVPDxdtE/WU7c98uUCCXyPHSCvw==" + }, "shallowequal": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-0.2.2.tgz", @@ -14173,6 +17820,26 @@ "lodash.keys": "^3.1.2" } }, + "sharkdown": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/sharkdown/-/sharkdown-0.1.0.tgz", + "integrity": "sha1-YdT+Up510CRCEnzJI0NiJlCZIU8=", + "requires": { + "cardinal": "~0.4.2", + "expect.js": "~0.2.0", + "minimist": "0.0.5", + "split": "~0.2.10", + "stream-spigot": "~2.1.2", + "through": "~2.3.4" + }, + "dependencies": { + "minimist": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.5.tgz", + "integrity": "sha1-16oye87PUY+RBqxrjwA/o7zqhWY=" + } + } + }, "shebang-command": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", @@ -14282,11 +17949,93 @@ } } }, + "shuffle-seed": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/shuffle-seed/-/shuffle-seed-1.1.6.tgz", + "integrity": "sha1-UzwSaDurO0+j6HUfxOViFGdEJgs=", + "requires": { + "seedrandom": "^2.4.2" + } + }, "signal-exit": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=" }, + "signum": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/signum/-/signum-0.0.0.tgz", + "integrity": "sha1-q1UbEAM1EHCnBHg/GgnF52kfnPY=" + }, + "simplicial-complex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/simplicial-complex/-/simplicial-complex-1.0.0.tgz", + "integrity": "sha1-bDOk7Wn81Nkbe8rdOzC2NoPq4kE=", + "requires": { + "bit-twiddle": "^1.0.0", + "union-find": "^1.0.0" + } + }, + "simplicial-complex-boundary": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simplicial-complex-boundary/-/simplicial-complex-boundary-1.0.1.tgz", + "integrity": "sha1-csn/HiTeqjdMm7L6DL8MCB6++BU=", + "requires": { + "boundary-cells": "^2.0.0", + "reduce-simplicial-complex": "^1.0.0" + } + }, + "simplicial-complex-contour": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/simplicial-complex-contour/-/simplicial-complex-contour-1.0.2.tgz", + "integrity": "sha1-iQqsrChDZTQBEFRc8mKaJuBL+dE=", + "requires": { + "marching-simplex-table": "^1.0.0", + "ndarray": "^1.0.15", + "ndarray-sort": "^1.0.0", + "typedarray-pool": "^1.1.0" + } + }, + "simplify-planar-graph": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/simplify-planar-graph/-/simplify-planar-graph-2.0.1.tgz", + "integrity": "sha1-vIWJNyXzLo+oriVoE5hEbSy892Y=", + "requires": { + "robust-orientation": "^1.0.1", + "simplicial-complex": "^0.3.3" + }, + "dependencies": { + "bit-twiddle": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/bit-twiddle/-/bit-twiddle-0.0.2.tgz", + "integrity": "sha1-wurruVKjuUrMFASX4c3NLxoz9Y4=" + }, + "simplicial-complex": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/simplicial-complex/-/simplicial-complex-0.3.3.tgz", + "integrity": "sha1-TDDK1X+eRXKd2PMGyHU1efRr6Z4=", + "requires": { + "bit-twiddle": 
"~0.0.1", + "union-find": "~0.0.3" + } + }, + "union-find": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/union-find/-/union-find-0.0.4.tgz", + "integrity": "sha1-uFSzMBYZva0USwAUx4+W6sDS8PY=" + } + } + }, + "slab-decomposition": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/slab-decomposition/-/slab-decomposition-1.0.2.tgz", + "integrity": "sha1-He1WdU1AixBznxRRA9/GGAf2UTQ=", + "requires": { + "binary-search-bounds": "^1.0.0", + "functional-red-black-tree": "^1.0.0", + "robust-orientation": "^1.1.3" + } + }, "slash": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", @@ -14447,6 +18196,16 @@ "url-parse": "^1.1.8" } }, + "sort-asc": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/sort-asc/-/sort-asc-0.1.0.tgz", + "integrity": "sha1-q3md9h/HPqCVbHnEtTHtHp53J+k=" + }, + "sort-desc": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/sort-desc/-/sort-desc-0.1.1.tgz", + "integrity": "sha1-GYuMDN6wlcRjNBhh45JdTuNZqe4=" + }, "sort-keys": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-1.1.2.tgz", @@ -14455,6 +18214,15 @@ "is-plain-obj": "^1.0.0" } }, + "sort-object": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/sort-object/-/sort-object-0.3.2.tgz", + "integrity": "sha1-mODRme3kDgfGGoRAPGHWw7KQ+eI=", + "requires": { + "sort-asc": "^0.1.0", + "sort-desc": "^0.1.1" + } + }, "source-list-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.0.tgz", @@ -14497,6 +18265,11 @@ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=" }, + "sourcemap-codec": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.4.tgz", + "integrity": "sha512-CYAPYdBu34781kLHkaW3m6b/uUSyMOC2R61gcYMWooeuaGtjof86ZA/8T+qVPPt7np1085CR9hmMGrySwEc8Xg==" + }, "spdx-correct": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.0.0.tgz", @@ -14552,6 +18325,23 @@ "wbuf": "^1.7.2" } }, + "split": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/split/-/split-0.2.10.tgz", + "integrity": "sha1-Zwl8YB1pfOE2j0GPBs0gHPBSGlc=", + "requires": { + "through": "2" + } + }, + "split-polygon": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/split-polygon/-/split-polygon-1.0.0.tgz", + "integrity": "sha1-DqzIoTanaxKj2VJW6n2kXbDC0kc=", + "requires": { + "robust-dot-product": "^1.0.0", + "robust-sum": "^1.0.0" + } + }, "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", @@ -14589,6 +18379,19 @@ "tweetnacl": "~0.14.0" } }, + "stack-trace": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", + "integrity": "sha1-qPbq7KkGdMMz58Q5U/J1tFFRBpU=" + }, + "static-eval": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.0.0.tgz", + "integrity": "sha512-6flshd3F1Gwm+Ksxq463LtFd1liC77N/PX1FVVc3OzL3hAmo2fwHFbuArkcfi7s9rTNsLEhcRmXGFZhlgy40uw==", + "requires": { + "escodegen": "^1.8.1" + } + }, "static-extend": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", @@ -14603,7 +18406,140 @@ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", "requires": { - 
"is-descriptor": "^0.1.0" + "is-descriptor": "^0.1.0" + } + } + } + }, + "static-module": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/static-module/-/static-module-1.5.0.tgz", + "integrity": "sha1-J9qYg8QajNCSNvhC8MHrxu32PYY=", + "requires": { + "concat-stream": "~1.6.0", + "duplexer2": "~0.0.2", + "escodegen": "~1.3.2", + "falafel": "^2.1.0", + "has": "^1.0.0", + "object-inspect": "~0.4.0", + "quote-stream": "~0.0.0", + "readable-stream": "~1.0.27-1", + "shallow-copy": "~0.0.1", + "static-eval": "~0.2.0", + "through2": "~0.4.1" + }, + "dependencies": { + "escodegen": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.3.3.tgz", + "integrity": "sha1-8CQBb1qI4Eb9EgBQVek5gC5sXyM=", + "requires": { + "esprima": "~1.1.1", + "estraverse": "~1.5.0", + "esutils": "~1.0.0", + "source-map": "~0.1.33" + } + }, + "esprima": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.1.1.tgz", + "integrity": "sha1-W28VR/TRAuZw4UDFCb5ncdautUk=" + }, + "estraverse": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-1.5.1.tgz", + "integrity": "sha1-hno+jlip+EYYr7bC3bzZFrfLr3E=" + }, + "esutils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-1.0.0.tgz", + "integrity": "sha1-gVHTWOIMisx/t0XnRywAJf5JZXA=" + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "object-inspect": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-0.4.0.tgz", + "integrity": "sha1-9RV8EWwUVbJDsG7pdwM5LFrYn+w=" + }, + "object-keys": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-0.4.0.tgz", + "integrity": "sha1-KKaq50KN0sOpLz2V8hM13SBOAzY=" + }, + "readable-stream": { + "version": "1.0.34", + "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "source-map": { + "version": "0.1.43", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.1.43.tgz", + "integrity": "sha1-wkvBRspRfBRx9drL4lcbK3+eM0Y=", + "optional": true, + "requires": { + "amdefine": ">=0.0.4" + } + }, + "static-eval": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-0.2.4.tgz", + "integrity": "sha1-t9NNg4k3uWn5ZBygfUj47eJj6ns=", + "requires": { + "escodegen": "~0.0.24" + }, + "dependencies": { + "escodegen": { + "version": "0.0.28", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-0.0.28.tgz", + "integrity": "sha1-Dk/xcV8yh3XWyrUaxEpAbNer/9M=", + "requires": { + "esprima": "~1.0.2", + "estraverse": "~1.3.0", + "source-map": ">= 0.1.2" + } + }, + "esprima": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.0.4.tgz", + "integrity": "sha1-n1V+CPw7TSbs6d00+Pv0drYlha0=" + }, + "estraverse": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-1.3.2.tgz", + "integrity": "sha1-N8K4k+8T1yPydth41g2FNRUqbEI=" + } + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + }, + "through2": { + "version": "0.4.2", + 
"resolved": "https://registry.npmjs.org/through2/-/through2-0.4.2.tgz", + "integrity": "sha1-2/WGYDEVHsg1K7bE22SiKSqEC5s=", + "requires": { + "readable-stream": "~1.0.17", + "xtend": "~2.1.1" + } + }, + "xtend": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-2.1.2.tgz", + "integrity": "sha1-bv7MKk2tjmlixJAbM3znuoe10os=", + "requires": { + "object-keys": "~0.4.0" } } } @@ -14634,6 +18570,47 @@ "xtend": "^4.0.0" } }, + "stream-shift": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.0.tgz", + "integrity": "sha1-1cdSgl5TZ+eG944Y5EXqIjoVWVI=" + }, + "stream-spigot": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/stream-spigot/-/stream-spigot-2.1.2.tgz", + "integrity": "sha1-feFF6Bn43Q20UJDRPc9zqO08wDU=", + "requires": { + "readable-stream": "~1.1.0" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + } + } + }, + "string-convert": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz", + "integrity": "sha1-aYLMMEn7tM2F+LJFaLnZvznu/5c=" + }, "string-length": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/string-length/-/string-length-1.0.1.tgz", @@ -14642,6 +18619,14 @@ "strip-ansi": "^3.0.0" } }, + "string-split-by": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string-split-by/-/string-split-by-1.0.0.tgz", + "integrity": "sha512-KaJKY+hfpzNyet/emP81PJA9hTVSfxNLS9SFTWxdCnnW1/zOOwiV248+EfoX7IQFcBaOp4G5YE6xTJMF+pLg6A==", + "requires": { + "parenthesis": "^3.1.5" + } + }, "string-width": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", @@ -14666,6 +18651,16 @@ } } }, + "string.prototype.trim": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.1.2.tgz", + "integrity": "sha1-0E3iyJ4Tf019IG8Ia17S+ua+jOo=", + "requires": { + "define-properties": "^1.1.2", + "es-abstract": "^1.5.0", + "function-bind": "^1.0.2" + } + }, "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", @@ -14695,6 +18690,11 @@ "is-utf8": "^0.2.0" } }, + "strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI=" + }, "strip-eof": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", @@ -14713,6 +18713,11 @@ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" }, + "strongly-connected-components": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strongly-connected-components/-/strongly-connected-components-1.0.1.tgz", + "integrity": 
"sha1-CSDitN9nyOrulsa2I0/inoc9upk=" + }, "style-loader": { "version": "0.19.0", "resolved": "https://registry.npmjs.org/style-loader/-/style-loader-0.19.0.tgz", @@ -14722,6 +18727,19 @@ "schema-utils": "^0.3.0" } }, + "supercluster": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/supercluster/-/supercluster-2.3.0.tgz", + "integrity": "sha1-h6tWCBu+qaHXJN9TUe6ejDry9Is=", + "requires": { + "kdbush": "^1.0.1" + } + }, + "superscript-text": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/superscript-text/-/superscript-text-1.0.0.tgz", + "integrity": "sha1-58snUlZzYN9QvrBhDOjfPXHY39g=" + }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", @@ -14730,6 +18748,54 @@ "has-flag": "^3.0.0" } }, + "surface-nets": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/surface-nets/-/surface-nets-1.0.2.tgz", + "integrity": "sha1-5DPIy7qUpydMb0yZVStGG/H8eks=", + "requires": { + "ndarray-extract-contour": "^1.0.0", + "triangulate-hypercube": "^1.0.0", + "zero-crossings": "^1.0.0" + } + }, + "svg-arc-to-cubic-bezier": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/svg-arc-to-cubic-bezier/-/svg-arc-to-cubic-bezier-3.1.2.tgz", + "integrity": "sha512-scEWWUoCDhBtgamRnW8C4b0Va73GdpxwWs01SH/wNsl+al7FHEHsval/ZnutHfzvrNTcn/A3YIsQ1oNULSFS7g==" + }, + "svg-path-bounds": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/svg-path-bounds/-/svg-path-bounds-1.0.1.tgz", + "integrity": "sha1-v0WLeDcmv1NDG0Yz8nkvYHSNn3Q=", + "requires": { + "abs-svg-path": "^0.1.1", + "is-svg-path": "^1.0.1", + "normalize-svg-path": "^1.0.0", + "parse-svg-path": "^0.1.2" + }, + "dependencies": { + "normalize-svg-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/normalize-svg-path/-/normalize-svg-path-1.0.1.tgz", + "integrity": "sha1-b3Ka1rcLtMpO/y/ksQdInv4dVv4=", + "requires": { + "svg-arc-to-cubic-bezier": "^3.0.0" + } + } + } + }, + "svg-path-sdf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/svg-path-sdf/-/svg-path-sdf-1.1.2.tgz", + "integrity": "sha512-dOH+KAAQMPh3phURH1gg4PjulxyuEzGESMjHiy4l4vGCrXpzGemH19e4VUTAXs6ipEUoHsVNdaG0i0CMMdFNVQ==", + "requires": { + "bitmap-sdf": "^1.0.0", + "draw-svg-path": "^1.0.0", + "is-svg-path": "^1.0.1", + "parse-svg-path": "^0.1.2", + "svg-path-bounds": "^1.0.1" + } + }, "svgo": { "version": "0.7.2", "resolved": "https://registry.npmjs.org/svgo/-/svgo-0.7.2.tgz", @@ -14846,6 +18912,49 @@ "resolved": "https://registry.npmjs.org/tapable/-/tapable-0.2.8.tgz", "integrity": "sha1-mTcqXJmb8t8WCvwNdL7U9HlIzSI=" }, + "tape": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/tape/-/tape-4.9.1.tgz", + "integrity": "sha512-6fKIXknLpoe/Jp4rzHKFPpJUHDHDqn8jus99IfPnHIjyz78HYlefTGD3b5EkbQzuLfaEvmfPK3IolLgq2xT3kw==", + "requires": { + "deep-equal": "~1.0.1", + "defined": "~1.0.0", + "for-each": "~0.3.3", + "function-bind": "~1.1.1", + "glob": "~7.1.2", + "has": "~1.0.3", + "inherits": "~2.0.3", + "minimist": "~1.2.0", + "object-inspect": "~1.6.0", + "resolve": "~1.7.1", + "resumer": "~0.0.0", + "string.prototype.trim": "~1.1.2", + "through": "~2.3.8" + }, + "dependencies": { + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "requires": { + "function-bind": "^1.1.1" + } + }, + "minimist": { + "version": "1.2.0", + "resolved": 
"http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + }, + "resolve": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.7.1.tgz", + "integrity": "sha512-c7rwLofp8g1U+h1KNyHL/jicrKg1Ek4q+Lr33AL65uZTinUZHe30D5HlyN5V9NW0JX1D5dXQ4jqW5l7Sy/kGfw==", + "requires": { + "path-parse": "^1.0.5" + } + } + } + }, "term-size": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/term-size/-/term-size-1.2.0.tgz", @@ -14866,6 +18975,14 @@ "require-main-filename": "^1.0.1" } }, + "text-cache": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/text-cache/-/text-cache-4.2.0.tgz", + "integrity": "sha512-8+W9fHZYOamWTy0Yb7lxMszOWo6sqUT4XvwrCZfaGxM8C8uzOoTQWXgtr/jDpuwozQhKNS3AxnuIaYc1SvV8vg==", + "requires": { + "vectorize-text": "^3.2.0" + } + }, "text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -14881,6 +18998,38 @@ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" }, + "through2": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-0.6.5.tgz", + "integrity": "sha1-QaucZ7KdVyCQcUEOHXp6lozTrUg=", + "requires": { + "readable-stream": ">=1.0.33-1 <1.1.0-0", + "xtend": ">=4.0.0 <4.1.0-0" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, + "readable-stream": { + "version": "1.0.34", + "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + } + } + }, "thunky": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.0.2.tgz", @@ -14904,6 +19053,21 @@ "setimmediate": "^1.0.4" } }, + "tiny-sdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tiny-sdf/-/tiny-sdf-1.0.2.tgz", + "integrity": "sha1-KOdphcRMTlhMS2fY7N2bM6HKwow=" + }, + "tinycolor2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/tinycolor2/-/tinycolor2-1.4.1.tgz", + "integrity": "sha1-9PrTM0R7wLB9TcjpIJ2POaisd+g=" + }, + "tinyqueue": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/tinyqueue/-/tinyqueue-1.2.3.tgz", + "integrity": "sha512-Qz9RgWuO9l8lT+Y9xvbzhPT2efIUIFd69N7eF7tJ9lnQl0iLj1M7peK7IoUGZL9DJHw9XftqLreccfxcQgYLxA==" + }, "tmp": { "version": "0.0.33", "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", @@ -14927,6 +19091,11 @@ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=" }, + "to-float32": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/to-float32/-/to-float32-1.0.1.tgz", + "integrity": "sha512-nOy2WSwae3xhZbc+05xiCuU3ZPPmH0L4Rg4Q1qiOGFSuNSCTB9nVJaGgGl3ZScxAclX/L8hJuDHJGDAzbfuKCQ==" + }, "to-object-path": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", @@ -14945,6 +19114,14 @@ } } }, + "to-px": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/to-px/-/to-px-1.1.0.tgz", + "integrity": "sha512-bfg3GLYrGoEzrGoE05TAL/Uw+H/qrf2ptr9V3W7U0lkjjyYnIfgxmVLUfhQ1hZpIQwin81uxhDjvUkDYsC0xWw==", + "requires": { + "parse-unit": "^1.0.1" + } + }, "to-regex": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", @@ -14965,6 +19142,19 @@ "repeat-string": "^1.6.1" } }, + "toggle-selection": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz", + "integrity": "sha1-bkWxJj8gF/oKzH2J14sVuL932jI=" + }, + "topojson-client": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/topojson-client/-/topojson-client-2.1.0.tgz", + "integrity": "sha1-/59784mRGF4LQoTCsGroNPDqxsg=", + "requires": { + "commander": "2" + } + }, "toposort": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/toposort/-/toposort-1.0.7.tgz", @@ -14990,6 +19180,24 @@ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=" }, + "triangulate-hypercube": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/triangulate-hypercube/-/triangulate-hypercube-1.0.1.tgz", + "integrity": "sha1-2Acdsuv8/VHzCNC88qXEils20Tc=", + "requires": { + "gamma": "^0.1.0", + "permutation-parity": "^1.0.0", + "permutation-rank": "^1.0.0" + } + }, + "triangulate-polyline": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/triangulate-polyline/-/triangulate-polyline-1.0.3.tgz", + "integrity": "sha1-v4uod6hQVBA/65+lphtOjXAXgU0=", + "requires": { + "cdt2d": "^1.0.0" + } + }, "trim-newlines": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", @@ -15018,12 +19226,32 @@ "safe-buffer": "^5.0.1" } }, + "turntable-camera-controller": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/turntable-camera-controller/-/turntable-camera-controller-3.0.1.tgz", + "integrity": "sha1-jb0/4AVQGRxlFky4iJcQSVeK/Zk=", + "requires": { + "filtered-vector": "^1.2.1", + "gl-mat4": "^1.0.2", + "gl-vec3": "^1.0.2" + } + }, "tweetnacl": { "version": "0.14.5", "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", "optional": true }, + "two-product": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/two-product/-/two-product-1.0.2.tgz", + "integrity": "sha1-Z9ldSyV6kh4stL16+VEfkIhSLqo=" + }, + "two-sum": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/two-sum/-/two-sum-1.0.0.tgz", + "integrity": "sha1-MdPzIjnk9zHsqd+RVeKyl/AIq2Q=" + }, "type-check": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", @@ -15046,6 +19274,15 @@ "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" }, + "typedarray-pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/typedarray-pool/-/typedarray-pool-1.1.0.tgz", + "integrity": "sha1-0RT0hIAUifU+yrXoCIqiMET0mNk=", + "requires": { + "bit-twiddle": "^1.0.0", + "dup": "^1.0.0" + } + }, "ua-parser-js": { "version": "0.7.18", "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.18.tgz", @@ -15124,6 +19361,35 @@ "integrity": "sha1-YaajIBBiKvoHljvzJSA88SI51gQ=", "dev": true }, + "unicode-canonical-property-names-ecmascript": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz", + "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==" + }, + "unicode-match-property-ecmascript": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz", + "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==", + "requires": { + "unicode-canonical-property-names-ecmascript": "^1.0.4", + "unicode-property-aliases-ecmascript": "^1.0.4" + } + }, + "unicode-match-property-value-ecmascript": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.0.2.tgz", + "integrity": "sha512-Rx7yODZC1L/T8XKo/2kNzVAQaRE88AaMvI1EF/Xnj3GW2wzN6fop9DDWuFAKUVFH7vozkz26DzP0qyWLKLIVPQ==" + }, + "unicode-property-aliases-ecmascript": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.4.tgz", + "integrity": "sha512-2WSLa6OdYd2ng8oqiGIWnJqyFArvhn+5vgx5GTxMbUYjCYKUcuKS62YLFF0R/BDGlB1yzXjQOLtPAfHsgirEpg==" + }, + "union-find": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/union-find/-/union-find-1.0.2.tgz", + "integrity": "sha1-KSusQV5q06iVNdI3AQ20pTYoTlg=" + }, "union-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz", @@ -15192,6 +19458,11 @@ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" }, + "unquote": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", + "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=" + }, "unset-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", @@ -15238,6 +19509,11 @@ "resolved": "https://registry.npmjs.org/upath/-/upath-1.0.5.tgz", "integrity": "sha512-qbKn90aDQ0YEwvXoLqj0oiuUYroLX2lVHZ+b+xwjozFasAOC4GneDq5+OaIG5Zj+jFmbz/uO+f7a9qxjktJQww==" }, + "update-diff": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-diff/-/update-diff-1.1.0.tgz", + "integrity": "sha1-9RAYLYHugZ+4LDprIrYrve2ngI8=" + }, "update-notifier": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-2.5.0.tgz", @@ -15402,6 +19678,20 @@ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" }, + "vectorize-text": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/vectorize-text/-/vectorize-text-3.2.0.tgz", + "integrity": "sha512-N3eldFPkXY7mVK1aBuKPdQKYerBSPEAf+4Tl6DGdnVb1MZ8buD9SKv5TUCyRCEe5KblC56MoJcmf0I/IyGjOGQ==", + "requires": { + "cdt2d": "^1.0.0", + "clean-pslg": "^1.1.0", + "ndarray": "^1.0.11", + "planar-graph-to-polyline": "^1.0.0", + "simplify-planar-graph": "^2.0.1", + "surface-nets": "^1.0.0", + "triangulate-polyline": "^1.0.0" + } + }, "velocity-animate": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/velocity-animate/-/velocity-animate-1.5.1.tgz", @@ -15433,6 +19723,11 @@ "extsprintf": "^1.2.0" } }, + "vlq": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", + "integrity": "sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==" 
+ }, "vm-browserify": { "version": "0.0.4", "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-0.0.4.tgz", @@ -15441,6 +19736,16 @@ "indexof": "0.0.1" } }, + "vt-pbf": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/vt-pbf/-/vt-pbf-3.1.1.tgz", + "integrity": "sha512-pHjWdrIoxurpmTcbfBWXaPwSmtPAHS105253P1qyEfSTV2HJddqjM+kIHquaT/L6lVJIk9ltTGc0IxR/G47hYA==", + "requires": { + "@mapbox/point-geometry": "0.1.0", + "@mapbox/vector-tile": "^1.3.1", + "pbf": "^3.0.5" + } + }, "walker": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.7.tgz", @@ -15480,6 +19785,24 @@ "minimalistic-assert": "^1.0.0" } }, + "weak-map": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/weak-map/-/weak-map-1.0.5.tgz", + "integrity": "sha1-eWkVhNmGB/UHC9O3CkDmuyLkAes=" + }, + "weakmap-shim": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/weakmap-shim/-/weakmap-shim-1.1.1.tgz", + "integrity": "sha1-1lr9eEEJshZuAP9XHDMVDsKkC0k=" + }, + "webgl-context": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/webgl-context/-/webgl-context-2.2.0.tgz", + "integrity": "sha1-jzfXJXz23xzQpJ5qextyG5TMhqA=", + "requires": { + "get-canvas-context": "^1.0.1" + } + }, "webidl-conversions": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", @@ -15897,6 +20220,11 @@ "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.3.tgz", "integrity": "sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==" }, + "wgs84": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/wgs84/-/wgs84-0.0.0.tgz", + "integrity": "sha1-NP3FVZF7blfPKigu0ENxDASc3HY=" + }, "whatwg-encoding": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-1.0.3.tgz", @@ -15977,6 +20305,14 @@ "errno": "~0.1.7" } }, + "world-calendars": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/world-calendars/-/world-calendars-1.0.3.tgz", + "integrity": "sha1-slxQMrokEo/8QdCfr0pewbnBQzU=", + "requires": { + "object-assign": "^4.1.0" + } + }, "wrap-ansi": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", @@ -16143,6 +20479,14 @@ "integrity": "sha1-riF2gXXRVZ1IvvNUILL0li8JwzA=" } } + }, + "zero-crossings": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/zero-crossings/-/zero-crossings-1.0.1.tgz", + "integrity": "sha1-xWK9MRNkPzRDokXRJAa4i2m5qf8=", + "requires": { + "cwise-compiler": "^1.0.0" + } } } } diff --git a/mlflow/server/js/package.json b/mlflow/server/js/package.json index 8a30b217eff11..a59f41dfade79 100644 --- a/mlflow/server/js/package.json +++ b/mlflow/server/js/package.json @@ -3,6 +3,7 @@ "version": "0.1.0", "private": true, "dependencies": { + "antd": "^3.15.2", "bytes": "3.0.0", "classnames": "^2.2.6", "cookie": "0.3.1", @@ -10,17 +11,23 @@ "draft-js": "^0.10.5", "file-saver": "1.3.8", "formik": "^1.1.1", + "html-entities": "1.2.1", "immutable": "3.8.1", "jquery": "3.0.0", "json-bigint": "databricks/json-bigint#a1defaf9cd8dd749f0fd4d5f83a22cd846789658", + "lodash": "^4.17.11", + "merge": "1.2.1", + "plotly.js": "1.42.5", "prop-types": "15.6.1", "qs": "6.5.2", - "react": "^16.3.2", + "react": "16.5.2", "react-app-rewire-polyfills": "^0.2.0", "react-bootstrap": "0.32.1", "react-dom": "^16.3.2", "react-mde": "5.8.0", "react-modal": "^3.4.4", + "react-overlays": "^0.8.3", + "react-plotly.js": 
"2.2.0", "react-redux": "5.0.7", "react-router": "4.2.0", "react-router-dom": "4.2.2", @@ -28,7 +35,7 @@ "react-scripts": "1.1.4", "react-sticky": "6.0.2", "react-treebeard": "2.1.0", - "react-virtualized": "9.19.0", + "react-virtualized": "9.21.0", "recharts": "1.0.0-beta.10", "redux": "3.7.2", "redux-promise-middleware": "5.0.0", @@ -54,7 +61,9 @@ "eslint-plugin-promise": "3.6.0", "eslint-plugin-react": "7.4.0", "eslint-plugin-standard": "3.0.1", - "react-app-rewired": "^1.5.2" + "jest-localstorage-mock": "^2.3.0", + "react-app-rewired": "^1.5.2", + "react-app-rewire-define-plugin": "1.0.0" }, "scripts": { "start": "react-app-rewired start", @@ -74,5 +83,13 @@ "lcov" ] }, - "proxy": "http://localhost:5000" + "proxy": { + "/ajax-api": { + "target": "http://localhost:5000" + }, + "/get-artifact": { + "target": "http://localhost:5000", + "ws": true + } + } } diff --git a/mlflow/server/js/public/fontawesome-all.min.css b/mlflow/server/js/public/fontawesome/css/fontawesome-all.min.css similarity index 100% rename from mlflow/server/js/public/fontawesome-all.min.css rename to mlflow/server/js/public/fontawesome/css/fontawesome-all.min.css diff --git a/mlflow/server/js/public/webfonts/fa-brands-400.eot b/mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.eot similarity index 100% rename from mlflow/server/js/public/webfonts/fa-brands-400.eot rename to mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.eot diff --git a/mlflow/server/js/public/webfonts/fa-brands-400.svg b/mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.svg similarity index 100% rename from mlflow/server/js/public/webfonts/fa-brands-400.svg rename to mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.svg diff --git a/mlflow/server/js/public/webfonts/fa-brands-400.ttf b/mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.ttf similarity index 100% rename from mlflow/server/js/public/webfonts/fa-brands-400.ttf rename to mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.ttf diff --git a/mlflow/server/js/public/webfonts/fa-brands-400.woff b/mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.woff similarity index 100% rename from mlflow/server/js/public/webfonts/fa-brands-400.woff rename to mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.woff diff --git a/mlflow/server/js/public/webfonts/fa-brands-400.woff2 b/mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.woff2 similarity index 100% rename from mlflow/server/js/public/webfonts/fa-brands-400.woff2 rename to mlflow/server/js/public/fontawesome/webfonts/fa-brands-400.woff2 diff --git a/mlflow/server/js/public/webfonts/fa-regular-400.eot b/mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.eot similarity index 100% rename from mlflow/server/js/public/webfonts/fa-regular-400.eot rename to mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.eot diff --git a/mlflow/server/js/public/webfonts/fa-regular-400.svg b/mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.svg similarity index 100% rename from mlflow/server/js/public/webfonts/fa-regular-400.svg rename to mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.svg diff --git a/mlflow/server/js/public/webfonts/fa-regular-400.ttf b/mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.ttf similarity index 100% rename from mlflow/server/js/public/webfonts/fa-regular-400.ttf rename to mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.ttf diff --git a/mlflow/server/js/public/webfonts/fa-regular-400.woff 
b/mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.woff similarity index 100% rename from mlflow/server/js/public/webfonts/fa-regular-400.woff rename to mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.woff diff --git a/mlflow/server/js/public/webfonts/fa-regular-400.woff2 b/mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.woff2 similarity index 100% rename from mlflow/server/js/public/webfonts/fa-regular-400.woff2 rename to mlflow/server/js/public/fontawesome/webfonts/fa-regular-400.woff2 diff --git a/mlflow/server/js/public/webfonts/fa-solid-900.eot b/mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.eot similarity index 100% rename from mlflow/server/js/public/webfonts/fa-solid-900.eot rename to mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.eot diff --git a/mlflow/server/js/public/webfonts/fa-solid-900.svg b/mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.svg similarity index 100% rename from mlflow/server/js/public/webfonts/fa-solid-900.svg rename to mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.svg diff --git a/mlflow/server/js/public/webfonts/fa-solid-900.ttf b/mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.ttf similarity index 100% rename from mlflow/server/js/public/webfonts/fa-solid-900.ttf rename to mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.ttf diff --git a/mlflow/server/js/public/webfonts/fa-solid-900.woff b/mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.woff similarity index 100% rename from mlflow/server/js/public/webfonts/fa-solid-900.woff rename to mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.woff diff --git a/mlflow/server/js/public/webfonts/fa-solid-900.woff2 b/mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.woff2 similarity index 100% rename from mlflow/server/js/public/webfonts/fa-solid-900.woff2 rename to mlflow/server/js/public/fontawesome/webfonts/fa-solid-900.woff2 diff --git a/mlflow/server/js/public/index.html b/mlflow/server/js/public/index.html index 9dac2c9f87ef8..af5be6cc94557 100644 --- a/mlflow/server/js/public/index.html +++ b/mlflow/server/js/public/index.html @@ -23,7 +23,7 @@ Learn how to configure a non-root public URL by running `npm run build`. 
--> - + MLflow diff --git a/mlflow/server/js/src/Actions.js b/mlflow/server/js/src/Actions.js index 91cff2e74a4c6..053311b26d053 100644 --- a/mlflow/server/js/src/Actions.js +++ b/mlflow/server/js/src/Actions.js @@ -1,6 +1,8 @@ import { MlflowService } from './sdk/MlflowService'; import ErrorCodes from './sdk/ErrorCodes'; +export const SEARCH_MAX_RESULTS = 100; + export const isPendingApi = (action) => { return action.type.endsWith("_PENDING"); }; @@ -77,16 +79,42 @@ export const restoreRunApi = (runUuid, id = getUUID()) => { }; export const SEARCH_RUNS_API = 'SEARCH_RUNS_API'; -export const searchRunsApi = (experimentIds, andedExpressions, runViewType, id = getUUID()) => { +export const searchRunsApi = (experimentIds, filter, runViewType, orderBy, id = getUUID()) => { return { type: SEARCH_RUNS_API, payload: wrapDeferred(MlflowService.searchRuns, { - experiment_ids: experimentIds, anded_expressions: andedExpressions, run_view_type: runViewType + experiment_ids: experimentIds, + filter: filter, + run_view_type: runViewType, + max_results: SEARCH_MAX_RESULTS, + order_by: orderBy, }), meta: { id: id }, }; }; +export const LOAD_MORE_RUNS_API = 'LOAD_MORE_RUNS_API'; +export const loadMoreRunsApi = ( + experimentIds, + filter, + runViewType, + orderBy, + pageToken, + id = getUUID(), +) => ({ + type: LOAD_MORE_RUNS_API, + payload: wrapDeferred(MlflowService.searchRuns, { + experiment_ids: experimentIds, + filter: filter, + run_view_type: runViewType, + max_results: SEARCH_MAX_RESULTS, + order_by: orderBy, + page_token: pageToken, + }), + meta: { id }, +}); + + export const LIST_ARTIFACTS_API = 'LIST_ARTIFACTS_API'; export const listArtifactsApi = (runUuid, path, id = getUUID()) => { return { @@ -197,5 +225,20 @@ export class ErrorWrapper { } return "INTERNAL_SERVER_ERROR"; } + + getMessageField() { + const responseText = this.xhr.responseText; + if (responseText) { + try { + const parsed = JSON.parse(responseText); + if (parsed.error_code && parsed.message) { + return parsed.message; + } + } catch (e) { + return "INTERNAL_SERVER_ERROR"; + } + } + return "INTERNAL_SERVER_ERROR"; + } } diff --git a/mlflow/server/js/src/Routes.js b/mlflow/server/js/src/Routes.js index 0d73c28fae61e..7adc4f90a6eda 100644 --- a/mlflow/server/js/src/Routes.js +++ b/mlflow/server/js/src/Routes.js @@ -7,14 +7,17 @@ class Routes { static experimentPageRoute = "/experiments/:experimentId"; + static experimentPageSearchRoute = "/experiments/:experimentId/:searchString"; + static getRunPageRoute(experimentId, runUuid) { return `/experiments/${experimentId}/runs/${runUuid}`; } static runPageRoute = "/experiments/:experimentId/runs/:runUuid"; - static getMetricPageRoute(runUuids, metricKey, experimentId) { - return `/metric/${metricKey}?runs=${JSON.stringify(runUuids)}&experiment=${experimentId}`; + static getMetricPageRoute(runUuids, metricKey, experimentId, plotMetricKeys) { + return `/metric/${metricKey}?runs=${JSON.stringify(runUuids)}&experiment=${experimentId}` + + `&plot_metric_keys=${JSON.stringify(plotMetricKeys || [metricKey])}`; } static metricPageRoute = "/metric/:metricKey"; diff --git a/mlflow/server/js/src/components/App.css b/mlflow/server/js/src/components/App.css index 402f7952a568e..9f7191ea8b0a3 100644 --- a/mlflow/server/js/src/components/App.css +++ b/mlflow/server/js/src/components/App.css @@ -65,9 +65,8 @@ a:hover, a:focus { } .App-content { - width: 80%; - margin-right: auto; - margin-left: auto; + width: calc(100% - 128px); + margin: 0 64px 0 64px; } div.mlflow-logo { @@ -96,13 +95,14 @@ 
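The Actions.js hunk above replaces anded_expressions with a SQL-like filter string, caps results at SEARCH_MAX_RESULTS, and adds loadMoreRunsApi to thread a page_token through. A minimal sketch of the resulting flow — the store wiring and the filter/order_by values are illustrative, and it assumes redux-promise-middleware resolves the dispatched promise with a { value } payload, as the .then(this.updateNextPageToken) call later in this diff does:

import { searchRunsApi, loadMoreRunsApi } from './Actions';
import { ViewType } from './sdk/MlflowEnums';

const experimentIds = [0];
const filter = 'metrics.rmse < 1';       // SQL-like filter string
const orderBy = ['metrics.rmse DESC'];   // order_by expressions

// First page: at most SEARCH_MAX_RESULTS (100) runs, plus an optional next_page_token.
store.dispatch(searchRunsApi(experimentIds, filter, ViewType.ACTIVE_ONLY, orderBy))
  .then(({ value }) => {
    if (value.next_page_token) {
      // Same query, next page.
      store.dispatch(loadMoreRunsApi(
          experimentIds, filter, ViewType.ACTIVE_ONLY, orderBy, value.next_page_token));
    }
  });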
div.header-links { display: inline-block; float: right; padding-top: 21px; - padding-right: 10%; + margin-right: 64px; font-size: 16px; } h1 { - margin-top: 32px; - font-size: 24px; + margin-top: 24px; + font-size: 18px; + margin-bottom: 24px; font-weight: bold; color: #333; } @@ -112,7 +112,7 @@ h1 a, h1 a:hover, h1 a:active, h1 a:visited { } h2 { - font-size: 18px; + font-size: 16px; font-weight: normal; color: #333; } @@ -130,19 +130,15 @@ label { margin: 0; } -div.metadata { - margin-top: 32px; -} - span.metadata { - font-size: 16px; + font-size: 14px; font-weight: normal; white-space: nowrap; margin-right: 100px; } span.metadata-header { - font-size: 16px; + font-size: 14px; font-weight: normal; color: #888; margin-right: 4px; @@ -190,3 +186,8 @@ img.center { display: block; margin: 0 auto; } + +.plotly-notifier, .plotly-notifier.notifier-note { + font-family: inherit !important; + font-size: 13px !important; +} diff --git a/mlflow/server/js/src/components/App.js b/mlflow/server/js/src/components/App.js index 068980e8fde32..137f4c5aa9d54 100644 --- a/mlflow/server/js/src/components/App.js +++ b/mlflow/server/js/src/components/App.js @@ -21,9 +21,9 @@ class App extends Component { render() { return ( -
- [old wrapper JSX; tags lost in extraction]
+ [new wrapper JSX; tags lost in extraction]
+ {process.env.HIDE_HEADER === 'true' ? null : [header JSX; tags lost in extraction]
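The process.env.HIDE_HEADER flag above must be baked in at build time, which is presumably why react-app-rewire-define-plugin joins the devDependencies in this diff. One plausible wiring — MLflow's actual config-overrides.js is not part of this diff:

// config-overrides.js
const rewireDefinePlugin = require('react-app-rewire-define-plugin');

module.exports = function override(config, env) {
  // Bake the env var into the bundle via webpack.DefinePlugin.
  return rewireDefinePlugin(config, env, {
    'process.env.HIDE_HEADER': JSON.stringify(process.env.HIDE_HEADER),
  });
};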
} @@ -52,6 +52,7 @@ class App extends Component { + diff --git a/mlflow/server/js/src/components/ArtifactView.css b/mlflow/server/js/src/components/ArtifactView.css index 912ab6a1bfe12..6c56dd3c7c1ea 100644 --- a/mlflow/server/js/src/components/ArtifactView.css +++ b/mlflow/server/js/src/components/ArtifactView.css @@ -5,7 +5,20 @@ padding-top: 8px; } +.artifact-info-link { + float: right; + width: 38px; + height: 40px; + padding-top: 5px; + font-size: 21px; +} + +.artifact-info-path { + margin-right: 48px; +} + .artifact-info-size { + margin-right: 45px; } .view-button { diff --git a/mlflow/server/js/src/components/ArtifactView.js b/mlflow/server/js/src/components/ArtifactView.js index 5f06ef582408d..729374e4f3c7d 100644 --- a/mlflow/server/js/src/components/ArtifactView.js +++ b/mlflow/server/js/src/components/ArtifactView.js @@ -10,7 +10,7 @@ import { ArtifactNode as ArtifactUtils, ArtifactNode } from '../utils/ArtifactUt import { decorators, Treebeard } from 'react-treebeard'; import bytes from 'bytes'; import './ArtifactView.css'; -import ShowArtifactPage from './artifact-view-components/ShowArtifactPage'; +import ShowArtifactPage, {getSrc} from './artifact-view-components/ShowArtifactPage'; import spinner from '../static/mlflow-spinner.png'; class ArtifactView extends Component { @@ -20,6 +20,7 @@ class ArtifactView extends Component { this.getTreebeardData = this.getTreebeardData.bind(this); this.getRealPath = this.getRealPath.bind(this); this.shouldShowTreebeard = this.shouldShowTreebeard.bind(this); + this.activeNodeIsDirectory = this.activeNodeIsDirectory.bind(this); } static propTypes = { runUuid: PropTypes.string.isRequired, @@ -52,12 +53,26 @@ class ArtifactView extends Component {
{this.state.activeNodeId ?
- [old artifact-info bar JSX showing {this.getRealPath()} and {this.getSize()}; tags lost in extraction]
+ [new artifact-info bar JSX; tags lost in extraction: a download link rendered only when !this.activeNodeIsDirectory() (see the sketch below), followed by {this.getRealPath()} and {this.getSize()}]
: null }
@@ -65,7 +80,8 @@ class ArtifactView extends Component {
- [old JSX in this hunk lost in extraction]
+ [new JSX in this hunk lost in extraction]
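A hedged reconstruction of the download link whose markup was lost above. Only the activeNodeIsDirectory() guard, the artifact-info-link CSS class, and the getSrc import are from this diff; the icon markup and the getSrc argument order are guesses:

{!this.activeNodeIsDirectory() ?
  <div className="artifact-info-link">
    <a href={getSrc(this.state.activeNodeId, this.props.runUuid)} title="Download artifact">
      <i className="fas fa-download"/>
    </a>
  </div>
  : null}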
@@ -83,6 +99,7 @@ class ArtifactView extends Component {
); } + onToggleTreebeard(dataNode, toggled) { const { id, loading } = dataNode; const newRequestedNodeIds = new Set(this.state.requestedNodeIds); @@ -167,6 +184,16 @@ class ArtifactView extends Component { } return bytes(0); } + + activeNodeIsDirectory() { + if (this.state.activeNodeId) { + const node = ArtifactUtils.findChild(this.props.artifactNode, this.state.activeNodeId); + return node.fileInfo.is_dir; + } else { + // No node is highlighted so we're displaying the root, which is a directory. + return true; + } + } } diff --git a/mlflow/server/js/src/components/BaggedCell.js b/mlflow/server/js/src/components/BaggedCell.js new file mode 100644 index 0000000000000..49b2107745586 --- /dev/null +++ b/mlflow/server/js/src/components/BaggedCell.js @@ -0,0 +1,87 @@ +import React, { PureComponent } from 'react'; +import PropTypes from 'prop-types'; +import { Menu, Dropdown } from 'antd'; +import classNames from 'classnames'; +import ExperimentViewUtil from "./ExperimentViewUtil"; + +const styles = { + metricParamCellContent: { + display: "inline-block", + maxWidth: 120, + }, +}; + +export default class BaggedCell extends PureComponent { + static propTypes = { + keyName: PropTypes.string.isRequired, + value: PropTypes.string.isRequired, + onSortBy: PropTypes.func.isRequired, + isParam: PropTypes.bool.isRequired, + isMetric: PropTypes.bool.isRequired, + onRemoveBagged: PropTypes.func.isRequired, + sortIcon: PropTypes.node, + }; + + handleSortAscending = () => { + const { isParam, keyName, onSortBy } = this.props; + const keyType = (isParam ? "params" : "metrics"); + const canonicalKey = ExperimentViewUtil.makeCanonicalKey(keyType, keyName); + onSortBy(canonicalKey, true); + }; + + handleSortDescending = () => { + const { isParam, keyName, onSortBy } = this.props; + const keyType = (isParam ? 
"params" : "metrics"); + const canonicalKey = ExperimentViewUtil.makeCanonicalKey(keyType, keyName); + onSortBy(canonicalKey, false); + }; + + handleRemoveBagged = () => { + const { isParam, keyName, onRemoveBagged } = this.props; + onRemoveBagged(isParam, keyName); + }; + + render() { + const { keyName, value, sortIcon } = this.props; + const cellClass = classNames("metric-param-content", "metric-param-cell", "BaggedCell"); + return ( + + + + Sort ascending + + + Sort descending + + + Display as a separate column + + + )} + trigger={['click']} + > + + + {sortIcon} + {keyName}: + + + {value} + + + + + ); + } +} diff --git a/mlflow/server/js/src/components/BreadcrumbTitle.css b/mlflow/server/js/src/components/BreadcrumbTitle.css index 567b739e5aaee..8c20598a66156 100644 --- a/mlflow/server/js/src/components/BreadcrumbTitle.css +++ b/mlflow/server/js/src/components/BreadcrumbTitle.css @@ -1,7 +1,3 @@ -.breadcrumb-header { - margin-bottom: 0px; -} - .breadcrumb-title { max-width: 500px; vertical-align: bottom; @@ -12,5 +8,5 @@ font-size: 75%; margin-left: 10px; margin-right: 8px; - vertical-align: 6px; + vertical-align: 4px; } diff --git a/mlflow/server/js/src/components/CollapsibleTagsCell.js b/mlflow/server/js/src/components/CollapsibleTagsCell.js new file mode 100644 index 0000000000000..925a89718f185 --- /dev/null +++ b/mlflow/server/js/src/components/CollapsibleTagsCell.js @@ -0,0 +1,58 @@ +import React from 'react'; +import PropTypes from 'prop-types'; +import Utils from '../utils/Utils'; + +// Number of tags shown when cell is collapsed +export const NUM_TAGS_ON_COLLAPSED = 3; + +export class CollapsibleTagsCell extends React.Component { + static propTypes = { + tags: PropTypes.object.isRequired, + onToggle: PropTypes.func, + }; + + state = { + collapsed: true, + }; + + handleToggleCollapse = () => { + this.setState((prevState) => ({ collapsed: !prevState.collapsed })); + if (this.props.onToggle) { + this.props.onToggle(); + } + }; + + render() { + const visibleTags = Utils.getVisibleTagValues(this.props.tags); + const tagsToDisplay = this.state.collapsed + ? visibleTags.slice(0, NUM_TAGS_ON_COLLAPSED) + : visibleTags; + return ( +
+ {tagsToDisplay.map((entry) => { + const tagName = entry[0]; + const value = entry[1]; + return ( +
+ {tagName}:{value} +
+ ); + })} + {visibleTags.length > NUM_TAGS_ON_COLLAPSED ? ( + + {this.state.collapsed + ? `${visibleTags.length - NUM_TAGS_ON_COLLAPSED} more` + : `Show less`} + + ) : null} +
+ ); + } +} + +const styles = { + tagKey: { + color: '#888', + paddingRight: 10, + } +}; diff --git a/mlflow/server/js/src/components/CollapsibleTagsCell.test.js b/mlflow/server/js/src/components/CollapsibleTagsCell.test.js new file mode 100644 index 0000000000000..c232fc988c31e --- /dev/null +++ b/mlflow/server/js/src/components/CollapsibleTagsCell.test.js @@ -0,0 +1,45 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { CollapsibleTagsCell, NUM_TAGS_ON_COLLAPSED } from './CollapsibleTagsCell'; +import _ from 'lodash'; + +describe('unit tests', () => { + let wrapper; + let instance; + let minimalProps; + + const setupProps = (numTags) => { + const tags = []; + _.range(numTags).forEach((n) => { + tags[`tag${n}`] = { getKey: () => `tag${n}`, getValue: () => `value${n}` }; + }); + return { tags, onToggle: jest.fn() }; + }; + + beforeEach(() => { + minimalProps = setupProps(NUM_TAGS_ON_COLLAPSED + 1); + }); + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); + + test('should not show toggle link for less than NUM_TAGS_ON_COLLAPSED tags', () => { + // Assume we have exactly `NUM_TAGS_ON_COLLAPSED` tags + const props = setupProps(NUM_TAGS_ON_COLLAPSED); + wrapper = shallow(); + instance = wrapper.instance(); + expect(wrapper.find('.tag-cell-toggle-link')).toHaveLength(0); + }); + + test('toggle link should work as expected', () => { + const numTags = NUM_TAGS_ON_COLLAPSED + 2; + const props = setupProps(numTags); + wrapper = shallow(); + instance = wrapper.instance(); + expect(wrapper.find('.tag-cell-item')).toHaveLength(NUM_TAGS_ON_COLLAPSED); + instance.setState({ collapsed: false }); + expect(wrapper.find('.tag-cell-item')).toHaveLength(numTags); + }); +}); diff --git a/mlflow/server/js/src/components/CompareRunScatter.css b/mlflow/server/js/src/components/CompareRunScatter.css index 9e46d3e2627e5..17e80f727e886 100644 --- a/mlflow/server/js/src/components/CompareRunScatter.css +++ b/mlflow/server/js/src/components/CompareRunScatter.css @@ -1,44 +1,5 @@ -.scatter-tooltip { - width: 340px; +.scatter-plotly { + width: 100%; + height: 100%; + min-height: 35vw; } - -.scatter-tooltip h3 { - font-size: 105%; - color: #888; -} - -.scatter-tooltip h4 { - font-size: 105%; - color: #888; - margin: 0; -} - -.scatter-tooltip ul { - margin: 0; - padding: 0; -} - -.scatter-tooltip ul li { - display: block; - margin: 0; - padding: 0 0 0 0; - text-indent: 0; -} - -.scatter-tooltip ul li .value { - display: inline-block; - max-width: 100%; - text-overflow: ellipsis; - white-space: nowrap; - overflow: hidden; - text-indent: 0; - vertical-align: top; -} - -.recharts-responsive-container { - padding-bottom: 10px; -} - -.recharts-responsive-container svg { - overflow: visible; -} \ No newline at end of file diff --git a/mlflow/server/js/src/components/CompareRunScatter.js b/mlflow/server/js/src/components/CompareRunScatter.js index f19b8994b8918..85ccb8d640b8f 100644 --- a/mlflow/server/js/src/components/CompareRunScatter.js +++ b/mlflow/server/js/src/components/CompareRunScatter.js @@ -1,4 +1,6 @@ import React, { Component } from 'react'; +import { AllHtmlEntities } from 'html-entities'; +import Plot from 'react-plotly.js'; import PropTypes from 'prop-types'; import { getParams, getRunInfo } from '../reducers/Reducers'; import { connect } from 'react-redux'; @@ -6,16 +8,6 @@ import './CompareRunView.css'; import { RunInfo } from '../sdk/MlflowMessages'; import Utils from '../utils/Utils'; import { 
getLatestMetrics } from '../reducers/MetricReducer'; -import { - ScatterChart, - Scatter, - XAxis, - YAxis, - CartesianGrid, - Tooltip, - ResponsiveContainer, - Label, -} from 'recharts'; import './CompareRunScatter.css'; import CompareRunUtil from './CompareRunUtil'; @@ -27,13 +19,17 @@ class CompareRunScatter extends Component { runDisplayNames: PropTypes.arrayOf(String).isRequired, }; + // Size limits for displaying keys and values in our plot axes and tooltips + static MAX_PLOT_KEY_LENGTH = 40; + static MAX_PLOT_VALUE_LENGTH = 60; + constructor(props) { super(props); - this.renderTooltip = this.renderTooltip.bind(this); + this.entities = new AllHtmlEntities(); - this.metricKeys = CompareRunUtil.getKeys(this.props.metricLists, true); - this.paramKeys = CompareRunUtil.getKeys(this.props.paramLists, true); + this.metricKeys = CompareRunUtil.getKeys(this.props.metricLists, false); + this.paramKeys = CompareRunUtil.getKeys(this.props.paramLists, false); if (this.paramKeys.length + this.metricKeys.length < 2) { this.state = {disabled: true}; @@ -69,12 +65,23 @@ class CompareRunScatter extends Component { return value === undefined ? value : value.value; } + /** + * Encode HTML entities in a string (since Plotly's tooltips take HTML) + */ + encodeHtml(str) { + return this.entities.encode(str); + } + render() { if (this.state.disabled) { - return
; + return
; } - const scatterData = []; + const keyLength = CompareRunScatter.MAX_PLOT_KEY_LENGTH; + + const xs = []; + const ys = []; + const tooltips = []; this.props.runInfos.forEach((_, index) => { const x = this.getValue(index, this.state.x); @@ -82,11 +89,12 @@ class CompareRunScatter extends Component { if (x === undefined || y === undefined) { return; } - scatterData.push({index, x: +x, y: +y}); + xs.push(x); + ys.push(y); + tooltips.push(this.getPlotlyTooltip(index)); }); - return (
- [old return markup with an h2 "Scatter Plot" heading; tags lost in extraction]
+ return (
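The hunk below swaps the Recharts scatter for react-plotly.js. A minimal sketch of the Plot element it builds, using the xs/ys/tooltips arrays assembled above; the layout details here are illustrative:

<Plot
  data={[{
    x: xs,
    y: ys,
    text: tooltips,        // per-point HTML tooltips, hence encodeHtml() above
    hoverinfo: 'text',
    type: 'scatter',
    mode: 'markers',
  }]}
  layout={{ hovermode: 'closest' }}
  className="scatter-plotly"   // sized by the CSS rules added earlier in this diff
  useResizeHandler
/>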
@@ -100,43 +108,54 @@ class CompareRunScatter extends Component {
- [removed Recharts JSX: ScatterChart, Scatter, XAxis, YAxis, CartesianGrid, Tooltip, with {this.renderAxisLabel('x')} and {this.renderAxisLabel('y')}; tags lost in extraction]
+ [new react-plotly.js <Plot> element; tags lost in extraction -- see the sketch above]
); }
- renderAxisLabel(axis) { - const key = this.state[axis]; - return ( - [axis-label JSX lost in extraction] - ); }
+ [replacement tooltip/axis-title markup lost in extraction]
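Why encodeHtml() exists: Plotly renders tooltip text as HTML, so any user-controlled run name, key, or value must be escaped before it is embedded. The html-entities 1.x API pinned in package.json:

const { AllHtmlEntities } = require('html-entities');

const entities = new AllHtmlEntities();
entities.encode('a < b & c');   // -> 'a &lt; b &amp; c', safe to embed in tooltip HTML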
); } diff --git a/mlflow/server/js/src/components/EditableTagsTableView.js b/mlflow/server/js/src/components/EditableTagsTableView.js new file mode 100644 index 0000000000000..339c5915effb7 --- /dev/null +++ b/mlflow/server/js/src/components/EditableTagsTableView.js @@ -0,0 +1,136 @@ +import React from 'react'; +import { connect } from 'react-redux'; +import Utils from '../utils/Utils'; +import PropTypes from 'prop-types'; +import { Form, Input, Button, message } from 'antd'; +import { getUUID, setTagApi } from '../Actions'; +import { EditableFormTable } from './tables/EditableFormTable'; +import _ from 'lodash'; + +export class EditableTagsTableView extends React.Component { + static propTypes = { + runUuid: PropTypes.string.isRequired, + tags: PropTypes.object.isRequired, + form: PropTypes.object.isRequired, + setTagApi: PropTypes.func.isRequired, + }; + + state = { isRequestPending: false }; + + tableColumns = [ + { + title: 'Name', + dataIndex: 'name', + width: 200, + }, + { + title: 'Value', + dataIndex: 'value', + width: 200, + editable: true, + } + ]; + + requestId = getUUID(); + + getData = () => _.sortBy(Utils.getVisibleTagValues(this.props.tags).map((values) => ({ + key: values[0], + name: values[0], + value: values[1], + })), 'name'); + + getTagNamesAsSet = () => new Set( + Utils.getVisibleTagValues(this.props.tags).map((values) => values[0]) + ); + + handleAddTag = (e) => { + e.preventDefault(); + const { form, runUuid, setTagApi: setTag } = this.props; + form.validateFields((err, values) => { + if (!err) { + this.setState({ isRequestPending: true }); + setTag(runUuid, values.name, values.value, this.requestId) + .then(() => { + this.setState({ isRequestPending: false }); + form.resetFields(); + }) + .catch((ex) => { + this.setState({ isRequestPending: false }); + console.error(ex); + message.error('Failed to add tag. Error: ' + ex.getUserVisibleError()); + }); + } + }); + }; + + handleSaveEdit = ({ name, value }) => { + const { runUuid, setTagApi: setTag } = this.props; + return setTag(runUuid, name, value, this.requestId) + .catch((ex) => { + console.error(ex); + message.error('Failed to set tag. Error: ' + ex.getUserVisibleError()); + }); + }; + + tagNameValidator = (rule, value, callback) => { + const tagNamesSet = this.getTagNamesAsSet(); + callback(tagNamesSet.has(value) ? `Tag "${value}" already exists.` : undefined); + }; + + render() { + const { form } = this.props; + const { getFieldDecorator } = form; + const { isRequestPending } = this.state; + + return ( +
+ [render returns the EditableFormTable (wired to tableColumns, getData(), and handleSaveEdit above), then an "Add Tag" heading and form; JSX tags lost in extraction -- see the sketch below]
+ + {getFieldDecorator('name', { + rules: [ + { required: true, message: 'Name is required.'}, + { validator: this.tagNameValidator }, + ], + })( + + )} + + + {getFieldDecorator('value', { + rules: [{ required: true, message: 'Value is required.'}] + })( + + )} + + + + +
+
+
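A hedged reconstruction of the Add Tag form whose tags were lost above. The field decorators, validation rules, handlers, and style keys are the real ones from this file; the Form.Item layout is a guess:

<Form layout="inline" onSubmit={this.handleAddTag} style={styles.addTagForm.form}>
  <Form.Item>
    {getFieldDecorator('name', {
      rules: [
        { required: true, message: 'Name is required.' },
        { validator: this.tagNameValidator },
      ],
    })(<Input placeholder="Name" style={styles.addTagForm.nameInput}/>)}
  </Form.Item>
  <Form.Item>
    {getFieldDecorator('value', {
      rules: [{ required: true, message: 'Value is required.' }],
    })(<Input placeholder="Value" style={styles.addTagForm.valueInput}/>)}
  </Form.Item>
  <Form.Item>
    <Button htmlType="submit" loading={isRequestPending}>Add</Button>
  </Form.Item>
</Form>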
+ ); + } +} + +const styles = { + addTagForm: { + wrapper: { marginLeft: 7 }, + label: { + marginTop: 20, + }, + form: { marginBottom: 20 }, + nameInput: { width: 186 }, + valueInput: { width: 186 }, + } +}; + +const mapDispatchToProps = { setTagApi }; + +export default connect(undefined, mapDispatchToProps)(Form.create()(EditableTagsTableView)); diff --git a/mlflow/server/js/src/components/EditableTagsTableView.test.js b/mlflow/server/js/src/components/EditableTagsTableView.test.js new file mode 100644 index 0000000000000..b7057f3a4e8a3 --- /dev/null +++ b/mlflow/server/js/src/components/EditableTagsTableView.test.js @@ -0,0 +1,31 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { EditableTagsTableView } from './EditableTagsTableView'; + +describe('unit tests', () => { + let wrapper; + let instance; + const minimalProps = { + runUuid: 'runUuid1', + tags: { + tag1: { getKey: () => 'tag1', getValue: () => 'value1' }, + tag2: { getKey: () => 'tag2', getValue: () => 'value2' }, + }, + // eslint-disable-next-line no-unused-vars + form: { getFieldDecorator: jest.fn(opts => c => c) }, + setTagApi: () => {}, + }; + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); + + test('should validate tag name properly', () => { + wrapper = shallow(); + instance = wrapper.instance(); + const validationCallback = jest.fn(); + instance.tagNameValidator(undefined, 'tag1', validationCallback); + expect(validationCallback).toBeCalledWith('Tag "tag1" already exists.'); + }); +}); diff --git a/mlflow/server/js/src/components/EmptyIfClosedMenu.js b/mlflow/server/js/src/components/EmptyIfClosedMenu.js new file mode 100644 index 0000000000000..f91e5a48ae93d --- /dev/null +++ b/mlflow/server/js/src/components/EmptyIfClosedMenu.js @@ -0,0 +1,27 @@ +import React, { Component } from 'react'; +import PropTypes from 'prop-types'; +import { Dropdown } from 'react-bootstrap'; +import RootCloseWrapper from 'react-overlays/lib/RootCloseWrapper'; + + +export default class EmptyIfClosedMenu extends Component { + static propTypes = { + children: PropTypes.array.isRequired, + open: PropTypes.bool, + onClose: PropTypes.func, + }; + + render() { + const {children, open, onClose, ...props} = this.props; + if (!open) { + return null; + } + return ( + + + {children} + + + ); + } +} diff --git a/mlflow/server/js/src/components/ExperimentListView.css b/mlflow/server/js/src/components/ExperimentListView.css index 226fdfc6f2684..b9e1045580fcb 100644 --- a/mlflow/server/js/src/components/ExperimentListView.css +++ b/mlflow/server/js/src/components/ExperimentListView.css @@ -17,9 +17,9 @@ overflow:hidden; text-overflow: ellipsis; white-space: nowrap; - font-size: 16px; - height: 40px; - line-height: 40px; + font-size: 14px; + height: 32px; + line-height: 32px; padding-left: 12px; } @@ -27,6 +27,7 @@ font-weight: normal; display: inline-block; padding-bottom: 6px; + margin-bottom: 8px; } .collapser-container { @@ -44,6 +45,6 @@ width: 24px; height: 24px; text-align: center; - margin-left: 68px; + margin-left: 96px; cursor: pointer; } diff --git a/mlflow/server/js/src/components/ExperimentListView.js b/mlflow/server/js/src/components/ExperimentListView.js index 7ebde9f63ed6d..8e5767591450e 100644 --- a/mlflow/server/js/src/components/ExperimentListView.js +++ b/mlflow/server/js/src/components/ExperimentListView.js @@ -7,10 +7,11 @@ import { Experiment } from '../sdk/MlflowMessages'; import Routes from '../Routes'; import { Link } from 
'react-router-dom'; -class ExperimentListView extends Component { +export class ExperimentListView extends Component { static propTypes = { onClickListExperiments: PropTypes.func.isRequired, - activeExperimentId: PropTypes.number.isRequired, + // If activeExperimentId is undefined, then the active experiment is the first one. + activeExperimentId: PropTypes.number, experiments: PropTypes.arrayOf(Experiment).isRequired, }; @@ -44,8 +45,13 @@ class ExperimentListView extends Component { className="collapser fa fa-chevron-left login-icon"/>
- {this.props.experiments.map((e) => { - const active = parseInt(e.getExperimentId(), 10) === this.props.activeExperimentId; + {this.props.experiments.map((e, idx) => { + let active; + if (this.props.activeExperimentId) { + active = parseInt(e.getExperimentId(), 10) === this.props.activeExperimentId; + } else { + active = idx === 0; + } let className = "experiment-list-item"; if (active) { className = `${className} active-experiment-list-item`; diff --git a/mlflow/server/js/src/components/ExperimentListView.test.js b/mlflow/server/js/src/components/ExperimentListView.test.js new file mode 100644 index 0000000000000..bfd6904dfb8e7 --- /dev/null +++ b/mlflow/server/js/src/components/ExperimentListView.test.js @@ -0,0 +1,21 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { ExperimentListView } from './ExperimentListView'; +import Fixtures from '../test-utils/Fixtures'; + +test('If activeExperimentId is defined then choose that one', () => { + const wrapper = shallow( {}} + experiments={Fixtures.experiments} + activeExperimentId={1} + />); + expect(wrapper.find('.active-experiment-list-item').first().prop('title')).toEqual('Test'); +}); + +test('If activeExperimentId is undefined then choose first experiment', () => { + const wrapper = shallow( {}} + experiments={Fixtures.experiments} + />); + expect(wrapper.find('.active-experiment-list-item').first().prop('title')).toEqual('Default'); +}); diff --git a/mlflow/server/js/src/components/ExperimentPage.js b/mlflow/server/js/src/components/ExperimentPage.js index 1391caf4fe531..db79eb5e03131 100644 --- a/mlflow/server/js/src/components/ExperimentPage.js +++ b/mlflow/server/js/src/components/ExperimentPage.js @@ -1,113 +1,282 @@ import React, { Component } from 'react'; import './ExperimentPage.css'; import PropTypes from 'prop-types'; -import { getExperimentApi, getUUID, searchRunsApi } from '../Actions'; +import { + getExperimentApi, + getUUID, + searchRunsApi, + loadMoreRunsApi, +} from '../Actions'; import { connect } from 'react-redux'; import ExperimentView from './ExperimentView'; import RequestStateWrapper from './RequestStateWrapper'; import KeyFilter from '../utils/KeyFilter'; import { ViewType } from '../sdk/MlflowEnums'; - +import { ExperimentPagePersistedState } from "../sdk/MlflowLocalStorageMessages"; +import Utils from "../utils/Utils"; +import ErrorCodes from "../sdk/ErrorCodes"; +import PermissionDeniedView from "./PermissionDeniedView"; +import {Spinner} from "./Spinner"; +import { withRouter } from 'react-router-dom'; export const LIFECYCLE_FILTER = { ACTIVE: 'Active', DELETED: 'Deleted' }; -class ExperimentPage extends Component { +export class ExperimentPage extends Component { constructor(props) { super(props); - this.onSearch = this.onSearch.bind(this); - this.getRequestIds = this.getRequestIds.bind(this); + const urlState = Utils.getSearchParamsFromUrl(props.location.search); + this.state = { + ...ExperimentPage.getDefaultUnpersistedState(), + persistedState: { + paramKeyFilterString: urlState.params === undefined ? "" : urlState.params, + metricKeyFilterString: urlState.metrics === undefined ? "" : urlState.metrics, + searchInput: urlState.search === undefined ? "" : urlState.search, + orderByKey: urlState.orderByKey === undefined ? null : urlState.orderByKey, + orderByAsc: urlState.orderByAsc === undefined ? 
true : urlState.orderByAsc === "true", + }, + nextPageToken: null, + loadingMore: false, + }; + } + + getExperimentRequestId = getUUID(); + searchRunsRequestId = getUUID(); + loadMoreRunsRequestId = getUUID(); + + loadData() { + const { persistedState, lifecycleFilter } = this.state; + const { experimentId } = this.props; + const { orderByKey, orderByAsc, searchInput } = persistedState; + const orderBy = ExperimentPage.getOrderByExpr(orderByKey, orderByAsc); + const viewType = lifecycleFilterToRunViewType(lifecycleFilter); + + this.props.getExperimentApi(experimentId, this.getExperimentRequestId); + this.props + .searchRunsApi([experimentId], searchInput, viewType, orderBy, this.searchRunsRequestId) + .then(this.updateNextPageToken) + .catch((e) => { + Utils.logErrorAndNotifyUser(e); + this.setState({ nextPageToken: null, loadingMore: false }); + }); } + updateNextPageToken = (response = {}) => { + const { value } = response; + let nextPageToken = null; + if (value && value.next_page_token) { + nextPageToken = value.next_page_token; + } + this.setState({ nextPageToken, loadingMore: false }); + }; + + handleLoadMoreRuns = () => { + const { experimentId } = this.props; + const { persistedState, lifecycleFilter, nextPageToken } = this.state; + const { orderByKey, orderByAsc, searchInput } = persistedState; + const orderBy = ExperimentPage.getOrderByExpr(orderByKey, orderByAsc); + const viewType = lifecycleFilterToRunViewType(lifecycleFilter); + this.setState({ loadingMore: true }); + this.props + .loadMoreRunsApi( + [experimentId], + searchInput, + viewType, + orderBy, + nextPageToken, + this.loadMoreRunsRequestId, + ) + .then(this.updateNextPageToken) + .catch((e) => { + Utils.logErrorAndNotifyUser(e); + this.setState({ nextPageToken: null, loadingMore: false }); + }); + }; + static propTypes = { experimentId: PropTypes.number.isRequired, - dispatchSearchRuns: PropTypes.func.isRequired, + getExperimentApi: PropTypes.func.isRequired, + searchRunsApi: PropTypes.func.isRequired, + loadMoreRunsApi: PropTypes.func.isRequired, + history: PropTypes.object.isRequired, + location: PropTypes.object, }; - state = { - paramKeyFilter: new KeyFilter(), - metricKeyFilter: new KeyFilter(), - getExperimentRequestId: getUUID(), - searchRunsRequestId: getUUID(), - searchInput: '', - lastExperimentId: undefined, - lifecycleFilter: LIFECYCLE_FILTER.ACTIVE, - }; + /** Returns default values for state attributes that aren't persisted in the URL. */ + static getDefaultUnpersistedState() { + return { + // Last experiment, if any, displayed by this instance of ExperimentPage + lastExperimentId: undefined, + // Lifecycle filter of runs to display + lifecycleFilter: LIFECYCLE_FILTER.ACTIVE, + }; + } + + componentDidMount() { + this.loadData(); + } + + componentDidUpdate(prevProps) { + this.maybeReloadData(prevProps); + } static getDerivedStateFromProps(props, state) { if (props.experimentId !== state.lastExperimentId) { - const newState = { - paramKeyFilter: new KeyFilter(), - metricKeyFilter: new KeyFilter(), - getExperimentRequestId: getUUID(), - searchRunsRequestId: getUUID(), - searchInput: '', + return { + ...ExperimentPage.getDefaultUnpersistedState(), + persistedState: state.lastExperimentId === undefined ? 
+ state.persistedState : (new ExperimentPagePersistedState()).toJSON(), lastExperimentId: props.experimentId, lifecycleFilter: LIFECYCLE_FILTER.ACTIVE, }; - props.dispatch(getExperimentApi(props.experimentId, newState.getExperimentRequestId)); - props.dispatch(searchRunsApi( - [props.experimentId], - [], - lifecycleFilterToRunViewType(newState.lifecycleFilter), - newState.searchRunsRequestId)); - return newState; } return null; } - onSearch(paramKeyFilter, metricKeyFilter, andedExpressions, searchInput, lifecycleFilterInput) { + maybeReloadData(prevProps) { + if (this.props.experimentId !== prevProps.experimentId) { + this.loadData(); + } + } + + onSearch = ( + paramKeyFilterString, + metricKeyFilterString, + searchInput, + lifecycleFilterInput, + orderByKey, + orderByAsc + ) => { this.setState({ - paramKeyFilter, - metricKeyFilter, + persistedState: new ExperimentPagePersistedState({ + paramKeyFilterString, + metricKeyFilterString, + searchInput, + orderByKey, + orderByAsc, + }).toJSON(), + lifecycleFilter: lifecycleFilterInput, + }); + + const orderBy = ExperimentPage.getOrderByExpr(orderByKey, orderByAsc); + this.props + .searchRunsApi( + [this.props.experimentId], + searchInput, + lifecycleFilterToRunViewType(lifecycleFilterInput), + orderBy, + this.searchRunsRequestId, + ) + .then(this.updateNextPageToken) + .catch((e) => { + Utils.logErrorAndNotifyUser(e); + this.setState({ nextPageToken: null, loadingMore: false }); + }); + + this.updateUrlWithSearchFilter({ + paramKeyFilterString, + metricKeyFilterString, searchInput, - lifecycleFilter: lifecycleFilterInput + orderByKey, + orderByAsc, }); - const searchRunsRequestId = this.props.dispatchSearchRuns( - this.props.experimentId, andedExpressions, lifecycleFilterInput); - this.setState({ searchRunsRequestId }); + }; + + static getOrderByExpr(orderByKey, orderByAsc) { + let orderBy = []; + if (orderByKey) { + if (orderByAsc) { + orderBy = [orderByKey + " ASC"]; + } else { + orderBy = [orderByKey + " DESC"]; + } + } + return orderBy; + } + + updateUrlWithSearchFilter( + {paramKeyFilterString, metricKeyFilterString, searchInput, orderByKey, orderByAsc}) { + const state = {}; + if (paramKeyFilterString) { + state['params'] = paramKeyFilterString; + } + if (metricKeyFilterString) { + state['metrics'] = metricKeyFilterString; + } + if (searchInput) { + state['search'] = searchInput; + } + if (orderByKey) { + state['orderByKey'] = orderByKey; + } + // orderByAsc defaults to true, so only encode it if it is false. + if (orderByAsc === false) { + state['orderByAsc'] = orderByAsc; + } + const newUrl = `/experiments/${this.props.experimentId}` + + `/s?${Utils.getSearchUrlFromState(state)}`; + if (newUrl !== (this.props.history.location.pathname + + this.props.history.location.search)) { + this.props.history.push(newUrl); + } } render() { return ( -
- [old page wrapper JSX lost in extraction]
+ [new page wrapper JSX lost in extraction]
+ + {(isLoading, shouldRenderError, requests) => { + let searchRunsError; + const getExperimentRequest = Utils.getRequestWithId( + requests, this.getExperimentRequestId); + if (shouldRenderError) { + const searchRunsRequest = Utils.getRequestWithId( + requests, this.searchRunsRequestId); + if (searchRunsRequest.error) { + searchRunsError = searchRunsRequest.error.getMessageField(); + } else if (getExperimentRequest.error.getErrorCode() === + ErrorCodes.PERMISSION_DENIED) { + return (); + } else { + return undefined; + } + } + if (!getExperimentRequest || getExperimentRequest.active) { + return ; + } + + return ; + }}
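The URL persistence above round-trips through qs, which is already a dependency (Utils.getSearchParamsFromUrl and Utils.getSearchUrlFromState are internal helpers not shown in this diff). The subtlety the constructor handles is that parsed query values come back as strings:

import qs from 'qs';

const state = { search: 'metrics.rmse < 1', orderByKey: 'metrics.rmse', orderByAsc: false };
const encoded = qs.stringify(state);  // 'search=metrics.rmse%20%3C%201&orderByKey=...&orderByAsc=false'
const decoded = qs.parse(encoded);

decoded.orderByAsc === 'false';  // true -- a string, which is why the constructor
                                 // compares urlState.orderByAsc === "true" above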
); } getRequestIds() { - return [this.state.getExperimentRequestId, this.state.searchRunsRequestId]; + return [this.getExperimentRequestId, this.searchRunsRequestId]; } } -const mapStateToProps = (state, ownProps) => { - const { match } = ownProps; - if (match.url === "/") { - return { experimentId: 0 }; - } - return { experimentId: parseInt(match.params.experimentId, 10) }; -}; - -const mapDispatchToProps = (dispatch) => { - return { - dispatch, - dispatchSearchRuns: (experimentId, andedExpressions, lifecycleFilterInput) => { - const requestId = getUUID(); - dispatch(searchRunsApi([experimentId], andedExpressions, - lifecycleFilterToRunViewType(lifecycleFilterInput), requestId)); - return requestId; - } - }; +const mapDispatchToProps = { + getExperimentApi, + searchRunsApi, + loadMoreRunsApi, }; const lifecycleFilterToRunViewType = (lifecycleFilter) => { @@ -118,4 +287,4 @@ const lifecycleFilterToRunViewType = (lifecycleFilter) => { } }; -export default connect(mapStateToProps, mapDispatchToProps)(ExperimentPage); +export default withRouter(connect(undefined, mapDispatchToProps)(ExperimentPage)); diff --git a/mlflow/server/js/src/components/ExperimentPage.test.js b/mlflow/server/js/src/components/ExperimentPage.test.js new file mode 100644 index 0000000000000..fca53c880848e --- /dev/null +++ b/mlflow/server/js/src/components/ExperimentPage.test.js @@ -0,0 +1,138 @@ +import React from 'react'; +import qs from 'qs'; +import { shallow } from 'enzyme'; +import { ExperimentPage } from './ExperimentPage'; +import { ViewType } from '../sdk/MlflowEnums'; + + +const BASE_PATH = "/experiments/17/s"; +const EXPERIMENT_ID = 17; + +let searchRunsApi; +let getExperimentApi; +let loadMoreRunsApi; +let history; +let location; + +beforeEach(() => { + searchRunsApi = jest.fn(() => Promise.resolve()); + getExperimentApi = jest.fn(() => Promise.resolve()); + loadMoreRunsApi = jest.fn(() => Promise.resolve()); + location = {}; + + history = {}; + history.location = {}; + history.location.pathname = BASE_PATH; + history.location.search = ""; + history.push = jest.fn(); +}); + +const getExperimentPageMock = () => { + return shallow(); +}; + +function expectSearchState(historyEntry, state) { + const expectedPrefix = BASE_PATH + "?"; + expect(historyEntry.startsWith(expectedPrefix)).toBe(true); + const search = historyEntry.substring(expectedPrefix.length); + const parsedHistory = qs.parse(search); + expect(parsedHistory).toEqual(state); +} + +test('URL is empty for blank search', () => { + const wrapper = getExperimentPageMock(); + wrapper.instance().onSearch("", "", "", "Active", null, true); + expectSearchState(history.push.mock.calls[0][0], {}); + const searchRunsCall = searchRunsApi.mock.calls[1]; + expect(searchRunsCall[0]).toEqual([EXPERIMENT_ID]); + expect(searchRunsCall[1]).toEqual(""); + expect(searchRunsCall[2]).toEqual(ViewType.ACTIVE_ONLY); + expect(searchRunsCall[3]).toEqual([]); +}); + +test('URL can encode a complete search', () => { + const wrapper = getExperimentPageMock(); + wrapper.instance().onSearch("key_filter", "metric0, metric1", "metrics.metric0 > 3", + "Deleted", null, true); + expectSearchState(history.push.mock.calls[0][0], { + "metrics": "metric0, metric1", + "params": "key_filter", + "search": "metrics.metric0 > 3" + }); + const searchRunsCall = searchRunsApi.mock.calls[1]; + expect(searchRunsCall[1]).toEqual("metrics.metric0 > 3"); + expect(searchRunsCall[2]).toEqual(ViewType.DELETED_ONLY); +}); + +test('URL can encode order_by', () => { + const wrapper = getExperimentPageMock(); + 
wrapper.instance().onSearch("key_filter", "metric0, metric1", "", + "Active", "my_key", false); + expectSearchState(history.push.mock.calls[0][0], { + "metrics": "metric0, metric1", + "params": "key_filter", + "orderByKey": "my_key", + "orderByAsc": "false", + }); + const searchRunsCall = searchRunsApi.mock.calls[1]; + expect(searchRunsCall[1]).toEqual(""); + expect(searchRunsCall[3]).toEqual(["my_key DESC"]); +}); + +test('Loading state without any URL params', () => { + const wrapper = getExperimentPageMock(); + const state = wrapper.instance().state; + expect(state.persistedState.paramKeyFilterString).toEqual(""); + expect(state.persistedState.metricKeyFilterString).toEqual(""); + expect(state.persistedState.searchInput).toEqual(""); + expect(state.persistedState.orderByKey).toBe(null); + expect(state.persistedState.orderByAsc).toEqual(true); +}); + +test('Loading state with all URL params', () => { + location.search = "params=a&metrics=b&search=c&orderByKey=d&orderByAsc=false"; + const wrapper = getExperimentPageMock(); + const state = wrapper.instance().state; + expect(state.persistedState.paramKeyFilterString).toEqual("a"); + expect(state.persistedState.metricKeyFilterString).toEqual("b"); + expect(state.persistedState.searchInput).toEqual("c"); + expect(state.persistedState.orderByKey).toEqual("d"); + expect(state.persistedState.orderByAsc).toEqual(false); +}); + +test('should update next page token initially', () => { + const promise = Promise.resolve({ value: { next_page_token: 'token_1' } }); + searchRunsApi = jest.fn(() => promise); + const wrapper = getExperimentPageMock(); + const instance = wrapper.instance(); + return promise.then(() => expect(instance.state.nextPageToken).toBe('token_1')); +}); + +test('should update next page token after load-more', () => { + const promise = Promise.resolve({ value: { next_page_token: 'token_1' } }); + loadMoreRunsApi = jest.fn(() => promise); + const wrapper = getExperimentPageMock(); + const instance = wrapper.instance(); + instance.handleLoadMoreRuns(); + return promise.then(() => expect(instance.state.nextPageToken).toBe('token_1')); +}); + +test('should update next page token to null when load-more response has no token', () => { + const promise1 = Promise.resolve({ value: { next_page_token: 'token_1' } }); + const promise2 = Promise.resolve({ value: {} }); + searchRunsApi = jest.fn(() => promise1); + loadMoreRunsApi = jest.fn(() => promise2); + const wrapper = getExperimentPageMock(); + const instance = wrapper.instance(); + instance.handleLoadMoreRuns(); + return Promise.all([promise1, promise2]).then(() => + expect(instance.state.nextPageToken).toBe(null), + ); +}); diff --git a/mlflow/server/js/src/components/ExperimentRunsTableCompactView.js b/mlflow/server/js/src/components/ExperimentRunsTableCompactView.js index 691ac0311ec37..c30850a438c7a 100644 --- a/mlflow/server/js/src/components/ExperimentRunsTableCompactView.js +++ b/mlflow/server/js/src/components/ExperimentRunsTableCompactView.js @@ -1,12 +1,24 @@ -import React, { Component } from 'react'; +import React from 'react'; +import { connect } from 'react-redux'; import PropTypes from 'prop-types'; -import Table from 'react-bootstrap/es/Table'; import ExperimentViewUtil from "./ExperimentViewUtil"; import { RunInfo } from '../sdk/MlflowMessages'; import classNames from 'classnames'; import { Dropdown, MenuItem } from 'react-bootstrap'; import ExperimentRunsSortToggle from './ExperimentRunsSortToggle'; -import Utils from '../utils/Utils'; +import BaggedCell from 
"./BaggedCell"; +import { CellMeasurer, CellMeasurerCache, AutoSizer, Column, Table } from 'react-virtualized'; +import _ from 'lodash'; +import { LoadMoreBar } from './LoadMoreBar'; + +import 'react-virtualized/styles.css'; + +export const NUM_RUN_METADATA_COLS = 8; +const TABLE_HEADER_HEIGHT = 40; +const UNBAGGED_COL_WIDTH = 125; +const BAGGED_COL_WIDTH = 250; +const BORDER_STYLE = "1px solid #e2e2e2"; +const LOAD_MORE_ROW_HEIGHT = 37; const styles = { sortArrow: { @@ -25,17 +37,41 @@ const styles = { display: "inline-block", maxWidth: 120, }, + metricParamNameContainer: { + verticalAlign: "middle", + display: "inline-block", + overflow: "hidden" + }, + unbaggedMetricParamColHeader: { + verticalAlign: "middle", + maxWidth: UNBAGGED_COL_WIDTH, + textOverflow: "ellipsis", + whiteSpace: "nowrap", + padding: "8px 0px 8px 8px", + height: "100%" + }, + columnStyle: { + display: "flex", + alignItems: "flex-start", + }, + baggedCellContainer: { + whiteSpace: 'normal' + }, }; /** * Compact table view for displaying runs associated with an experiment. Renders metrics/params in * a single table cell per run (as opposed to one cell per metric/param). */ -class ExperimentRunsTableCompactView extends Component { +class ExperimentRunsTableCompactView extends React.Component { constructor(props) { super(props); - this.onHover = this.onHover.bind(this); this.getRow = this.getRow.bind(this); + this.tableRef = React.createRef(); + this.state = { + expanding: false, + isAtScrollBottom: false, + }; } static propTypes = { @@ -44,8 +80,6 @@ class ExperimentRunsTableCompactView extends Component { paramsList: PropTypes.arrayOf(Array).isRequired, // List of list of metrics in all the visible runs metricsList: PropTypes.arrayOf(Array).isRequired, - paramKeyList: PropTypes.arrayOf(PropTypes.string).isRequired, - metricKeyList: PropTypes.arrayOf(PropTypes.string).isRequired, // List of tags dictionary in all the visible runs. tagsList: PropTypes.arrayOf(Object).isRequired, // Function which takes one parameter (runId) @@ -54,264 +88,536 @@ class ExperimentRunsTableCompactView extends Component { onExpand: PropTypes.func.isRequired, isAllChecked: PropTypes.bool.isRequired, onSortBy: PropTypes.func.isRequired, - sortState: PropTypes.object.isRequired, + orderByKey: PropTypes.string, + orderByAsc: PropTypes.bool.isRequired, runsSelected: PropTypes.object.isRequired, runsExpanded: PropTypes.object.isRequired, - setSortByHandler: PropTypes.func.isRequired, - }; + paramKeyList: PropTypes.arrayOf(String).isRequired, + metricKeyList: PropTypes.arrayOf(String).isRequired, + metricRanges: PropTypes.object.isRequired, + // Handler for adding a metric or parameter to the set of bagged columns. All bagged metrics + // are displayed in a single column, while each unbagged metric has its own column. Similar + // logic applies for params. + onAddBagged: PropTypes.func.isRequired, + // Handler for removing a metric or parameter from the set of bagged columns. 
+ onRemoveBagged: PropTypes.func.isRequired, + // Array of keys corresponding to unbagged params + unbaggedParams: PropTypes.arrayOf(String).isRequired, + // Array of keys corresponding to unbagged metrics + unbaggedMetrics: PropTypes.arrayOf(String).isRequired, - state = { - hoverState: {isMetric: false, isParam: false, key: ""}, + nextPageToken: PropTypes.string, + handleLoadMoreRuns: PropTypes.func.isRequired, + loadingMore: PropTypes.bool.isRequired, }; - onHover({isParam, isMetric, key}) { - this.setState({ hoverState: {isParam, isMetric, key} }); - } + /** Returns a row of table content (i.e. a non-header row) corresponding to a single run. */ getRow({ idx, isParent, hasExpander, expanderOpen, childrenIds }) { const { runInfos, paramsList, metricsList, - paramKeyList, - metricKeyList, onCheckbox, - sortState, + orderByKey, + orderByAsc, runsSelected, tagsList, - setSortByHandler, + onSortBy, onExpand, + paramKeyList, + metricKeyList, + metricRanges, + unbaggedMetrics, + unbaggedParams, + onRemoveBagged, } = this.props; - const hoverState = this.state.hoverState; - const runInfo = runInfos[idx]; const paramsMap = ExperimentViewUtil.toParamsMap(paramsList[idx]); const metricsMap = ExperimentViewUtil.toMetricsMap(metricsList[idx]); + const runInfo = runInfos[idx]; const selected = runsSelected[runInfo.run_uuid] === true; const rowContents = [ - ExperimentViewUtil.getCheckboxForRow(selected, () => onCheckbox(runInfo.run_uuid)), + ExperimentViewUtil.getCheckboxForRow(selected, () => onCheckbox(runInfo.run_uuid), "div"), ExperimentViewUtil.getExpander( - hasExpander, expanderOpen, () => onExpand(runInfo.run_uuid, childrenIds)), + hasExpander, expanderOpen, () => { + onExpand(runInfo.run_uuid, childrenIds); + this.setState({ expanding: true }); + }, runInfo.run_uuid, "div") ]; - ExperimentViewUtil.getRunInfoCellsForRow(runInfo, tagsList[idx], isParent) - .forEach((col) => rowContents.push(col)); - const filteredParamKeys = paramKeyList.filter((paramKey) => paramsMap[paramKey] !== undefined); - const paramsCellContents = filteredParamKeys.map((paramKey) => { - const cellClass = classNames("metric-param-content", - { highlighted: hoverState.isParam && hoverState.key === paramKey }); + ExperimentViewUtil.getRunInfoCellsForRow( + runInfo, + tagsList[idx], + isParent, + "div", + this.handleCellToggle, + ).forEach((col) => rowContents.push(col)); + + const unbaggedParamSet = new Set(unbaggedParams); + const unbaggedMetricSet = new Set(unbaggedMetrics); + const baggedParams = paramKeyList.filter((paramKey) => + !unbaggedParamSet.has(paramKey) && paramsMap[paramKey] !== undefined); + const baggedMetrics = metricKeyList.filter((metricKey) => + !unbaggedMetricSet.has(metricKey) && metricsMap[metricKey] !== undefined); + + // Add params (unbagged, then bagged) + unbaggedParams.forEach((paramKey) => { + rowContents.push(ExperimentViewUtil.getUnbaggedParamCell(paramKey, paramsMap, "div")); + }); + // Add bagged params + const paramsCellContents = baggedParams.map((paramKey) => { const keyname = "param-" + paramKey; - const sortIcon = ExperimentViewUtil.getSortIcon(sortState, true, false, paramKey); + const sortIcon = ExperimentViewUtil.getSortIcon(orderByKey, orderByAsc, + ExperimentViewUtil.makeCanonicalKey("params", paramKey)); + return (); + }); + if (this.shouldShowBaggedColumn(true)) { + rowContents.push( +
+ {paramsCellContents} +
); + } + + // Add metrics (unbagged, then bagged) + unbaggedMetrics.forEach((metricKey) => { + rowContents.push( + ExperimentViewUtil.getUnbaggedMetricCell(metricKey, metricsMap, metricRanges, "div")); + }); + + // Add bagged metrics + const metricsCellContents = baggedMetrics.map((metricKey) => { + const keyname = "metric-" + metricKey; + const sortIcon = ExperimentViewUtil.getSortIcon(orderByKey, orderByAsc, + ExperimentViewUtil.makeCanonicalKey("metrics", metricKey)); return ( + + ); + }); + if (this.shouldShowBaggedColumn(false)) { + rowContents.push(
this.onHover({isParam: true, isMetric: false, key: paramKey})} - onMouseLeave={() => this.onHover({isParam: false, isMetric: false, key: ""})} + key={"metrics-container-cell-" + runInfo.run_uuid} + className="metric-param-container-cell" > - - + {metricsCellContents} +
+ ); + } + return { + key: runInfo.run_uuid, + contents: rowContents, + isChild: !isParent, + }; + } + + /** + * Returns true if our table should contain a column for displaying bagged params (if isParam is + * truthy) or bagged metrics. + */ + shouldShowBaggedColumn(isParam) { + const { metricKeyList, paramKeyList, unbaggedMetrics, unbaggedParams } = this.props; + if (isParam) { + return unbaggedParams.length !== paramKeyList.length || paramKeyList.length === 0; + } + return unbaggedMetrics.length !== metricKeyList.length || metricKeyList.length === 0; + } + + /** + * Returns an array of header-row cells (DOM elements) corresponding to metric / parameter + * columns. + */ + getMetricParamHeaderCells() { + const { + onSortBy, + orderByKey, + orderByAsc, + paramKeyList, + metricKeyList, + unbaggedMetrics, + unbaggedParams, + onAddBagged, + } = this.props; + const columns = []; + const getHeaderCell = (isParam, key, i) => { + const keyType = (isParam ? "params" : "metrics"); + const canonicalKey = ExperimentViewUtil.makeCanonicalKey(keyType, key); + const sortIcon = ExperimentViewUtil.getSortIcon(orderByKey, orderByAsc, canonicalKey); + const className = classNames("bottom-row", { "left-border": i === 0 }); + const elemKey = (isParam ? "param-" : "metric-") + key; + const keyContainerWidth = sortIcon ? "calc(100% - 20px)" : "100%"; + return ( +
+ - - {sortIcon} - - - {paramKey} - - - : - + {key} + {sortIcon} - - {paramsMap[paramKey].getValue()} - setSortByHandler(false, true, paramKey, true)} + onClick={() => onSortBy(canonicalKey, true)} > Sort ascending setSortByHandler(false, true, paramKey, false)} + onClick={() => onSortBy(canonicalKey, false)} > Sort descending - - - -
- ); - }); - rowContents.push( -
{paramsCellContents}
); - const filteredMetricKeys = metricKeyList.filter((key) => metricsMap[key] !== undefined); - const metricsCellContents = filteredMetricKeys.map((metricKey) => { - const keyname = "metric-" + metricKey; - const cellClass = classNames("metric-param-content", - { highlighted: hoverState.isMetric && hoverState.key === metricKey }); - const sortIcon = ExperimentViewUtil.getSortIcon(sortState, true, false, metricKey); - const metric = metricsMap[metricKey].getValue(); - return ( - this.onHover({isParam: false, isMetric: true, key: metricKey})} - onMouseLeave={() => this.onHover({isParam: false, isMetric: false, key: ""})} - > - - - - - - {sortIcon} - - - {metricKey} - - - : - - - - - {Utils.formatMetric(metric)} - - setSortByHandler(true, false, metricKey, true)} + onClick={() => onAddBagged(isParam, key)} > - Sort ascending - - setSortByHandler(true, false, metricKey, false)} - > - Sort descending + Collapse column - - - ); - }); - rowContents.push( - -
- {metricsCellContents} -
- - ); - - const sortValue = ExperimentViewUtil.computeSortValue( - sortState, metricsMap, paramsMap, runInfo, tagsList[idx]); - return { - key: runInfo.run_uuid, - sortValue, - contents: rowContents, - isChild: !isParent, +
); }; - } - getSortInfo(isMetric, isParam) { - const { sortState, onSortBy } = this.props; - const sortIcon = sortState.ascending ? - : - ; - if (sortState.isMetric === isMetric && sortState.isParam === isParam) { - return ( - onSortBy(isMetric, isParam, sortState.key)} - > - - (sort: {sortState.key} - - {sortIcon} - ) - ); + const paramClassName = classNames("bottom-row", {"left-border": unbaggedParams.length === 0}); + const metricClassName = classNames("bottom-row", {"left-border": unbaggedMetrics.length === 0}); + unbaggedParams.forEach((paramKey, i) => { + columns.push(getHeaderCell(true, paramKey, i)); + }); + + if (this.shouldShowBaggedColumn(true)) { + columns.push(
+ {paramKeyList.length !== 0 ? "" : "(n/a)"} +
); + } + unbaggedMetrics.forEach((metricKey, i) => { + columns.push(getHeaderCell(false, metricKey, i)); + }); + if (this.shouldShowBaggedColumn(false)) { + columns.push(
+ {metricKeyList.length !== 0 ? "" : "(n/a)"} +
); } - return undefined; + return columns; } + handleCellToggle = () => { + this._cache.clearAll(); + this.forceUpdate(); + }; + + _cache = new CellMeasurerCache({ + fixedWidth: true, + minHeight: 32, + }); + + _lastRenderedWidth = -1; + _lastOrderByKey = this.props.orderByKey; + _lastOrderByAsc = this.props.orderByAsc; + _lastRunsExpanded = this.props.runsExpanded; + _lastUnbaggedMetrics = this.props.unbaggedMetrics; + _lastUnbaggedParams = this.props.unbaggedParams; + + render() { const { runInfos, onCheckAll, isAllChecked, onSortBy, - sortState, + orderByKey, + orderByAsc, tagsList, runsExpanded, + unbaggedMetrics, + unbaggedParams, + nextPageToken, + loadingMore, + handleLoadMoreRuns } = this.props; - const rows = ExperimentViewUtil.getRows({ + const rows = ExperimentViewUtil.getRowRenderMetadata({ runInfos, - sortState, tagsList, - runsExpanded, - getRow: this.getRow }); + runsExpanded}); const headerCells = [ - ExperimentViewUtil.getSelectAllCheckbox(onCheckAll, isAllChecked), + ExperimentViewUtil.getSelectAllCheckbox(onCheckAll, isAllChecked, "div"), // placeholder for expander header cell, - ExperimentViewUtil.getExpanderHeader(), + ExperimentViewUtil.getExpanderHeader("div"), ]; - ExperimentViewUtil.getRunMetadataHeaderCells(onSortBy, sortState) + ExperimentViewUtil.getRunMetadataHeaderCells(onSortBy, orderByKey, orderByAsc, "div") .forEach((headerCell) => headerCells.push(headerCell)); + this.getMetricParamHeaderCells().forEach((cell) => headerCells.push(cell)); + const showLoadMore = (nextPageToken && this.state.isAtScrollBottom) || this.props.loadingMore; return ( - - - - - - - {headerCells} - - - - {ExperimentViewUtil.renderRows(rows)} - -
- [removed: the old static react-bootstrap table markup, including "Parameters" and "Metrics" column-group headers with {this.getSortInfo(false, true)} and {this.getSortInfo(true, false)} sort indicators; tags lost in extraction]
); +
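The replacement render below is built on react-virtualized. The core pattern, shown here in isolation, is a shared CellMeasurerCache plus a CellMeasurer around each variable-height cell; clearing the cache forces re-measurement, which is why the AutoSizer callback below calls _cache.clearAll() whenever the width, sort order, or bagged-column sets change:

import React from 'react';
import { CellMeasurer, CellMeasurerCache } from 'react-virtualized';

const cache = new CellMeasurerCache({ fixedWidth: true, minHeight: 32 });

// A Column cellRenderer that measures its content's height once and caches it.
const cellRenderer = ({ rowIndex, columnIndex, parent }) => (
  <CellMeasurer cache={cache} rowIndex={rowIndex} columnIndex={columnIndex} parent={parent}>
    <div>{/* variable-height cell content */}</div>
  </CellMeasurer>
);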
+ + {({width, height}) => { + if (this._lastRenderedWidth !== width) { + this._lastRenderedWidth = width; + this._cache.clearAll(); + } + if (this._lastOrderByKey !== orderByKey) { + this._lastOrderByKey = orderByKey; + this._cache.clearAll(); + } + if (this._lastOrderByAsc !== orderByAsc) { + this._lastOrderByAsc = orderByAsc; + this._cache.clearAll(); + } + if (this._lastUnbaggedMetrics !== unbaggedMetrics) { + this._lastUnbaggedMetrics = unbaggedMetrics; + this._cache.clearAll(); + } + if (this._lastUnbaggedParams !== unbaggedParams) { + this._lastUnbaggedParams = unbaggedParams; + this._cache.clearAll(); + } + const runMetadataColWidths = [ + 30, // checkbox column width + 20, // expander column width + 180, // 'Date' column width + 120, // 'user' column width + 120, // 'Run Name' column width + 100, // 'Source' column width + 80, // 'Version' column width + 250, // 'Tags' column width + ]; + const showBaggedParams = this.shouldShowBaggedColumn(true); + const showBaggedMetrics = this.shouldShowBaggedColumn(false); + const runMetadataWidth = runMetadataColWidths.reduce((a, b) => a + b); + const tableMinWidth = (BAGGED_COL_WIDTH * (showBaggedParams + showBaggedMetrics)) + + runMetadataWidth + + (UNBAGGED_COL_WIDTH * (unbaggedMetrics.length + unbaggedParams.length)); + const tableWidth = Math.max(width, tableMinWidth); + // If we aren't showing bagged metrics or params (bagged metrics & params are the + // only cols that use the CellMeasurer component), set the row height statically + const cellMeasurerProps = {}; + if (showBaggedMetrics || showBaggedParams) { + cellMeasurerProps.rowHeight = this._cache.rowHeight; + cellMeasurerProps.deferredMeasurementCache = this._cache; + } else { + cellMeasurerProps.rowHeight = 32; + } + return [ this.getRow(rows[index])} + rowStyle={({index}) => { + const base = {alignItems: "stretch", borderBottom: BORDER_STYLE, + overflow: "visible"}; + if (index === - 1) { + return { + ...base, + backgroundColor: "#fafafa", + borderTop: BORDER_STYLE, + borderLeft: BORDER_STYLE, + borderRight: BORDER_STYLE, + }; + } + return base; + }} + > + {[...Array(NUM_RUN_METADATA_COLS).keys()].map((colIdx) => { + return headerCells[colIdx]} + style={{ + ...styles.columnStyle, + // show left boarder for run tags column + ...(colIdx === NUM_RUN_METADATA_COLS - 1 + ? { borderLeft: BORDER_STYLE } + : undefined + ), + }} + cellRenderer={({ rowIndex, rowData, parent, dataKey }) => ( + + {rowData.contents[colIdx]} + + )} + />; + })} + {unbaggedParams.map((unbaggedParam, idx) => { + return headerCells[NUM_RUN_METADATA_COLS + idx]} + style={styles.columnStyle} + cellRenderer={({rowData}) => rowData.contents[NUM_RUN_METADATA_COLS + idx]} + />; + })} + {showBaggedParams && { + return
+ Parameters +
; + }} + style={{...styles.columnStyle, borderLeft: BORDER_STYLE}} + cellRenderer={({rowIndex, rowData, parent, dataKey}) => { + const colIdx = NUM_RUN_METADATA_COLS + unbaggedParams.length; + // Add extra padding for load more + const paddingOpt = rowIndex === rows.length - 1 + ? { paddingBottom: LOAD_MORE_ROW_HEIGHT * 2 } + : {}; + return ( +
+ {rowData.contents[colIdx]} +
+
); + }} + />} + {unbaggedMetrics.map((unbaggedMetric, idx) => { + const colIdx = NUM_RUN_METADATA_COLS + showBaggedParams + + unbaggedParams.length + idx; + return headerCells[colIdx]} + style={styles.columnStyle} + cellRenderer={({rowData}) => rowData.contents[colIdx]} + />; + })} + {showBaggedMetrics && { + return
+ Metrics +
; + }} + style={{...styles.columnStyle, borderLeft: BORDER_STYLE}} + cellRenderer={({rowIndex, rowData, parent, dataKey}) => { + const colIdx = NUM_RUN_METADATA_COLS + showBaggedParams + + unbaggedParams.length + unbaggedMetrics.length; + return ( +
+ {rowData.contents[colIdx]} +
+
); + }} + />} +
, + (showLoadMore ? ( + + ) : null)]; + }} +
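// [Editor's aside] A sketch (assumed helper, not in the diff) restating the
// bottom-of-list detection that drives the showLoadMore flag above; the actual
// logic lives in handleScroll() below and reads the same fields off the
// react-virtualized Grid instance.
function isAtScrollBottom(grid) {
  const clientHeight = grid.props.height;         // visible viewport height
  const scrollHeight = grid.getTotalRowsHeight(); // full content height
  const scrollTop = grid.state.scrollTop;         // current scroll offset
  // A list shorter than the viewport counts as "at bottom", so the bar can still show.
  return scrollHeight < clientHeight || clientHeight + scrollTop === scrollHeight;
}
// The bar then renders when (nextPageToken && isAtScrollBottom) || loadingMore.
// [End aside]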
+
+ + ); + } + + componentDidUpdate(prevProps) { + this.maybeHandleScroll(); + this.maybeHandleLoadMoreFinish(prevProps); } + + maybeHandleLoadMoreFinish(prevProps) { + const loadMoreJustFinished = prevProps.loadingMore === true && this.props.loadingMore === false; + if (loadMoreJustFinished) { + this.setState({ isAtScrollBottom: false }); + } + } + + maybeHandleScroll() { + if (this.state.expanding) { + this.handleScroll(); + this.setState({ expanding: false }); + } + } + + handleScroll = _.debounce(() => { + // Getting clientHeight, scrollHeight and scrollTop from the Grid instance directly here because + // corresponding inputs provided by onScroll are wrong at mounting phase and upon toggling + if (!this.tableRef.current) return; + const grid = this.tableRef.current.Grid; + const { clientHeight, scrollHeight, scrollTop } = { + clientHeight: grid.props.height, + scrollHeight: grid.getTotalRowsHeight(), + scrollTop: grid.state.scrollTop, + }; + const isRunsListShort = scrollHeight < clientHeight; + const isAtScrollBottom = isRunsListShort || (clientHeight + scrollTop === scrollHeight); + this.setState({ isAtScrollBottom }); + }, 100); } -export default ExperimentRunsTableCompactView; +const mapStateToProps = (state, ownProps) => { + const { metricsList } = ownProps; + return {metricRanges: ExperimentViewUtil.computeMetricRanges(metricsList)}; +}; + +export default connect(mapStateToProps)(ExperimentRunsTableCompactView); diff --git a/mlflow/server/js/src/components/ExperimentRunsTableMultiColumnView.js b/mlflow/server/js/src/components/ExperimentRunsTableMultiColumnView.js index f4ff6ddd7389d..cf500060487af 100644 --- a/mlflow/server/js/src/components/ExperimentRunsTableMultiColumnView.js +++ b/mlflow/server/js/src/components/ExperimentRunsTableMultiColumnView.js @@ -1,10 +1,11 @@ import React, { Component } from 'react'; +import { connect } from 'react-redux'; import PropTypes from 'prop-types'; -import Table from 'react-bootstrap/es/Table'; +import { Table } from 'react-bootstrap'; import ExperimentViewUtil from './ExperimentViewUtil'; import classNames from 'classnames'; -import Utils from '../utils/Utils'; import { RunInfo } from '../sdk/MlflowMessages'; +import { NUM_RUN_METADATA_COLS } from './ExperimentRunsTableCompactView'; /** * Table view for displaying runs associated with an experiment.
Renders each metric and param @@ -32,9 +33,11 @@ class ExperimentRunsTableMultiColumnView extends Component { onExpand: PropTypes.func.isRequired, isAllChecked: PropTypes.bool.isRequired, onSortBy: PropTypes.func.isRequired, - sortState: PropTypes.object.isRequired, + orderByKey: PropTypes.string, + orderByAsc: PropTypes.bool.isRequired, runsSelected: PropTypes.object.isRequired, runsExpanded: PropTypes.object.isRequired, + metricRanges: PropTypes.object.isRequired, }; getRow({ idx, isParent, hasExpander, expanderOpen, childrenIds }) { @@ -45,12 +48,11 @@ class ExperimentRunsTableMultiColumnView extends Component { paramKeyList, metricKeyList, onCheckbox, - sortState, runsSelected, tagsList, onExpand, + metricRanges, } = this.props; - const metricRanges = ExperimentViewUtil.computeMetricRanges(metricsList); const runInfo = runInfos[idx]; const paramsMap = ExperimentViewUtil.toParamsMap(paramsList[idx]); const metricsMap = ExperimentViewUtil.toMetricsMap(metricsList[idx]); @@ -58,64 +60,28 @@ class ExperimentRunsTableMultiColumnView extends Component { const numMetrics = metricKeyList.length; const selected = runsSelected[runInfo.run_uuid] === true; const rowContents = [ - ExperimentViewUtil.getCheckboxForRow(selected, () => onCheckbox(runInfo.run_uuid)), + ExperimentViewUtil.getCheckboxForRow(selected, () => onCheckbox(runInfo.run_uuid), "td"), ExperimentViewUtil.getExpander( - hasExpander, expanderOpen, () => onExpand(runInfo.run_uuid, childrenIds)), + hasExpander, expanderOpen, () => onExpand(runInfo.run_uuid, childrenIds), runInfo.run_uuid, + "td"), ]; - ExperimentViewUtil.getRunInfoCellsForRow(runInfo, tagsList[idx], isParent).forEach((col) => - rowContents.push(col)); - paramKeyList.forEach((paramKey, i) => { - const className = (i === 0 ? "left-border" : "") + " run-table-container"; - const keyName = "param-" + paramKey; - if (paramsMap[paramKey]) { - rowContents.push( -
- {paramsMap[paramKey].getValue()} -
- ); - } else { - rowContents.push(); - } + ExperimentViewUtil.getRunInfoCellsForRow(runInfo, tagsList[idx], isParent, "td") + .forEach((col) => rowContents.push(col)); + paramKeyList.forEach((paramKey) => { + rowContents.push(ExperimentViewUtil.getUnbaggedParamCell(paramKey, paramsMap, "td")); }); if (numParams === 0) { rowContents.push(); } - metricKeyList.forEach((metricKey, i) => { - const className = (i === 0 ? "left-border" : "") + " run-table-container"; - const keyName = "metric-" + metricKey; - if (metricsMap[metricKey]) { - const metric = metricsMap[metricKey].getValue(); - const range = metricRanges[metricKey]; - let fraction = 1.0; - if (range.max > range.min) { - fraction = (metric - range.min) / (range.max - range.min); - } - const percent = (fraction * 100) + "%"; - rowContents.push( - - {/* We need the extra div because metric-filler-bg is inline-block */} -
-
-
-
- {Utils.formatMetric(metric)} -
-
-
- - ); - } else { - rowContents.push(); - } + metricKeyList.forEach((metricKey) => { + rowContents.push( + ExperimentViewUtil.getUnbaggedMetricCell(metricKey, metricsMap, metricRanges, "td")); }); if (numMetrics === 0) { rowContents.push(); } - const sortValue = ExperimentViewUtil.computeSortValue( - sortState, metricsMap, paramsMap, runInfo, tagsList[idx]); return { key: runInfo.run_uuid, - sortValue: sortValue, contents: rowContents, isChild: !isParent, }; @@ -126,21 +92,21 @@ class ExperimentRunsTableMultiColumnView extends Component { paramKeyList, metricKeyList, onSortBy, - sortState + orderByKey, + orderByAsc, } = this.props; const numParams = paramKeyList.length; const numMetrics = metricKeyList.length; const columns = []; - const getHeaderCell = (isParam, key, i) => { - const isMetric = !isParam; - const sortIcon = ExperimentViewUtil.getSortIcon(sortState, isMetric, isParam, key); + const getHeaderCell = (isParam, key, i, canonicalKey) => { + const sortIcon = ExperimentViewUtil.getSortIcon(orderByKey, orderByAsc, canonicalKey); const className = classNames("bottom-row", "sortable", { "left-border": i === 0 }); const elemKey = (isParam ? "param-" : "metric-") + key; return ( onSortBy(isMetric, isParam, key)} + onClick={() => onSortBy(canonicalKey, !orderByAsc)} > { - columns.push(getHeaderCell(true, paramKey, i)); + columns.push(getHeaderCell(true, paramKey, i, + ExperimentViewUtil.makeCanonicalKey("params", paramKey))); }); if (numParams === 0) { columns.push((n/a)); } metricKeyList.forEach((metricKey, i) => { - columns.push(getHeaderCell(false, metricKey, i)); + columns.push(getHeaderCell(false, metricKey, i, + ExperimentViewUtil.makeCanonicalKey("metrics", metricKey))); }); if (numMetrics === 0) { columns.push((n/a)); @@ -174,7 +142,8 @@ class ExperimentRunsTableMultiColumnView extends Component { onCheckAll, isAllChecked, onSortBy, - sortState, + orderByKey, + orderByAsc, tagsList, runsExpanded, paramKeyList, @@ -182,16 +151,15 @@ class ExperimentRunsTableMultiColumnView extends Component { } = this.props; const rows = ExperimentViewUtil.getRows({ runInfos, - sortState, tagsList, runsExpanded, getRow: this.getRow }); const columns = [ - ExperimentViewUtil.getSelectAllCheckbox(onCheckAll, isAllChecked), - ExperimentViewUtil.getExpanderHeader(), + ExperimentViewUtil.getSelectAllCheckbox(onCheckAll, isAllChecked, "th"), + ExperimentViewUtil.getExpanderHeader("th"), ]; - ExperimentViewUtil.getRunMetadataHeaderCells(onSortBy, sortState) + ExperimentViewUtil.getRunMetadataHeaderCells(onSortBy, orderByKey, orderByAsc, "th") .forEach((cell) => columns.push(cell)); this.getMetricParamHeaderCells().forEach((cell) => columns.push(cell)); return ( @@ -200,7 +168,7 @@ class ExperimentRunsTableMultiColumnView extends Component { - ; + ; } static styles = { @@ -39,41 +42,52 @@ export default class ExperimentViewUtil { * Returns table cells describing run metadata (i.e. not params/metrics) comprising part of * the display row for a run. */ - static getRunInfoCellsForRow(runInfo, tags, isParent) { - const user = Utils.formatUser(runInfo.user_id); - const sourceType = Utils.renderSource(runInfo, tags); + static getRunInfoCellsForRow(runInfo, tags, isParent, cellType, handleCellToggle) { + const CellComponent = `${cellType}`; + const user = Utils.formatUser(Utils.getUser(runInfo, tags)); + const queryParams = window.location && window.location.search ? 
window.location.search : ""; + const sourceType = Utils.renderSource(tags, queryParams); const startTime = runInfo.start_time; const runName = Utils.getRunName(tags); const childLeftMargin = isParent ? {} : {paddingLeft: '16px'}; return [ - , - , - , - , - , + , + +
+ +
+
, ]; } @@ -82,12 +96,12 @@ export default class ExperimentViewUtil { * is visible if we're currently sorting by the corresponding column. Otherwise, the icon is * invisible but takes up space. */ - static getSortIcon(sortState, isMetric, isParam, key) { - if (ExperimentViewUtil.isSortedBy(sortState, isMetric, isParam, key)) { + static getSortIcon(curOrderByKey, curOrderByAsc, canonicalKey) { + if (curOrderByKey === canonicalKey) { return ( ); @@ -96,48 +110,110 @@ export default class ExperimentViewUtil { } /** Returns checkbox element for selecting all runs */ - static getSelectAllCheckbox(onCheckAll, isAllCheckedBool) { - return ; + ; } /** * Returns header-row table cells for columns containing run metadata. */ - static getRunMetadataHeaderCells(onSortBy, sortState) { - const getHeaderCell = (key, text) => { - const sortIcon = ExperimentViewUtil.getSortIcon(sortState, false, false, key); + static getRunMetadataHeaderCells(onSortBy, curOrderByKey, curOrderByAsc, cellType) { + const CellComponent = `${cellType}`; + const getHeaderCell = (key, text, canonicalSortKey) => { + const sortIcon = ExperimentViewUtil.getSortIcon(curOrderByKey, curOrderByAsc, + canonicalSortKey); + const isSortable = canonicalSortKey !== null; + const cellClassName = classNames("bottom-row", "run-table-container", + {"sortable": isSortable}); return ( - ); + {isSortable && + {sortIcon}} + ); }; return [ - getHeaderCell("start_time", {"Date"}), - getHeaderCell("user_id", {"User"}), - getHeaderCell("run_name", {"Run Name"}), - getHeaderCell("source", {"Source"}), - getHeaderCell("source_version", {"Version"}), + getHeaderCell("start_time", {"Date"}, "attributes.start_time"), + getHeaderCell("user_id", {"User"}, "tags.`mlflow.user`"), + getHeaderCell("run_name", {"Run Name"}, "tags.`mlflow.runName`"), + getHeaderCell("source", {"Source"}, "tags.`mlflow.source.name`"), + getHeaderCell("source_version", {"Version"}, "tags.`mlflow.source.git.commit`"), + getHeaderCell("tags", Tags, null), ]; } - static getExpanderHeader() { - return ; + return + ; } if (expanderOpen) { return ( - + ); } else { return ( - + ); } } - static getRows({ runInfos, sortState, tagsList, runsExpanded, getRow }) { + static getRowRenderMetadata({ runInfos, tagsList, runsExpanded }) { const runIdToIdx = {}; runInfos.forEach((r, idx) => { runIdToIdx[r.run_uuid] = idx; @@ -289,30 +379,36 @@ export default class ExperimentViewUtil { hasExpander = true; childrenIds = parentIdToChildren[runId].map((cIdx => runInfos[cIdx].run_uuid)); } - return [getRow({ + return [{ idx, isParent: true, hasExpander, expanderOpen: ExperimentViewUtil.isExpanderOpen(runsExpanded, runId), childrenIds, - })]; + runId, + }]; }); - ExperimentViewUtil.sortRows(parentRows, sortState); const mergedRows = []; parentRows.forEach((r) => { - const runId = r.key; + const runId = r.runId; mergedRows.push(r); const childrenIdxs = parentIdToChildren[runId]; if (childrenIdxs) { if (ExperimentViewUtil.isExpanderOpen(runsExpanded, runId)) { - const childrenRows = childrenIdxs.map((idx) => - getRow({ idx, isParent: false, hasExpander: false })); - ExperimentViewUtil.sortRows(childrenRows, sortState); + const childrenRows = childrenIdxs.map((idx) => { + return { idx, isParent: false, hasExpander: false }; + }); mergedRows.push(...childrenRows); } } }); - return mergedRows; + return mergedRows.slice(0); + } + + static getRows({ runInfos, tagsList, runsExpanded, getRow }) { + const mergedRows = ExperimentViewUtil.getRowRenderMetadata( + { runInfos, tagsList, runsExpanded }); + return 
mergedRows.map((rowMetadata) => getRow(rowMetadata)); } static renderRows(rows) { @@ -346,6 +442,7 @@ class TreeNode { if (visited.has(current.parent.value)) { return undefined; } + visited.add(current.value); current = current.parent; } return current; diff --git a/mlflow/server/js/src/components/HomePage.css b/mlflow/server/js/src/components/HomePage.css index 2a8585c70b259..043bbff7840d4 100644 --- a/mlflow/server/js/src/components/HomePage.css +++ b/mlflow/server/js/src/components/HomePage.css @@ -2,21 +2,24 @@ display: flex; } .HomePage-experiment-list-container { - width: 10%; - min-width: 333px; + flex: 0 0 312px; } .experiment-view-container { - width: 80%; -} -.experiment-view-right { - width: 10%; + /* + * Allow experiment view container to be smaller than its content (i.e. a large-width runs table) + * by overriding min-width as described in https://stackoverflow.com/a/38383437 and + * https://developer.mozilla.org/en-US/docs/Web/CSS/flex + */ + min-width: 0px; + flex: 1 1; + padding: 0 64px 0 32px; } /* BEGIN css for when experiment list collapsed */ .experiment-page-container { - width: 80%; - margin: 0 auto; + width: 100%; + padding: 0 64px 0 32px; } .collapsed-expander-container { float: left; diff --git a/mlflow/server/js/src/components/HomePage.js b/mlflow/server/js/src/components/HomePage.js index 0c0bc558f047f..d3e1fbab17478 100644 --- a/mlflow/server/js/src/components/HomePage.js +++ b/mlflow/server/js/src/components/HomePage.js @@ -1,26 +1,18 @@ import React, { Component } from 'react'; import PropTypes from 'prop-types'; -import ExperimentPage from './ExperimentPage'; import { connect } from 'react-redux'; import { getUUID, listExperimentsApi } from '../Actions'; import RequestStateWrapper from './RequestStateWrapper'; import './HomePage.css'; -import ExperimentListView from './ExperimentListView'; +import HomeView from './HomeView'; class HomePage extends Component { - constructor(props) { - super(props); - this.onClickListExperiments = this.onClickListExperiments.bind(this); - } - static propTypes = { - match: PropTypes.object.isRequired, dispatchListExperimentsApi: PropTypes.func.isRequired, - experimentId: PropTypes.number.isRequired, + experimentId: PropTypes.number, }; state = { - listExperimentsExpanded: true, listExperimentsRequestId: getUUID(), }; @@ -28,51 +20,19 @@ class HomePage extends Component { this.props.dispatchListExperimentsApi(this.state.listExperimentsRequestId); } - onClickListExperiments() { - this.setState({ listExperimentsExpanded: !this.state.listExperimentsExpanded }); - } - render() { - if (this.state.listExperimentsExpanded) { - return ( -
-
- -
- -
-
-
-
- -
-
-
- ); - } else { - return ( -
-
- -
-
- -
-
- ); - } + return ( + + + + ); } } const mapStateToProps = (state, ownProps) => { const { match } = ownProps; if (match.url === "/") { - return { experimentId: 0 }; + return {}; } return { experimentId: parseInt(match.params.experimentId, 10) }; }; @@ -80,7 +40,7 @@ const mapStateToProps = (state, ownProps) => { const mapDispatchToProps = (dispatch) => { return { dispatchListExperimentsApi: (requestId) => { - dispatch(listExperimentsApi(requestId)); + return dispatch(listExperimentsApi(requestId)); } }; }; diff --git a/mlflow/server/js/src/components/HomeView.js b/mlflow/server/js/src/components/HomeView.js new file mode 100644 index 0000000000000..09e51a77234f9 --- /dev/null +++ b/mlflow/server/js/src/components/HomeView.js @@ -0,0 +1,101 @@ +import React, { Component } from 'react'; +import PropTypes from 'prop-types'; +import { connect } from 'react-redux'; +import ExperimentListView from './ExperimentListView'; +import ExperimentPage from './ExperimentPage'; +import { getExperiments } from '../reducers/Reducers'; +import NoExperimentView from './NoExperimentView'; + +export const getFirstActiveExperiment = (experiments) => { + const sorted = experiments.concat().sort((a, b) => (a.experiment_id - b.experiment_id)); + return sorted.find((e) => e.lifecycle_stage === "active"); +}; + +class HomeView extends Component { + constructor(props) { + super(props); + this.onClickListExperiments = this.onClickListExperiments.bind(this); + } + + static propTypes = { + experimentId: PropTypes.number, + }; + + state = { + listExperimentsExpanded: true, + }; + + onClickListExperiments() { + this.setState({ listExperimentsExpanded: !this.state.listExperimentsExpanded }); + } + + render() { + const headerHeight = process.env.HIDE_HEADER === 'true' ? 0 : 60; + const containerHeight = "calc(100% - " + headerHeight + "px)"; + if (process.env.HIDE_EXPERIMENT_LIST === 'true') { + return ( +
+ { this.props.experimentId !== undefined ? + : + + } +
+ ); + } + if (this.state.listExperimentsExpanded) { + return ( +
+
+
+ +
+
+
+ { this.props.experimentId !== undefined ? + : + + } +
+
+ ); + } else { + return ( +
+
+ +
+
+ { this.props.experimentId !== undefined ? + : + + } +
+
+ ); + } + } +} + +const styles = { + showExperimentListExpander: { + marginTop: 24, + }, +}; + +const mapStateToProps = (state, ownProps) => { + if (ownProps.experimentId === undefined) { + const firstExp = getFirstActiveExperiment(getExperiments(state)); + if (firstExp) { + return { experimentId: parseInt(firstExp.experiment_id, 10) }; + } + } + return {}; +}; + +export default connect(mapStateToProps)(HomeView); diff --git a/mlflow/server/js/src/components/HomeView.test.js b/mlflow/server/js/src/components/HomeView.test.js new file mode 100644 index 0000000000000..1ae236075c44a --- /dev/null +++ b/mlflow/server/js/src/components/HomeView.test.js @@ -0,0 +1,12 @@ +import Fixtures from '../test-utils/Fixtures'; +import { getFirstActiveExperiment } from './HomeView'; + +const experiments = [ + Fixtures.createExperiment({ experiment_id: '1', name: '1', lifecycle_stage: 'deleted'}), + Fixtures.createExperiment({ experiment_id: '3', name: '3', lifecycle_stage: 'active'}), + Fixtures.createExperiment({ experiment_id: '2', name: '2', lifecycle_stage: 'active'}), +]; + +test('getFirstActiveExperiment works', () => { + expect(getFirstActiveExperiment(experiments).experiment_id).toEqual('2'); +}); diff --git a/mlflow/server/js/src/components/HtmlTableView.css b/mlflow/server/js/src/components/HtmlTableView.css index 285a6eda75b9e..504b2c7788c06 100644 --- a/mlflow/server/js/src/components/HtmlTableView.css +++ b/mlflow/server/js/src/components/HtmlTableView.css @@ -1,5 +1,5 @@ th { background-color: #fafafa; - font-size: 13px; + font-size: 14px; color: #888; } diff --git a/mlflow/server/js/src/components/LineSmoothSlider.js b/mlflow/server/js/src/components/LineSmoothSlider.js new file mode 100644 index 0000000000000..fe611294ea96c --- /dev/null +++ b/mlflow/server/js/src/components/LineSmoothSlider.js @@ -0,0 +1,53 @@ +import React from 'react'; +import { Slider, InputNumber, Row, Col } from 'antd'; +import PropTypes from 'prop-types'; + +export class LineSmoothSlider extends React.Component { + static propTypes = { + min: PropTypes.number.isRequired, + max: PropTypes.number.isRequired, + handleLineSmoothChange: PropTypes.func.isRequired, + }; + + state = { + inputValue: 0, + }; + + onChange = (value) => { + if (Number.isNaN(value)) { + return; + } + this.setState({ + inputValue: value, + }); + this.props.handleLineSmoothChange(value); + }; + + render() { + const { min, max } = this.props; + const { inputValue } = this.state; + return ( + +
+ + + + + + + ); + } +} diff --git a/mlflow/server/js/src/components/LoadMoreBar.js b/mlflow/server/js/src/components/LoadMoreBar.js new file mode 100644 index 0000000000000..b2442556fdb30 --- /dev/null +++ b/mlflow/server/js/src/components/LoadMoreBar.js @@ -0,0 +1,73 @@ +/** + "Load more" bar for the user to click and load more runs. This row is currently built + outside of the Table component as we are following a minimally invasive way of building + this feature to avoid a massive refactor of the current implementation. Ideally, this row + can be built inside the Table as a special row by rewriting table rendering with a + custom `rowRenderer`. That way, we don't need to handle the scrolling position manually. + We can consider doing this refactor while we implement the multi-level nested runs. + TODO(Zangr) rewrite the runs table with rowRenderer to allow a built-in load-more row +*/ +import React from 'react'; +import { Button, Icon } from 'antd'; +import PropTypes from 'prop-types'; + +export class LoadMoreBar extends React.PureComponent { + static propTypes = { + height: PropTypes.number.isRequired, + width: PropTypes.number.isRequired, + borderStyle: PropTypes.string.isRequired, + loadingMore: PropTypes.bool.isRequired, + onLoadMore: PropTypes.func.isRequired, + }; + + render() { + const { height, width, borderStyle, loadingMore, onLoadMore } = this.props; + return ( +
+ {loadingMore ? ( +
+ +
+ ) : ( + + )} +
+ ); + } +} + +const styles = { + loadMoreRows: { + display: 'flex', + justifyContent: 'center', + alignItems: 'center', + background: 'white', + position: 'absolute', + bottom: 20, + }, + loadingMoreWrapper: { + display: 'flex', + alignItems: 'center', + }, + loadingMoreIcon: { + fontSize: 20, + }, + loadMoreButton: { + paddingLeft: 16, + paddingRight: 16, + } +}; diff --git a/mlflow/server/js/src/components/LoadMoreBar.test.js b/mlflow/server/js/src/components/LoadMoreBar.test.js new file mode 100644 index 0000000000000..c9190f3e8d1c1 --- /dev/null +++ b/mlflow/server/js/src/components/LoadMoreBar.test.js @@ -0,0 +1,35 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { LoadMoreBar } from './LoadMoreBar'; + +describe('unit tests', () => { + let wrapper; + let mininumProps; + + beforeEach(() => { + mininumProps = { + height: 37, + width: 1000, + borderStyle: '', + loadingMore: false, + onLoadMore: jest.fn(), + }; + }); + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); + + test('should render load-more button by default', () => { + const props = { ...mininumProps, loadingMore: false }; + wrapper = shallow(); + expect(wrapper.find('.load-more-button').length).toBe(1); + }); + + test('should render loading icon when loadingMore is true', () => { + const props = { ...mininumProps, loadingMore: true }; + wrapper = shallow(); + expect(wrapper.find('.loading-more-wrapper').length).toBe(1); + }); +}); diff --git a/mlflow/server/js/src/components/MetricView.css b/mlflow/server/js/src/components/MetricView.css index 7286e96dc1a2a..05c714727a33c 100644 --- a/mlflow/server/js/src/components/MetricView.css +++ b/mlflow/server/js/src/components/MetricView.css @@ -1,23 +1,68 @@ div.MetricView { - display: block; - max-width: 1200px; - max-height: 10%; + display: block; + max-height: 10%; } div.MetricView h1 { - margin-bottom: 24px; + margin-bottom: 24px; } div.MetricView .recharts-tooltip-item-name { - max-width: 100px; - overflow: hidden; - text-overflow: ellipsis; - display: inline-block; - vertical-align: bottom; + max-width: 100px; + overflow: hidden; + text-overflow: ellipsis; + display: inline-block; + vertical-align: bottom; } div.MetricView .recharts-tooltip-item-value, .recharts-tooltip-item-separator { - display: inline-block; - margin-left: 4px; + display: inline-block; + margin-left: 4px; +} + +.metrics-plot-container { + display: flex; + width: 100%; +} + +.metrics-plot-container .plot-controls .x-axis-radio { + display: block; + height: 30px; + line-height: 30px; +} + +.metrics-plot-container .plot-controls .metrics-select { + width: 300px; + display: block; +} + +.metrics-plot-container .plot-controls .metrics-select input[type=text] { + padding: 0; +} + +.metrics-plot-container .plot-controls { + display: flex; + flex-direction: column; + justify-content: center; + min-height: 500px; +} + +.metrics-plot-container .plot-controls .inline-control { + margin-top: 25px; + display: flex; + align-items: center; +} + +.metrics-plot-container .plot-controls .inline-control .control-label { + margin-right: 10px; +} + +.metrics-plot-container .plot-controls .block-control { + margin-top: 25px; +} + +.metrics-plot-container .metrics-plot-view-container { + min-height: 500px; + flex: 1; } diff --git a/mlflow/server/js/src/components/MetricView.js b/mlflow/server/js/src/components/MetricView.js index f8d9d3f2dc896..64ba3becc0ac4 100644 --- a/mlflow/server/js/src/components/MetricView.js +++ 
b/mlflow/server/js/src/components/MetricView.js @@ -1,152 +1,50 @@ import React, { Component } from 'react'; import PropTypes from 'prop-types'; -import { LineChart, BarChart, Bar, XAxis, Tooltip, CartesianGrid, Line, YAxis, - ResponsiveContainer, Legend } from 'recharts'; import { connect } from 'react-redux'; import Utils from '../utils/Utils'; -import { getMetricsByKey } from '../reducers/MetricReducer'; import './MetricView.css'; -import { Experiment } from "../sdk/MlflowMessages"; -import { getExperiment, getRunTags} from "../reducers/Reducers"; -import BreadcrumbTitle from "./BreadcrumbTitle"; - -const COLORS = [ - "#993955", - "#AE76A6", - "#A3C3D9", - "#364958", - "#FF82A9", - "#FFC0BE", -]; +import { Experiment } from '../sdk/MlflowMessages'; +import { getExperiment, getRunTags } from '../reducers/Reducers'; +import BreadcrumbTitle from './BreadcrumbTitle'; +import MetricsPlotPanel from './MetricsPlotPanel'; +import { withRouter } from 'react-router-dom'; class MetricView extends Component { static propTypes = { experiment: PropTypes.instanceOf(Experiment).isRequired, - title: PropTypes.element.isRequired, - // Object with keys from Metric json and also - metrics: PropTypes.arrayOf(Object).isRequired, runUuids: PropTypes.arrayOf(String).isRequired, runNames: PropTypes.arrayOf(String).isRequired, + metricKey: PropTypes.string.isRequired, + location: PropTypes.object.isRequired, }; - static MAX_RUN_NAME_DISPLAY_LENGTH = 36; - - // Returns payload to use in recharts Legend component - // Legend type must be one of the values in - // https://github.com/recharts/recharts/blob/1b523c1/src/util/ReactUtils.js#L139 - getLegendPayload(legendType) { - const { runNames } = this.props; - return runNames.map((runName, idx) => { - return { - value: Utils.truncateString(runName, MetricView.MAX_RUN_NAME_DISPLAY_LENGTH), - id: idx, - type: legendType, - // Must specify legend item color, see https://github.com/recharts/recharts/issues/818 - color: COLORS[idx % COLORS.length], - }; - }); - } - render() { - const { experiment, runUuids, title, metrics, runNames } = this.props; - if (metrics.length === 1) { - return ( -
-
- -
- - - - - - - - {runUuids.map((uuid, idx) => ( - - ))} - - -
- ); - } else { - return ( -
-
- -
- - - - - - - - {runUuids.map((uuid, idx) => ( - - ))} - - + const { experiment, runUuids, runNames, metricKey, location } = this.props; + const plotMetricKeys = Utils.getPlotMetricKeysFromUrl(location.search); + return ( +
+
+ {plotMetricKeys.length > 1 ? 'Metrics' : plotMetricKeys[0]}} + />
- ); - } + +
+ ); } } const mapStateToProps = (state, ownProps) => { - const { metricKey, runUuids, experimentId } = ownProps; + const { experimentId, runUuids } = ownProps; const experiment = experimentId !== null ? getExperiment(experimentId, state) : null; - let maxLength = 0; - runUuids.forEach(runUuid => { - maxLength = Math.max(maxLength, getMetricsByKey(runUuid, metricKey, state).length); - }); - const metrics = new Array(maxLength); - for (let i = 0; i < metrics.length; i++) { - metrics[i] = {index: i}; - } - runUuids.forEach(runUuid => { - const entries = getMetricsByKey(runUuid, metricKey, state); - for (let i = 0; i < entries.length; i++) { - metrics[i][runUuid] = entries[i].value; - } - }); const runNames = runUuids.map((runUuid) => { const tags = getRunTags(runUuid, state); return Utils.getRunDisplayName(tags, runUuid); }); - return { - experiment, - metrics, - title: {metricKey}, - runUuids: runUuids, - runNames: runNames - }; + return { experiment, runNames }; }; -export default connect(mapStateToProps)(MetricView); +export default withRouter(connect(mapStateToProps)(MetricView)); diff --git a/mlflow/server/js/src/components/MetricsPlotControls.js b/mlflow/server/js/src/components/MetricsPlotControls.js new file mode 100644 index 0000000000000..cc16fb007f67b --- /dev/null +++ b/mlflow/server/js/src/components/MetricsPlotControls.js @@ -0,0 +1,113 @@ +import React from 'react'; +import _ from 'lodash'; +import { Radio, Switch, TreeSelect, Icon, Tooltip } from 'antd'; +import PropTypes from 'prop-types'; +import { CHART_TYPE_LINE } from './MetricsPlotPanel'; +import { LineSmoothSlider } from './LineSmoothSlider'; + +const RadioGroup = Radio.Group; +export const X_AXIS_WALL = 'wall'; +export const X_AXIS_STEP = 'step'; +export const X_AXIS_RELATIVE = 'relative'; + +export class MetricsPlotControls extends React.Component { + static propTypes = { + // An array of distinct metric keys to be shown as options + distinctMetricKeys: PropTypes.arrayOf(String).isRequired, + // An array of metric keys selected by user or indicated by URL + selectedMetricKeys: PropTypes.arrayOf(String).isRequired, + selectedXAxis: PropTypes.string.isRequired, + handleXAxisChange: PropTypes.func.isRequired, + handleShowPointChange: PropTypes.func.isRequired, + handleMetricsSelectChange: PropTypes.func.isRequired, + handleYAxisLogScaleChange: PropTypes.func.isRequired, + handleLineSmoothChange: PropTypes.func.isRequired, + chartType: PropTypes.string.isRequired, + }; + + handleMetricsSelectFilterChange = (text, option) => + option.props.title.toUpperCase().includes(text.toUpperCase()); + + getAllMetricKeys = () => { + const { distinctMetricKeys } = this.props; + return distinctMetricKeys.map((metricKey) => ({ + title: metricKey, + value: metricKey, + key: metricKey, + })); + }; + + render() { + const { chartType } = this.props; + const lineSmoothnessTooltipText = + 'Make the line between points "smoother" based on generalized Catmull-Rom splines. ' + + 'Smoothing can be useful for displaying the ' + + 'overall trend when the logging frequency is high.'; + return ( +
+ {chartType === CHART_TYPE_LINE ? ( +
+
+
Points:
+ +
+
+
+ Line Smoothness {' '} + + + +
+ +
+
+
X-axis:
+ + + Step + + + Time (Wall) + + + Time (Relative) + + +
+
+ ) : null} +
+
Y-axis:
+ +
+
+
Y-axis Log Scale:
+ +
+
+ ); + } +} diff --git a/mlflow/server/js/src/components/MetricsPlotControls.test.js b/mlflow/server/js/src/components/MetricsPlotControls.test.js new file mode 100644 index 0000000000000..d8d6fb6375eb8 --- /dev/null +++ b/mlflow/server/js/src/components/MetricsPlotControls.test.js @@ -0,0 +1,41 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { MetricsPlotControls, X_AXIS_RELATIVE } from './MetricsPlotControls'; +import { CHART_TYPE_BAR, CHART_TYPE_LINE } from './MetricsPlotPanel'; + +describe('unit tests', () => { + let wrapper; + const minimalPropsForLineChart = { + distinctMetricKeys: ['metric_0', 'metric_1'], + selectedMetricKeys: ['metric_0'], + selectedXAxis: X_AXIS_RELATIVE, + handleXAxisChange: jest.fn(), + handleShowPointChange: jest.fn(), + handleMetricsSelectChange: jest.fn(), + handleYAxisLogScaleChange: jest.fn(), + handleLineSmoothChange: jest.fn(), + chartType: CHART_TYPE_LINE, + }; + const minimalPropsForBarChart = { ...minimalPropsForLineChart, chartType: CHART_TYPE_BAR }; + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); + + test('should show x-axis controls for line chart', () => { + wrapper = shallow(); + expect(wrapper.find('.show-point-toggle')).toHaveLength(1); + expect(wrapper.find('.smoothness-toggle')).toHaveLength(1); + expect(wrapper.find('.x-axis-radio')).toHaveLength(3); + }); + + test('should not show x-axis controls for bar chart', () => { + wrapper = shallow(); + expect(wrapper.find('.show-point-toggle')).toHaveLength(0); + expect(wrapper.find('.smoothness-toggle')).toHaveLength(0); + expect(wrapper.find('.x-axis-radio')).toHaveLength(0); + }); +}); diff --git a/mlflow/server/js/src/components/MetricsPlotPanel.js b/mlflow/server/js/src/components/MetricsPlotPanel.js new file mode 100644 index 0000000000000..7fd5727e5c519 --- /dev/null +++ b/mlflow/server/js/src/components/MetricsPlotPanel.js @@ -0,0 +1,220 @@ +import React from 'react'; +import { connect } from 'react-redux'; +import Utils from '../utils/Utils'; +import RequestStateWrapper from './RequestStateWrapper'; +import { getMetricHistoryApi, getUUID } from '../Actions'; +import PropTypes from 'prop-types'; +import _ from 'lodash'; +import { MetricsPlotView } from './MetricsPlotView'; +import { getRunTags } from '../reducers/Reducers'; +import { MetricsPlotControls, X_AXIS_RELATIVE, X_AXIS_STEP } from './MetricsPlotControls'; +import qs from 'qs'; +import { withRouter } from 'react-router-dom'; +import Routes from '../Routes'; + +export const CHART_TYPE_LINE = 'line'; +export const CHART_TYPE_BAR = 'bar'; + +export class MetricsPlotPanel extends React.Component { + static propTypes = { + runUuids: PropTypes.arrayOf(String).isRequired, + metricKey: PropTypes.string.isRequired, + // A map of { runUuid : { metricKey: value } } + latestMetricsByRunUuid: PropTypes.object.isRequired, + // An array of distinct metric keys across all runUuids + distinctMetricKeys: PropTypes.arrayOf(String).isRequired, + // An array of { metricKey, history, runUuid, runDisplayName } + metricsWithRunInfoAndHistory: PropTypes.arrayOf(Object).isRequired, + getMetricHistoryApi: PropTypes.func.isRequired, + location: PropTypes.object.isRequired, + history: PropTypes.object.isRequired, + runDisplayNames: PropTypes.arrayOf(String).isRequired, + }; + + constructor(props) { + super(props); + const plotMetricKeys = Utils.getPlotMetricKeysFromUrl(props.location.search); + 
const selectedMetricKeys = plotMetricKeys.length ? plotMetricKeys : [props.metricKey]; + this.state = { + selectedXAxis: X_AXIS_RELATIVE, + selectedMetricKeys, + showPoint: false, + historyRequestIds: [], + yAxisLogScale: false, + lineSmoothness: 0, + }; + this.loadMetricHistory(this.props.runUuids, selectedMetricKeys); + } + + static predictChartType(metrics) { + // Show bar chart when every metric has exactly 1 history entry + if ( + metrics && + metrics.length && + _.every(metrics, (metric) => metric.history && metric.history.length === 1) + ) { + return CHART_TYPE_BAR; + } + return CHART_TYPE_LINE; + } + + static isComparing(search) { + const params = qs.parse(search); + const runs = params && params['?runs']; + return runs ? JSON.parse(runs).length > 1 : false; + } + + updateUrlWithSelectedMetrics(selectedMetricKeys) { + const { runUuids, metricKey, location, history } = this.props; + const params = qs.parse(location.search); + const experimentId = params['experiment']; + history.push(Routes.getMetricPageRoute(runUuids, metricKey, experimentId, selectedMetricKeys)); + } + + loadMetricHistory = (runUuids, metricKeys) => { + const requestIds = []; + const { latestMetricsByRunUuid } = this.props; + runUuids.forEach((runUuid) => { + metricKeys.forEach((metricKey) => { + if (latestMetricsByRunUuid[runUuid][metricKey]) { + const id = getUUID(); + this.props.getMetricHistoryApi(runUuid, metricKey, id); + requestIds.push(id); + } + }); + }); + return requestIds; + }; + + getMetrics = () => { + /* eslint-disable no-param-reassign */ + const selectedMetricsSet = new Set(this.state.selectedMetricKeys); + const { selectedXAxis } = this.state; + const { metricsWithRunInfoAndHistory } = this.props; + + // Take only selected metrics + const metrics = metricsWithRunInfoAndHistory.filter((m) => selectedMetricsSet.has(m.metricKey)); + + // Sort metric history based on selected x-axis + metrics.forEach((metric) => { + const isStep = + selectedXAxis === X_AXIS_STEP && metric.history[0] && _.isNumber(metric.history[0].step); + // Metric history can be large. Doing an in-place sort here to save memory + metric.history.sort(isStep ? Utils.compareByStepAndTimestamp : Utils.compareByTimestamp); + }); + return metrics; + }; + + handleYAxisLogScaleChange = (yAxisLogScale) => { + this.setState({ yAxisLogScale }); + }; + + handleXAxisChange = (e) => { + this.setState({ selectedXAxis: e.target.value }); + }; + + handleMetricsSelectChange = (metricValues, metricLabels, { triggerValue }) => { + const requestIds = this.loadMetricHistory(this.props.runUuids, [triggerValue]); + this.setState((prevState) => ({ + selectedMetricKeys: metricValues, + historyRequestIds: [...prevState.historyRequestIds, ...requestIds], + })); + this.updateUrlWithSelectedMetrics(metricValues); + }; + + handleShowPointChange = (showPoint) => this.setState({ showPoint }); + + handleLineSmoothChange = (lineSmoothness) => this.setState({ lineSmoothness }); + + render() { + const { runUuids, runDisplayNames, distinctMetricKeys, location } = this.props; + const { + historyRequestIds, + showPoint, + selectedXAxis, + selectedMetricKeys, + yAxisLogScale, + lineSmoothness, + } = this.state; + const metrics = this.getMetrics(); + const chartType = MetricsPlotPanel.predictChartType(metrics); + return (
+ + + + +
+ ); + } +} + +const mapStateToProps = (state, ownProps) => { + const { runUuids } = ownProps; + const { latestMetricsByRunUuid, metricsByRunUuid } = state.entities; + + // All metric keys from all runUuids, non-distinct + const metricKeys = _.flatMap(runUuids, (runUuid) => { + const latestMetrics = latestMetricsByRunUuid[runUuid]; + return latestMetrics ? Object.keys(latestMetrics) : []; + }); + const distinctMetricKeys = [...new Set(metricKeys)].sort(); + + const runDisplayNames = []; + + // Flat array of all metrics, with history and information of the run it belongs to + // This is used for underlying MetricsPlotView & predicting chartType for MetricsPlotControls + const metricsWithRunInfoAndHistory = _.flatMap(runUuids, (runUuid) => { + const runDisplayName = Utils.getRunDisplayName(getRunTags(runUuid, state), runUuid); + runDisplayNames.push(runDisplayName); + const metricsHistory = metricsByRunUuid[runUuid]; + return metricsHistory + ? Object.keys(metricsHistory).map((metricKey) => { + const history = metricsHistory[metricKey].map((entry) => ({ + key: entry.key, + value: entry.value, + step: Number.parseInt(entry.step, 10) || 0, // default step to 0 + timestamp: Number.parseFloat(entry.timestamp), + })); + return { metricKey, history, runUuid, runDisplayName }; + }) + : []; + }); + + return { + runDisplayNames, + latestMetricsByRunUuid, + distinctMetricKeys, + metricsWithRunInfoAndHistory, + }; +}; + +const mapDispatchToProps = { getMetricHistoryApi }; + +export default withRouter( + connect( + mapStateToProps, + mapDispatchToProps, + )(MetricsPlotPanel), +); diff --git a/mlflow/server/js/src/components/MetricsPlotPanel.test.js b/mlflow/server/js/src/components/MetricsPlotPanel.test.js new file mode 100644 index 0000000000000..864ab1ad9b972 --- /dev/null +++ b/mlflow/server/js/src/components/MetricsPlotPanel.test.js @@ -0,0 +1,170 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { MetricsPlotPanel, CHART_TYPE_BAR, CHART_TYPE_LINE } from './MetricsPlotPanel'; +import { X_AXIS_RELATIVE, X_AXIS_STEP, X_AXIS_WALL } from './MetricsPlotControls'; +import Utils from '../utils/Utils'; + +describe('unit tests', () => { + let wrapper; + let instance; + let minimalPropsForLineChart; + let minimalPropsForBarChart; + + beforeEach(() => { + minimalPropsForLineChart = { + runUuids: ['runUuid1', 'runUuid2'], + metricKey: 'metric_1', + latestMetricsByRunUuid: { + runUuid1: { metric_1: 100, metric_2: 200 }, + runUuid2: { metric_1: 111, metric_2: 222 }, + }, + distinctMetricKeys: ['metric_1', 'metric_2'], + // An array of { metricKey, history, runUuid, runDisplayName } + metricsWithRunInfoAndHistory: [ + { + metricKey: 'metric_1', + history: [ + /* Intentionally reversed timestamp and step here for testing */ + { key: 'metric_1', value: 100, step: 2, timestamp: 1556662044000 }, + { key: 'metric_1', value: 50, step: 1, timestamp: 1556662043000 }, + ], + runUuid: 'runUuid1', + runDisplayName: 'runDisplayName1', + }, + { + metricKey: 'metric_2', + history: [ + { key: 'metric_2', value: 55, step: -1, timestamp: 1556662043000 }, + { key: 'metric_2', value: 111, step: 0, timestamp: 1556662044000 }, + ], + runUuid: 'runUuid1', + runDisplayName: 'runDisplayName1', + }, + { + metricKey: 'metric_1', + history: [ + { key: 'metric_1', value: 150, step: 3, timestamp: 1556662043000 }, + { key: 'metric_1', value: 200, step: 4, timestamp: 1556662044000 }, + ], + runUuid: 'runUuid2', + runDisplayName: 'runDisplayName2', + }, + { + metricKey: 'metric_2', + history: [ + { key: 'metric_2', value: 
155, step: -4, timestamp: 1556662043000 }, + { key: 'metric_2', value: 222, step: -3, timestamp: 1556662044000 }, + ], + runUuid: 'runUuid2', + runDisplayName: 'runDisplayName2', + }, + ], + getMetricHistoryApi: jest.fn(), + location: { + search: + '?runs=["runUuid1","runUuid2"]&experiment=0' + + '&plot_metric_keys=["metric_1","metric_2"]', + }, + history: { push: jest.fn() }, + runDisplayNames: ['runDisplayName1', 'runDisplayName2'], + }; + }); + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); + + test('predictChartType()', () => { + expect( + MetricsPlotPanel.predictChartType(minimalPropsForLineChart.metricsWithRunInfoAndHistory), + ).toBe(CHART_TYPE_LINE); + expect( + MetricsPlotPanel.predictChartType(minimalPropsForBarChart.metricsWithRunInfoAndHistory), + ).toBe(CHART_TYPE_BAR); + }); + + test('isComparing()', () => { + const s1 = '?runs=["runUuid1","runUuid2"]&plot_metric_keys=["metric_1","metric_2"]'; + const s2 = '?runs=["runUuid1"]&plot_metric_keys=["metric_1","metric_2"]'; + expect(MetricsPlotPanel.isComparing(s1)).toBe(true); + expect(MetricsPlotPanel.isComparing(s2)).toBe(false); + }); + + test('getMetrics() should sort the history by timestamp for `Time (Relative)` x-axis', () => { + wrapper = shallow(); + instance = wrapper.instance(); + + instance.setState({ selectedXAxis: X_AXIS_RELATIVE }); + const metrics = minimalPropsForLineChart.metricsWithRunInfoAndHistory; + metrics[0].history.sort(); // sort in place before comparison + expect(instance.getMetrics()).toEqual(metrics); + }); + + test('getMetrics() should sort the history by timestamp for `Time (Wall)` x-axis', () => { + wrapper = shallow(); + instance = wrapper.instance(); + + instance.setState({ selectedXAxis: X_AXIS_WALL }); + const metrics = minimalPropsForLineChart.metricsWithRunInfoAndHistory; + metrics[0].history.sort(); // sort in place before comparison + expect(instance.getMetrics()).toEqual(metrics); + }); + + test('getMetrics() should sort the history by step&timestamp for `Step` x-axis', () => { + wrapper = shallow(); + instance = wrapper.instance(); + + instance.setState({
selectedXAxis: X_AXIS_STEP }); + const metrics = minimalPropsForLineChart.metricsWithRunInfoAndHistory; + metrics[0].history.sort(Utils.compareByStepAndTimestamp); // sort in place before comparison + expect(instance.getMetrics()).toEqual(metrics); + }); +}); diff --git a/mlflow/server/js/src/components/MetricsPlotView.js b/mlflow/server/js/src/components/MetricsPlotView.js new file mode 100644 index 0000000000000..0a624b610f403 --- /dev/null +++ b/mlflow/server/js/src/components/MetricsPlotView.js @@ -0,0 +1,128 @@ +import React from 'react'; +import Utils from '../utils/Utils'; +import _ from 'lodash'; +import PropTypes from 'prop-types'; +import { X_AXIS_STEP, X_AXIS_RELATIVE } from './MetricsPlotControls'; +import { CHART_TYPE_BAR } from './MetricsPlotPanel'; +import Plot from 'react-plotly.js'; + +const MAX_RUN_NAME_DISPLAY_LENGTH = 36; + +export class MetricsPlotView extends React.Component { + static propTypes = { + runUuids: PropTypes.arrayOf(String).isRequired, + runDisplayNames: PropTypes.arrayOf(String).isRequired, + metrics: PropTypes.arrayOf(Object).isRequired, + xAxis: PropTypes.string.isRequired, + metricKeys: PropTypes.arrayOf(String).isRequired, + // Whether or not to show point markers on the line chart + showPoint: PropTypes.bool.isRequired, + chartType: PropTypes.string.isRequired, + isComparing: PropTypes.bool.isRequired, + yAxisLogScale: PropTypes.bool.isRequired, + lineSmoothness: PropTypes.number, + }; + + static getLineLegend = (metricKey, runDisplayName, isComparing) => { + let legend = metricKey; + if (isComparing) { + legend += `, ${Utils.truncateString(runDisplayName, MAX_RUN_NAME_DISPLAY_LENGTH)}`; + } + return legend; + }; + + static parseTimestamp = (timestamp, history, xAxis) => { + if (xAxis === X_AXIS_RELATIVE) { + const minTimestamp = _.minBy(history, 'timestamp').timestamp; + return (timestamp - minTimestamp) / 1000; + } + return Utils.formatTimestamp(timestamp); + }; + + getPlotPropsForLineChart = () => { + const { metrics, xAxis, showPoint, yAxisLogScale, lineSmoothness, isComparing } = this.props; + const data = metrics.map((metric) => { + const { metricKey, runDisplayName, history } = metric; + const isSingleHistory = history.length === 0; + return { + name: MetricsPlotView.getLineLegend(metricKey, runDisplayName, isComparing), + x: history.map((entry) => { + if (xAxis === X_AXIS_STEP) { + return entry.step; + } + return MetricsPlotView.parseTimestamp(entry.timestamp, history, xAxis); + }), + y: history.map((entry) => entry.value), + type: 'scatter', + mode: isSingleHistory ? 'markers' : showPoint ? 
'lines+markers' : 'lines', + line: { shape: 'spline', smoothing: lineSmoothness }, + }; + }); + const props = { data }; + if (yAxisLogScale) { + props.layout = { + yaxis: { type: 'log', autorange: true }, + }; + } + return props; + }; + + getPlotPropsForBarChart = () => { + /* eslint-disable no-param-reassign */ + const { runUuids, runDisplayNames, yAxisLogScale } = this.props; + + // A reverse lookup of `metricKey: { runUuid: value, metricKey }` + const historyByMetricKey = this.props.metrics.reduce((map, metric) => { + const { runUuid, metricKey, history } = metric; + const value = history[0] && history[0].value; + if (!map[metricKey]) { + map[metricKey] = { metricKey, [runUuid]: value }; + } else { + map[metricKey][runUuid] = value; + } + return map; + }, {}); + + const arrayOfHistorySortedByMetricKey = _.sortBy( + Object.values(historyByMetricKey), + 'metricKey', + ); + + const sortedMetricKeys = arrayOfHistorySortedByMetricKey.map((history) => history.metricKey); + + const data = runUuids.map((runUuid, i) => ({ + name: Utils.truncateString(runDisplayNames[i], MAX_RUN_NAME_DISPLAY_LENGTH), + x: sortedMetricKeys, + y: arrayOfHistorySortedByMetricKey.map((history) => history[runUuid]), + type: 'bar', + })); + + const layout = { barmode: 'group' }; + const props = { data, layout }; + if (yAxisLogScale) { + props.layout.yaxis = { type: 'log', autorange: true }; + } + return props; + }; + + render() { + const plotProps = + this.props.chartType === CHART_TYPE_BAR + ? this.getPlotPropsForBarChart() + : this.getPlotPropsForLineChart(); + return ( +
+ +
+ ); + } +} diff --git a/mlflow/server/js/src/components/MetricsPlotView.test.js b/mlflow/server/js/src/components/MetricsPlotView.test.js new file mode 100644 index 0000000000000..64442eaa63a75 --- /dev/null +++ b/mlflow/server/js/src/components/MetricsPlotView.test.js @@ -0,0 +1,189 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { MetricsPlotView } from './MetricsPlotView'; +import { X_AXIS_RELATIVE, X_AXIS_WALL } from './MetricsPlotControls'; +import { CHART_TYPE_BAR, CHART_TYPE_LINE } from './MetricsPlotPanel'; +import Utils from '../utils/Utils'; +import Plot from 'react-plotly.js'; + +const metricsForLine = [ + { + metricKey: 'metric_0', + history: [ + { + key: 'metric_0', + value: 100, + step: 0, + timestamp: 1556662043000, + }, + { + key: 'metric_0', + value: 200, + step: 1, + timestamp: 1556662044000, + }, + ], + runUuid: 'runUuid1', + runDisplayName: 'RunDisplayName1', + }, + { + metricKey: 'metric_1', + history: [ + { + key: 'metric_1', + value: 300, + step: 0, + timestamp: 1556662043000, + }, + { + key: 'metric_0', + value: 400, + step: 1, + timestamp: 1556662044000, + }, + ], + runUuid: 'runUuid2', + runDisplayName: 'RunDisplayName2', + }, +]; + +const metricsForBar = [ + { + metricKey: 'metric_0', + history: [ + { + key: 'metric_0', + value: 100, + step: 0, + timestamp: 1556662043000, + }, + ], + runUuid: 'runUuid1', + runDisplayName: 'RunDisplayName1', + }, + { + metricKey: 'metric_0', + history: [ + { + key: 'metric_0', + value: 300, + step: 0, + timestamp: 1556662043000, + }, + ], + runUuid: 'runUuid2', + runDisplayName: 'RunDisplayName2', + }, +]; + +describe('unit tests', () => { + let wrapper; + let instance; + let minimalPropsForLineChart; + let minimalPropsForBarChart; + + beforeEach(() => { + minimalPropsForLineChart = { + runUuids: ['runUuid1', 'runUuid2'], + runDisplayNames: ['RunDisplayName1', 'RunDisplayName2'], + xAxis: X_AXIS_RELATIVE, + metrics: metricsForLine, + metricKeys: ['metric_0', 'metric_1'], + showPoint: false, + chartType: CHART_TYPE_LINE, + isComparing: false, + yAxisLogScale: false, + lineSmoothness: 0, + }; + minimalPropsForBarChart = { + ...minimalPropsForLineChart, + metrics: metricsForBar, + metricKeys: ['metric_0'], + chartType: CHART_TYPE_BAR, + }; + }); + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); + + test('getPlotPropsForLineChart()', () => { + wrapper = shallow(); + instance = wrapper.instance(); + expect(instance.getPlotPropsForLineChart()).toEqual({ + data: [ + { + name: 'metric_0', + x: [0, 1], + y: [100, 200], + type: 'scatter', + mode: 'lines', + line: { + shape: 'spline', + smoothing: 0, + }, + }, + { + name: 'metric_1', + x: [0, 1], + y: [300, 400], + type: 'scatter', + mode: 'lines', + line: { + shape: 'spline', + smoothing: 0, + }, + }, + ], + }); + }); + + test('getPlotPropsForBarChart()', () => { + wrapper = shallow(); + instance = wrapper.instance(); + expect(instance.getPlotPropsForBarChart()).toEqual({ + data: [ + { + name: 'RunDisplayName1', + x: ['metric_0'], + y: [100], + type: 'bar', + }, + { + name: 'RunDisplayName2', + x: ['metric_0'], + y: [300], + type: 'bar', + }, + ], + layout: { + barmode: 'group', + }, + }); + }); + + test('getLineLegend()', () => { + // how both metric and run name when comparing multiple runs + expect(MetricsPlotView.getLineLegend('metric_1', 'Run abc', true)).toBe('metric_1, Run abc'); + // only show metric name when there + expect(MetricsPlotView.getLineLegend('metric_1', 'Run 
abc', false)).toBe('metric_1'); + }); + + test('parseTimestamp()', () => { + const timestamp = 1556662044000; + const timestampStr = Utils.formatTimestamp(timestamp); + const history = [{ timestamp: 1556662043000 }]; + // convert to step when axis is Time (Relative) + expect(MetricsPlotView.parseTimestamp(timestamp, history, X_AXIS_RELATIVE)).toBe(1); + // convert to date time string when axis is Time (Wall) + expect(MetricsPlotView.parseTimestamp(timestamp, history, X_AXIS_WALL)).toBe(timestampStr); + }); + + test('should disable both plotly logo and the link to plotly studio', () => { + wrapper = shallow(); + const plot = wrapper.find(Plot); + expect(plot.props().config.displaylogo).toBe(false); + expect(plot.props().config.modeBarButtonsToRemove).toContain('sendDataToCloud'); + }); +}); diff --git a/mlflow/server/js/src/components/NoExperimentView.js b/mlflow/server/js/src/components/NoExperimentView.js new file mode 100644 index 0000000000000..bdd18264729bd --- /dev/null +++ b/mlflow/server/js/src/components/NoExperimentView.js @@ -0,0 +1,28 @@ +import React, { Component } from 'react'; +import Colors from '../styles/Colors'; +import noExperiments from '../static/no-experiments.svg'; + +export default class NoExperimentView extends Component { + render() { + return ( +
+ No experiments found. +

+ No Experiments Exist +

+

+ To create an experiment use the{' '} + + mlflow experiments + {' '} + CLI. +

+
+ ); + } +} diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotControls.js b/mlflow/server/js/src/components/ParallelCoordinatesPlotControls.js new file mode 100644 index 0000000000000..6e6610873e600 --- /dev/null +++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotControls.js @@ -0,0 +1,56 @@ +import React from 'react'; +import PropTypes from 'prop-types'; +import { TreeSelect } from 'antd'; + +export class ParallelCoordinatesPlotControls extends React.Component { + static propTypes = { + // An array of available parameter keys to select + paramKeys: PropTypes.arrayOf(String).isRequired, + // An array of available metric keys to select + metricKeys: PropTypes.arrayOf(String).isRequired, + selectedParamKeys: PropTypes.arrayOf(String).isRequired, + selectedMetricKeys: PropTypes.arrayOf(String).isRequired, + handleParamsSelectChange: PropTypes.func.isRequired, + handleMetricsSelectChange: PropTypes.func.isRequired, + }; + + static handleFilterChange = (text, option) => + option.props.title.toUpperCase().includes(text.toUpperCase()); + + render() { + const { + paramKeys, + metricKeys, + selectedParamKeys, + selectedMetricKeys, + handleParamsSelectChange, + handleMetricsSelectChange + } = this.props; + return ( +
+
Parameters:
+ ({ title: k, value: k, label: k}))} + onChange={handleParamsSelectChange} + filterTreeNode={ParallelCoordinatesPlotControls.handleFilterChange} + /> +
Metrics:
+ ({ title: k, value: k, label: k}))} + onChange={handleMetricsSelectChange} + filterTreeNode={ParallelCoordinatesPlotControls.handleFilterChange} + /> +
+ ); + } +} diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotControls.test.js b/mlflow/server/js/src/components/ParallelCoordinatesPlotControls.test.js new file mode 100644 index 0000000000000..5158bb0767f7c --- /dev/null +++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotControls.test.js @@ -0,0 +1,24 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { ParallelCoordinatesPlotControls } from './ParallelCoordinatesPlotControls'; + +describe('unit tests', () => { + let wrapper; + let mininumProps; + + beforeEach(() => { + mininumProps = { + paramKeys: ['param_0', 'param_1'], + metricKeys: ['metric_0', 'metric_1'], + selectedParamKeys: ['param_0', 'param_1'], + selectedMetricKeys: ['metric_0', 'metric_1'], + handleParamsSelectChange: jest.fn(), + handleMetricsSelectChange: jest.fn(), + }; + }); + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); +}); diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.css b/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.css new file mode 100644 index 0000000000000..d262941abb251 --- /dev/null +++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.css @@ -0,0 +1,7 @@ +.parallel-coorinates-plot-panel { + display: flex; +} + +.parallel-coorinates-plot-panel .plot-controls .metrics-select { + width: 300px; +} diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.js b/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.js new file mode 100644 index 0000000000000..e4911fce8c3f5 --- /dev/null +++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.js @@ -0,0 +1,69 @@ +import React from 'react'; +import { connect } from 'react-redux'; +import PropTypes from 'prop-types'; +import ParallelCoordinatesPlotView from './ParallelCoordinatesPlotView'; +import { ParallelCoordinatesPlotControls } from './ParallelCoordinatesPlotControls'; +import { getSharedMetricKeysByRunUuids, getSharedParamKeysByRunUuids } from '../reducers/Reducers'; +import _ from 'lodash'; +import { Empty } from 'antd'; + +import './ParallelCoordinatesPlotPanel.css'; + +export class ParallelCoordinatesPlotPanel extends React.Component { + static propTypes = { + runUuids: PropTypes.arrayOf(String).isRequired, + // An array of parameter keys shared by all runs + sharedParamKeys: PropTypes.arrayOf(String).isRequired, + // An array of metric keys shared by all runs + sharedMetricKeys: PropTypes.arrayOf(String).isRequired, + }; + + state = { + // Default to select all parameters + selectedParamKeys: this.props.sharedParamKeys, + // Default to select the first metric key. + // Note that there will be no color scaling if no metric is selected. + selectedMetricKeys: this.props.sharedMetricKeys.slice(0, 1), + }; + + handleParamsSelectChange = (paramValues) => { + this.setState({ selectedParamKeys: paramValues }); + }; + + handleMetricsSelectChange = (metricValues) => { + this.setState({ selectedMetricKeys: metricValues }); + }; + + render() { + const { runUuids, sharedParamKeys, sharedMetricKeys } = this.props; + const { selectedParamKeys, selectedMetricKeys } = this.state; + return ( +
+ + {(!_.isEmpty(selectedParamKeys) || !_.isEmpty(selectedMetricKeys)) ? ( + + ) : } +
+ );
+ }
+}
+
+const mapStateToProps = (state, ownProps) => {
+ const { runUuids } = ownProps;
+ const sharedParamKeys = getSharedParamKeysByRunUuids(runUuids, state);
+ const sharedMetricKeys = getSharedMetricKeysByRunUuids(runUuids, state);
+ return { sharedParamKeys, sharedMetricKeys };
+};
+
+export default connect(mapStateToProps)(ParallelCoordinatesPlotPanel);
diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.test.js b/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.test.js
new file mode 100644
index 0000000000000..696dafe088560
--- /dev/null
+++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotPanel.test.js
@@ -0,0 +1,37 @@
+import React from 'react';
+import { shallow } from 'enzyme';
+import { ParallelCoordinatesPlotPanel } from './ParallelCoordinatesPlotPanel';
+import ParallelCoordinatesPlotView from './ParallelCoordinatesPlotView';
+import { Empty } from 'antd';
+
+describe('unit tests', () => {
+ let wrapper;
+ let instance;
+ let minimumProps;
+
+ beforeEach(() => {
+ minimumProps = {
+ runUuids: ['runUuid_0', 'runUuid_1'],
+ sharedParamKeys: ['param_0', 'param_1'],
+ sharedMetricKeys: ['metric_0', 'metric_1'],
+ };
+ });
+
+ test('should render with minimal props without exploding', () => {
+ wrapper = shallow();
+ expect(wrapper.length).toBe(1);
+ });
+
+ test('should render empty component when no dimension is selected', () => {
+ wrapper = shallow();
+ instance = wrapper.instance();
+ expect(wrapper.find(ParallelCoordinatesPlotView)).toHaveLength(1);
+ expect(wrapper.find(Empty)).toHaveLength(0);
+ instance.setState({
+ selectedParamKeys: [],
+ selectedMetricKeys: [],
+ });
+ expect(wrapper.find(ParallelCoordinatesPlotView)).toHaveLength(0);
+ expect(wrapper.find(Empty)).toHaveLength(1);
+ });
+});
diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotView.js b/mlflow/server/js/src/components/ParallelCoordinatesPlotView.js
new file mode 100644
index 0000000000000..dbdb105de3446
--- /dev/null
+++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotView.js
@@ -0,0 +1,186 @@
+import React from 'react';
+import { connect } from 'react-redux';
+import Plot from 'react-plotly.js';
+import PropTypes from 'prop-types';
+import Utils from '../utils/Utils';
+import _ from 'lodash';
+
+const AXIS_LABEL_CLS = '.pcp-plot .parcoords .y-axis .axis-heading .axis-title';
+
+export class ParallelCoordinatesPlotView extends React.Component {
+ static propTypes = {
+ runUuids: PropTypes.arrayOf(String).isRequired,
+ paramKeys: PropTypes.arrayOf(String).isRequired,
+ metricKeys: PropTypes.arrayOf(String).isRequired,
+ paramDimensions: PropTypes.arrayOf(Object).isRequired,
+ metricDimensions: PropTypes.arrayOf(Object).isRequired,
+ };
+
+ state = {
+ // Current sequence of all axes, both parameters and metrics.
+ sequence: [...this.props.paramKeys, ...this.props.metricKeys],
+ };
+
+ static getDerivedStateFromProps(props, state) {
+ const keysFromProps = [...props.paramKeys, ...props.metricKeys];
+ const keysFromState = state.sequence;
+ if (!_.isEqual(_.sortBy(keysFromProps), _.sortBy(keysFromState))) {
+ return { sequence: keysFromProps };
+ }
+ return null;
+ }
+
+ getData() {
+ const { sequence } = this.state;
+ const { paramDimensions, metricDimensions, metricKeys } = this.props;
+ const lastMetricKey = this.findLastKeyFromState(metricKeys);
+ const lastMetricDimension = this.props.metricDimensions.find((d) => d.label === lastMetricKey);
+ const colorScaleConfigs = ParallelCoordinatesPlotView.getColorScaleConfigsForDimension(
+ lastMetricDimension,
+ );
+ // This keeps the axis order consistent across renders.
+ const orderedDimensions = ParallelCoordinatesPlotView.getDimensionsOrderedBySequence(
+ [...paramDimensions, ...metricDimensions],
+ sequence,
+ );
+ return [
+ {
+ type: 'parcoords',
+ line: { ...colorScaleConfigs },
+ dimensions: orderedDimensions,
+ },
+ ];
+ }
+
+ static getDimensionsOrderedBySequence(dimensions, sequence) {
+ return _.sortBy(dimensions, [(dimension) => sequence.indexOf(dimension.label)]);
+ }
+
+ static getLabelElementsFromDom = () => Array.from(document.querySelectorAll(AXIS_LABEL_CLS));
+
+ findLastKeyFromState(keys) {
+ const { sequence } = this.state;
+ const keySet = new Set(keys);
+ return _.findLast(sequence, (key) => keySet.has(key));
+ }
+
+ static getColorScaleConfigsForDimension(dimension) {
+ if (!dimension) return null;
+ const cmin = _.min(dimension.values);
+ const cmax = _.max(dimension.values);
+ return {
+ showscale: true,
+ colorscale: 'Jet',
+ cmin,
+ cmax,
+ color: dimension.values,
+ };
+ }
+
+ // Update styles (green & bold) for metric axes.
+ // Note(Zangr) 2019-6-25 this is needed because there is no per-axis label setting available. This
+ // needs to be called every time the chart updates. More information about currently available label
+ // settings here: https://plot.ly/javascript/reference/#parcoords-labelfont
+ updateMetricAxisLabelStyle = () => {
+ /* eslint-disable no-param-reassign */
+ const metricsKeySet = new Set(this.props.metricKeys);
+ // TODO(Zangr) 2019-06-20 This assumes name uniqueness across params & metrics. Find a way to
+ // make it more deterministic. Ex. Add different data attributes to indicate axis kind.
+ ParallelCoordinatesPlotView.getLabelElementsFromDom()
+ .filter((el) => metricsKeySet.has(el.innerHTML))
+ .forEach((el) => {
+ el.style.fill = 'green';
+ el.style.fontWeight = 'bold';
+ });
+ };
+
+ maybeUpdateStateForColorScale = (currentSequenceFromPlotly) => {
+ const rightmostMetricKeyFromState = this.findLastKeyFromState(this.props.metricKeys);
+ const metricsKeySet = new Set(this.props.metricKeys);
+ const rightmostMetricKeyFromPlotly = _.findLast(
+ currentSequenceFromPlotly,
+ (key) => metricsKeySet.has(key),
+ );
+ // Currently we always render the color scale based on the rightmost metric axis, so if that
+ // changes we need to setState with the new axes sequence to trigger a rerender.
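To make the rule in the comment above concrete (the conditional that applies it follows immediately below in the diff), here is a minimal standalone sketch. It assumes lodash, as the component itself does; the `sequence` and `metricKeys` values are hypothetical:

// Illustrative sketch only; not part of the patch above.
const _ = require('lodash');

// Hypothetical axis order as plotly might report it after the user drags an axis.
const sequence = ['param_0', 'metric_1', 'param_1', 'metric_0'];
const metricKeys = ['metric_0', 'metric_1'];

// The color scale is keyed to the rightmost metric axis: the last entry in the
// axis sequence that is also a metric key.
const metricsKeySet = new Set(metricKeys);
const rightmostMetric = _.findLast(sequence, (key) => metricsKeySet.has(key));
console.log(rightmostMetric); // => 'metric_0'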
+ if (rightmostMetricKeyFromState !== rightmostMetricKeyFromPlotly) { + this.setState({ sequence: currentSequenceFromPlotly }); + } + }; + + handlePlotUpdate = ({ data: [{ dimensions }] }) => { + this.updateMetricAxisLabelStyle(); + this.maybeUpdateStateForColorScale(dimensions.map((d) => d.label)); + }; + + render() { + return ( + + ); + } +} + +export const generateAttributesForCategoricalDimension = (labels) => { + // Create a lookup from label to its own alphabetical sorted order. + // Ex. ['A', 'B', 'C'] => { 'A': '0', 'B': '1', 'C': '2' } + const sortedUniqLabels = _.uniq(labels).sort(); + const labelToIndexStr = _.invert(sortedUniqLabels); + const attributes = {}; + + // Values are assigned to their alphabetical sorted index number + attributes.values = labels.map((label) => Number(labelToIndexStr[label])); + + // Default to alphabetical order for categorical axis here. Ex. [0, 1, 2, 3 ...] + attributes.tickvals = _.range(sortedUniqLabels.length); + + // Default to alphabetical order for categorical axis here. Ex. ['A', 'B', 'C', 'D' ...] + attributes.ticktext = sortedUniqLabels; + + return attributes; +}; + +// Infer type with the first run's value +export const inferType = (key, runUuids, entryByRunUuid) => { + return isNaN(entryByRunUuid[runUuids[0]][key].value) ? 'string' : 'number'; +}; + +export const createDimension = (key, runUuids, entryByRunUuid) => { + let attributes = {}; + const dataType = inferType(key, runUuids, entryByRunUuid); + if (dataType === 'string') { + attributes = generateAttributesForCategoricalDimension( + runUuids.map((runUuid) => entryByRunUuid[runUuid][key].value), + ); + } else { + attributes.values = runUuids.map((runUuid) => { + const { value } = entryByRunUuid[runUuid][key]; + return isNaN(value) ? 0 : Number(Utils.formatMetric(value)); // Default NaN to zero here + }); + } + return { + label: key, + ...attributes, + }; +}; + +const mapStateToProps = (state, ownProps) => { + const { runUuids, paramKeys, metricKeys } = ownProps; + const { latestMetricsByRunUuid, paramsByRunUuid } = state.entities; + const paramDimensions = paramKeys.map((paramKey) => + createDimension(paramKey, runUuids, paramsByRunUuid), + ); + const metricDimensions = metricKeys.map((metricKey) => + createDimension(metricKey, runUuids, latestMetricsByRunUuid), + ); + return { paramDimensions, metricDimensions }; +}; + +export default connect(mapStateToProps)(ParallelCoordinatesPlotView); diff --git a/mlflow/server/js/src/components/ParallelCoordinatesPlotView.test.js b/mlflow/server/js/src/components/ParallelCoordinatesPlotView.test.js new file mode 100644 index 0000000000000..561c00c325daa --- /dev/null +++ b/mlflow/server/js/src/components/ParallelCoordinatesPlotView.test.js @@ -0,0 +1,178 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { + ParallelCoordinatesPlotView, + generateAttributesForCategoricalDimension, + createDimension, + inferType, +} from './ParallelCoordinatesPlotView'; + +describe('unit tests', () => { + let wrapper; + let instance; + let mininumProps; + + beforeEach(() => { + mininumProps = { + runUuids: ['runUuid_0', 'runUuid_1'], + paramKeys: ['param_0', 'param_1'], + metricKeys: ['metric_0', 'metric_1'], + paramDimensions: [ + { + label: 'param_0', + values: [1, 2], + }, + { + label: 'param_1', + values: [2, 3], + }, + ], + metricDimensions: [ + { + label: 'metric_0', + values: [1, 2], + }, + { + label: 'metric_1', + values: [2, 3], + }, + ], + }; + }); + + test('should render with minimal props without exploding', () => { + wrapper = 
shallow(); + expect(wrapper.length).toBe(1); + }); + + test('getDerivedStateFromProps should return null when the selections do not change', () => { + const props = { + paramKeys: ['param_0', 'param_1'], + metricKeys: ['metric_0', 'metric_1'], + }; + // state with different order but same selections + const state = { + sequence: ['param_0', 'metric_0', 'metric_1', 'param_1'], + }; + expect(ParallelCoordinatesPlotView.getDerivedStateFromProps(props, state)).toBe(null); + }); + + test('getDerivedStateFromProps should return state when the selections changes', () => { + const props = { + paramKeys: ['param_0', 'param_1'], + metricKeys: ['metric_0', 'metric_1', 'metric_2'], // props comes with an extra metric_2 + }; + const state = { + sequence: ['param_0', 'metric_0', 'metric_1', 'param_1'], + }; + expect(ParallelCoordinatesPlotView.getDerivedStateFromProps(props, state)).toEqual({ + sequence: ['param_0', 'param_1', 'metric_0', 'metric_1', 'metric_2'], + }); + }); + + test('maybeUpdateStateForColorScale should trigger setState when last metric change', () => { + wrapper = shallow(); + instance = wrapper.instance(); + instance.findLastMetricFromState = jest.fn(() => 'metric_1'); + instance.setState = jest.fn(); + instance.maybeUpdateStateForColorScale(['metric_1', 'metric_0']); // rightmost metric changes + expect(instance.setState).toBeCalled(); + }); + + test('maybeUpdateStateForColorScale should not trigger setState when last metric stays', () => { + wrapper = shallow(); + instance = wrapper.instance(); + instance.findLastMetricFromState = jest.fn(() => 'metric_1'); + instance.setState = jest.fn(); + instance.maybeUpdateStateForColorScale(['metric_0', 'metric_1']); // rightmost metric stays + expect(instance.setState).not.toBeCalled(); + }); + + test('generateAttributesForCategoricalDimension', () => { + expect(generateAttributesForCategoricalDimension(['A', 'B', 'C', 'B', 'C'])).toEqual({ + values: [0, 1, 2, 1, 2], + tickvals: [0, 1, 2], + ticktext: ['A', 'B', 'C'], + }); + }); + + test('inferType works with numeric dimension', () => { + const key = 'metric_0'; + const runUuids = ['runUuid_0', 'runUuid_1']; + const entryByRunUuid = { + runUuid_0: { + metric_0: { value: 1 }, + }, + runUuid_1: { + metric_0: { value: 2 }, + }, + }; + expect(inferType(key, runUuids, entryByRunUuid)).toBe('number'); + }); + + test('inferType works with categorical dimension', () => { + const key = 'metric_0'; + const runUuids = ['runUuid_0', 'runUuid_1']; + const entryByRunUuid = { + runUuid_0: { + metric_0: { value: 'B' }, + }, + runUuid_1: { + metric_0: { value: 'A' }, + }, + }; + expect(inferType(key, runUuids, entryByRunUuid)).toBe('string'); + }); + + test('createDimension should work with numeric dimension', () => { + const key = 'metric_0'; + const runUuids = ['runUuid_0', 'runUuid_1']; + const entryByRunUuid = { + runUuid_0: { + metric_0: { value: 1 }, + }, + runUuid_1: { + metric_0: { value: 2 }, + }, + }; + expect(createDimension(key, runUuids, entryByRunUuid)).toEqual({ + label: 'metric_0', + values: [1, 2], + }); + }); + + test('createDimension should work with categorical dimension', () => { + const key = 'metric_0'; + const runUuids = ['runUuid_0', 'runUuid_1']; + const entryByRunUuid = { + runUuid_0: { + metric_0: { value: 'B' }, + }, + runUuid_1: { + metric_0: { value: 'A' }, + }, + }; + expect(createDimension(key, runUuids, entryByRunUuid)).toEqual({ + label: 'metric_0', + values: [1, 0], + tickvals: [0, 1], + ticktext: ['A', 'B'], + }); + }); + + test('getColorScaleConfigsForDimension', () => 
{ + wrapper = shallow(); + instance = wrapper.instance(); + const dimension = { + label: 'metric_0', + values: [3, 1, 2, 3, 0, 2], + }; + expect(ParallelCoordinatesPlotView.getColorScaleConfigsForDimension(dimension)).toEqual({ + showscale: true, + colorscale: 'Jet', + cmin: 0, + cmax: 3, + color: [3, 1, 2, 3, 0, 2], + }); + }); +}); diff --git a/mlflow/server/js/src/components/PermissionDeniedView.js b/mlflow/server/js/src/components/PermissionDeniedView.js new file mode 100644 index 0000000000000..4f46a34c44d63 --- /dev/null +++ b/mlflow/server/js/src/components/PermissionDeniedView.js @@ -0,0 +1,23 @@ +import React, { Component } from 'react'; +import PropTypes from 'prop-types'; +import Colors from '../styles/Colors'; + +export default class PermissionDeniedView extends Component { + static propTypes = { + errorMessage: PropTypes.string.isRequired, + }; + + render() { + const defaultMessage = 'The current user does not have permission to view this page.'; + return ( +
+

+ Permission Denied +

+

+ {this.props.errorMessage || defaultMessage} +

+
+ ); + } +} diff --git a/mlflow/server/js/src/components/RequestStateWrapper.css b/mlflow/server/js/src/components/RequestStateWrapper.css index 3b16002be753a..f02025e07e085 100644 --- a/mlflow/server/js/src/components/RequestStateWrapper.css +++ b/mlflow/server/js/src/components/RequestStateWrapper.css @@ -4,54 +4,3 @@ margin-left: auto; margin-right: auto; } - -.RequestStateWrapper-spinner { - width: 100px; - margin-top: 100px; - margin-left: auto; - margin-right: auto; -} - -.RequestStateWrapper-spinner img { - position: absolute; - top: 350px; - left: 50%; - width: 80px; - height: 80px; - margin:-40px 0 0 -40px; - opacity: 0; - -webkit-animation: spin 3s linear 1s infinite; - -moz-animation: spin 3s linear 1s infinite; - animation: spin 3s linear 1s infinite; -} - -@-moz-keyframes spin { - 0% { - opacity: 1; - } - 100% { - opacity: 1; - -moz-transform: rotate(360deg); - } -} - -@-webkit-keyframes spin { - 0% { - opacity: 1; - } - 100% { - opacity: 1; - -webkit-transform: rotate(360deg); - } -} - -@keyframes spin { - 0% { - opacity: 1; - } - 100% { - opacity: 1; - -webkit-transform: rotate(360deg); - transform: rotate(360deg); - } -} diff --git a/mlflow/server/js/src/components/RequestStateWrapper.js b/mlflow/server/js/src/components/RequestStateWrapper.js index d70561cf0e6bd..0e3b2a0dc0529 100644 --- a/mlflow/server/js/src/components/RequestStateWrapper.js +++ b/mlflow/server/js/src/components/RequestStateWrapper.js @@ -1,20 +1,20 @@ import React, { Component } from 'react'; import './RequestStateWrapper.css'; -import spinner from '../static/mlflow-spinner.png'; import { connect } from 'react-redux'; import { getApis } from '../reducers/Reducers'; import PropTypes from 'prop-types'; +import {Spinner} from "./Spinner"; export class RequestStateWrapper extends Component { static propTypes = { + // Should this component render the child before all the requests are complete? shouldOptimisticallyRender: PropTypes.bool, requests: PropTypes.arrayOf(PropTypes.object).isRequired, - children: PropTypes.node.isRequired, - // (requests) => undefined | React Node. - // This function is called when all requests are complete and when one or more of them is - // in the error state. The function can choose to render an error view depending on the + // (isLoading: boolean, shouldRenderError: boolean, requests) => undefined | React Node. + // This function is called when all requests are complete. + // The function can choose to render an error view depending on the // type of errors received. If undefined is returned, then render the AppErrorBoundary view. - errorRenderFunc: PropTypes.func, + children: PropTypes.oneOfType([PropTypes.func, PropTypes.element]), }; static defaultProps = { @@ -43,34 +43,31 @@ export class RequestStateWrapper extends Component { } render() { - const { children, errorRenderFunc, requests } = this.props; + const { children, requests } = this.props; const { shouldRender, shouldRenderError } = this.state; - if (shouldRender) { - if (shouldRenderError) { - if (errorRenderFunc) { - const result = errorRenderFunc(this.props.requests); - if (result) { - return result; - } + if (shouldRender || this.props.shouldOptimisticallyRender) { + if (typeof children === "function") { + const child = children(!shouldRender, shouldRenderError, requests); + if (child) { + return child; } - // This triggers the OOPS error boundary. - console.error("ERROR", requests); - throw Error("GOTO error boundary"); - } else { - return
{children}
; + triggerError(requests); } + if (shouldRenderError) { + triggerError(requests); + } + return children; } - if (this.props.shouldOptimisticallyRender) { - return
{children}
; - } - return ( -
- Page loading... -
- ); + return ; } } +const triggerError = (requests) => { + // This triggers the OOPS error boundary. + console.error("ERROR", requests); + throw Error("GOTO error boundary"); +}; + const mapStateToProps = (state, ownProps) => { return Object.assign({}, ownProps, { requests: getApis(ownProps.requestIds, state) diff --git a/mlflow/server/js/src/components/RequestStateWrapper.test.js b/mlflow/server/js/src/components/RequestStateWrapper.test.js index 1912c2d7516b2..2d3940eeab894 100644 --- a/mlflow/server/js/src/components/RequestStateWrapper.test.js +++ b/mlflow/server/js/src/components/RequestStateWrapper.test.js @@ -3,6 +3,7 @@ import { RequestStateWrapper } from './RequestStateWrapper'; import ErrorCodes from '../sdk/ErrorCodes'; import { ErrorWrapper } from '../Actions'; import { shallow } from 'enzyme'; +import {Spinner} from "./Spinner"; const activeRequest = { id: 'a', @@ -31,7 +32,7 @@ test("Renders loading page when requests are not complete", () => {
I am the child
); - expect(wrapper.find('.RequestStateWrapper-spinner')).toHaveLength(1); + expect(wrapper.find(Spinner)).toHaveLength(1); }); test("Renders children when requests are complete", () => { @@ -46,7 +47,7 @@ test("Renders children when requests are complete", () => { expect(wrapper.find('div.child').text()).toContain("I am the child"); }); -test("Throws exception if errorRenderFunc is not defined and wrapper has bad request.", () => { +test("Throws exception if child is a React element and wrapper has bad request.", () => { try { shallow( I am the child
 );
+ // fail() comes from Jest's jasmine runtime; there is no global `assert` in this environment.
+ fail();
 } catch (e) {
 expect(e.message).toContain("GOTO error boundary");
 }
});

-test("Renders errorRenderFunc if wrapper has bad request.", () => {
+test("Render func works if wrapper has bad request.", () => {
 const wrapper = shallow(
- {
- expect(requests).toEqual([errorRequest]);
- return
Error!
; + + {(isLoading, shouldRenderError, requests) => { + if (shouldRenderError) { + expect(requests).toEqual([errorRequest]); + return
Error!
; + } + return
I am the child
; }} - > -
I am the child
); expect(wrapper.find('div.error')).toHaveLength(1); diff --git a/mlflow/server/js/src/components/RunPage.js b/mlflow/server/js/src/components/RunPage.js index e4d53b02f0d3c..315203ce2a3a5 100644 --- a/mlflow/server/js/src/components/RunPage.js +++ b/mlflow/server/js/src/components/RunPage.js @@ -37,21 +37,23 @@ class RunPage extends Component { requestIds={[this.state.getRunRequestId, this.state.listArtifactRequestId, this.state.getExperimentRequestId]} - errorRenderFunc={(requests) => { - const getRunRequest = Utils.getRequestWithId(requests, this.state.getRunRequestId); - if (getRunRequest.error.getErrorCode() === ErrorCodes.RESOURCE_DOES_NOT_EXIST) { - return ; - } - return undefined; - }} > - Routes.getMetricPageRoute([this.props.runUuid], key, this.props.experimentId) + {(isLoading, shouldRenderError, requests) => { + if (shouldRenderError) { + const getRunRequest = Utils.getRequestWithId(requests, this.state.getRunRequestId); + if (getRunRequest.error.getErrorCode() === ErrorCodes.RESOURCE_DOES_NOT_EXIST) { + return ; + } + return undefined; } - experimentId={this.props.experimentId} - /> + return + Routes.getMetricPageRoute([this.props.runUuid], key, this.props.experimentId) + } + experimentId={this.props.experimentId} + />; + }}
); diff --git a/mlflow/server/js/src/components/RunView.css b/mlflow/server/js/src/components/RunView.css index b282b43e836ad..4774686f950d9 100644 --- a/mlflow/server/js/src/components/RunView.css +++ b/mlflow/server/js/src/components/RunView.css @@ -22,20 +22,19 @@ display: flex; flex-wrap: wrap; align-items: center; - padding-top: 16px; } .run-info { - min-width: 440px; - margin-bottom: 12px; + min-width: 450px; + margin-bottom: 8px; margin-right: 12px; } .metadata-header { - font-size: 16px; + font-size: 14px; color: #888; } .metadata-info { - font-size: 16px; + font-size: 14px; color: #333; } @@ -87,8 +86,13 @@ textarea.run-command { color: #ffffff; } +/** Menus in a header have negative margin-top to account for the header's margin-bottom */ +.mlflow-menu.header-menu { + margin-top: -22px; +} + .mlflow-dropdown { - margin-top: 28px; + margin-top: 18px; } .rename-run-form { diff --git a/mlflow/server/js/src/components/RunView.js b/mlflow/server/js/src/components/RunView.js index dc1bf83a99f90..04fad5f286609 100644 --- a/mlflow/server/js/src/components/RunView.js +++ b/mlflow/server/js/src/components/RunView.js @@ -5,17 +5,19 @@ import { connect } from 'react-redux'; import './RunView.css'; import HtmlTableView from './HtmlTableView'; import { Link } from 'react-router-dom'; +import Routes from '../Routes'; import { Dropdown, MenuItem } from 'react-bootstrap'; import ArtifactPage from './ArtifactPage'; import { getLatestMetrics } from '../reducers/MetricReducer'; import { Experiment } from '../sdk/MlflowMessages'; import Utils from '../utils/Utils'; -import { MLFLOW_INTERNAL_PREFIX } from "../utils/TagUtils"; import { NoteInfo } from "../utils/NoteUtils"; import BreadcrumbTitle from "./BreadcrumbTitle"; import RenameRunModal from "./modals/RenameRunModal"; import NoteEditorView from "./NoteEditorView"; import NoteShowView from "./NoteShowView"; +import EditableTagsTableView from './EditableTagsTableView'; +import { Icon } from 'antd'; const NOTES_KEY = 'notes'; @@ -35,7 +37,7 @@ class RunView extends Component { this.handleSubmittedNote = this.handleSubmittedNote.bind(this); this.handleNoteEditorViewCancel = this.handleNoteEditorViewCancel.bind(this); this.renderNoteSection = this.renderNoteSection.bind(this); - this.state.showTags = getVisibleTagValues(props.tags).length > 0; + this.state.showTags = Utils.getVisibleTagValues(props.tags).length > 0; } static propTypes = { @@ -144,15 +146,18 @@ class RunView extends Component { } getRunCommand() { - const { run, params } = this.props; + const { tags, params } = this.props; let runCommand = null; - if (run.source_type === "PROJECT") { - runCommand = 'mlflow run ' + shellEscape(run.source_name); - if (run.source_version && run.source_version !== "latest") { - runCommand += ' -v ' + shellEscape(run.source_version); + const sourceName = Utils.getSourceName(tags); + const sourceVersion = Utils.getSourceVersion(tags); + const entryPointName = Utils.getEntryPointName(tags); + if (Utils.getSourceType(tags) === "PROJECT") { + runCommand = 'mlflow run ' + shellEscape(sourceName); + if (sourceVersion && sourceVersion !== "latest") { + runCommand += ' -v ' + shellEscape(sourceVersion); } - if (run.entry_point_name && run.entry_point_name !== "main") { - runCommand += ' -e ' + shellEscape(run.entry_point_name); + if (entryPointName && entryPointName !== "main") { + runCommand += ' -e ' + shellEscape(entryPointName); } Object.values(params).sort().forEach(p => { runCommand += ' -P ' + shellEscape(p.key + '=' + p.value); @@ -162,11 +167,13 @@ class 
RunView extends Component { } render() { - const { run, params, tags, latestMetrics, getMetricPagePath } = this.props; + const { runUuid, run, params, tags, latestMetrics, getMetricPagePath } = this.props; const noteInfo = NoteInfo.fromRunTags(tags); const startTime = run.getStartTime() ? Utils.formatTimestamp(run.getStartTime()) : '(unknown)'; const duration = run.getStartTime() && run.getEndTime() ? run.getEndTime() - run.getStartTime() : null; + const queryParams = window.location && window.location.search ? + window.location.search : ""; const tableStyles = { table: { width: 'auto', @@ -190,7 +197,7 @@ class RunView extends Component { - +
Run ID: - {run.getRunUuid()} + {runUuid}
Source: - {Utils.renderSourceTypeIcon(run.source_type)} - {Utils.renderSource(run, tags)} + {Utils.renderSourceTypeIcon(Utils.getSourceType(tags))} + {Utils.renderSource(tags, queryParams)}
- {run.source_version ? + {Utils.getSourceVersion(tags) ?
Git Commit: - {Utils.renderVersion(run, false)} + {Utils.renderVersion(tags, false)}
: null } - {run.source_type === "PROJECT" ? + {Utils.getSourceType(tags) === "PROJECT" ?
Entry Point: - {run.entry_point_name || "main"} + {Utils.getEntryPointName(tags) || "main"}
: null }
User: - {run.getUserId()} + {Utils.getUser(run, tags)}
{duration !== null ?
@@ -247,11 +254,28 @@ class RunView extends Component {
: null } + {tags['mlflow.parentRunId'] !== undefined ? +
+ Parent Run: + + + {tags['mlflow.parentRunId'].value} + + +
+ : null + } {tags['mlflow.databricks.runURL'] !== undefined ?
Job Output: - Logs + + Logs +
: null @@ -274,8 +298,8 @@ class RunView extends Component { {!this.state.showNotes || !this.state.showNotesEditor ? {' '} - - + + : @@ -312,12 +336,10 @@ class RunView extends Component { {' '}Tags {this.state.showTags ? - : - null + : null }
@@ -326,7 +348,7 @@ class RunView extends Component { {' '}Artifacts {this.state.showArtifacts ? - : + : null }
@@ -366,15 +388,6 @@ const getParamValues = (params) => { ); }; -const getVisibleTagValues = (tags) => { - // Collate tag objects into list of [key, value] lists and filter MLflow-internal tags - return Object.values(tags).map((t) => - [t.getKey(), t.getValue()] - ).filter(t => - !t[0].startsWith(MLFLOW_INTERNAL_PREFIX) - ); -}; - const getMetricValues = (latestMetrics, getMetricPagePath) => { return Object.values(latestMetrics).sort().map((m) => { const key = m.key; @@ -389,7 +402,7 @@ const getMetricValues = (latestMetrics, getMetricPagePath) => { }; const shellEscape = (str) => { - if (/["\r\n\t ]/.test(str)) { + if ((/["\r\n\t ]/).test(str)) { return '"' + str.replace(/"/g, '\\"') + '"'; } return str; diff --git a/mlflow/server/js/src/components/Spinner.css b/mlflow/server/js/src/components/Spinner.css new file mode 100644 index 0000000000000..f44cb78c93faf --- /dev/null +++ b/mlflow/server/js/src/components/Spinner.css @@ -0,0 +1,56 @@ +.Spinner { + width: 100px; + margin-top: 100px; + margin-left: auto; + margin-right: auto; +} + +.Spinner img { + position: absolute; + top: 350px; + left: 50%; + width: 80px; + height: 80px; + margin:-40px 0 0 -40px; + opacity: 0; + -webkit-animation: spin 3s linear 1s infinite; + -moz-animation: spin 3s linear 1s infinite; + animation: spin 3s linear 1s infinite; +} + +.Spinner.Spinner-immediate img { + animation-delay: 0s; + -moz-animation-delay: 0s; + -webkit-animation-delay: 0s; +} + +@-moz-keyframes spin { + 0% { + opacity: 1; + } + 100% { + opacity: 1; + -moz-transform: rotate(360deg); + } +} + +@-webkit-keyframes spin { + 0% { + opacity: 1; + } + 100% { + opacity: 1; + -webkit-transform: rotate(360deg); + } +} + +@keyframes spin { + 0% { + opacity: 1; + } + 100% { + opacity: 1; + -webkit-transform: rotate(360deg); + transform: rotate(360deg); + } +} diff --git a/mlflow/server/js/src/components/Spinner.js b/mlflow/server/js/src/components/Spinner.js new file mode 100644 index 0000000000000..acb597cc278da --- /dev/null +++ b/mlflow/server/js/src/components/Spinner.js @@ -0,0 +1,21 @@ +import React, { Component } from 'react'; +import PropTypes from 'prop-types'; +import spinner from '../static/mlflow-spinner.png'; +import classNames from 'classnames'; +import './Spinner.css'; + +export class Spinner extends Component { + static propTypes = { + showImmediately: PropTypes.bool, + }; + + render() { + const className = classNames({ + "Spinner": true, + "Spinner-immediate": this.props.showImmediately, + }); + return
+ Page loading... +
; + } +} diff --git a/mlflow/server/js/src/components/artifact-view-components/ShowArtifactTextView.js b/mlflow/server/js/src/components/artifact-view-components/ShowArtifactTextView.js index 482be392a0d0a..e3fed2e6d3733 100644 --- a/mlflow/server/js/src/components/artifact-view-components/ShowArtifactTextView.js +++ b/mlflow/server/js/src/components/artifact-view-components/ShowArtifactTextView.js @@ -2,7 +2,7 @@ import React, { Component } from 'react'; import PropTypes from 'prop-types'; import { getSrc } from './ShowArtifactPage'; import './ShowArtifactTextView.css'; -import { CSRF_HEADER_NAME, getCsrfToken } from '../../setupCsrf'; +import { getRequestHeaders } from '../../setupAjaxHeaders'; class ShowArtifactTextView extends Component { constructor(props) { @@ -60,7 +60,7 @@ class ShowArtifactTextView extends Component { const getArtifactRequest = new Request(getSrc(this.props.path, this.props.runUuid), { method: 'GET', redirect: 'follow', - headers: new Headers({ [CSRF_HEADER_NAME]: getCsrfToken() }) + headers: new Headers(getRequestHeaders(document.cookie)) }); fetch(getArtifactRequest).then((response) => { return response.blob(); diff --git a/mlflow/server/js/src/components/modals/ConfirmModal.js b/mlflow/server/js/src/components/modals/ConfirmModal.js index 36d883eb3a7d5..6344badbb21a0 100644 --- a/mlflow/server/js/src/components/modals/ConfirmModal.js +++ b/mlflow/server/js/src/components/modals/ConfirmModal.js @@ -33,7 +33,7 @@ export class ConfirmModal extends Component { handleSubmit: PropTypes.func.isRequired, onClose: PropTypes.func.isRequired, title: PropTypes.string.isRequired, - helpText: PropTypes.string.isRequired, + helpText: PropTypes.node.isRequired, confirmButtonText: PropTypes.string.isRequired, }; @@ -70,9 +70,9 @@ export class ConfirmModal extends Component { -

+

{this.props.helpText} -

+
+ )} + + ); + } +} + +export class EditableTable extends React.Component { + static propTypes = { + columns: PropTypes.arrayOf(Object).isRequired, + data: PropTypes.arrayOf(Object).isRequired, + onSaveEdit: PropTypes.func.isRequired, + form: PropTypes.object.isRequired, + }; + + constructor(props) { + super(props); + this.state = { editingKey: '', isRequestPending: false }; + this.columns = this.initColumns(); + } + + initColumns = () => [ + ...this.props.columns.map((col) => + (col.editable + ? { + ...col, + // `onCell` returns props to be added to EditableCell + onCell: (record) => ({ + record, + dataIndex: col.dataIndex, + title: col.title, + editing: this.isEditing(record), + save: this.save, + cancel: this.cancel, + recordKey: record.key, + }), + } + : col), + ), + { + title: 'Actions', + dataIndex: 'operation', + width: 100, + render: (text, record) => { + const { editingKey, isRequestPending } = this.state; + const editing = this.isEditing(record); + if (editing && isRequestPending) { + return ; + } + return editing ? ( + + this.save(record.key)} style={{ marginRight: 10 }}> + Save + + this.cancel(record.key)}>Cancel + + ) : ( + this.edit(record.key)}> + + + ); + }, + }, + ]; + + isEditing = (record) => record.key === this.state.editingKey; + + cancel = () => { + this.setState({ editingKey: '' }); + }; + + save = (key) => { + this.props.form.validateFields((err, values) => { + if (!err) { + const record = this.props.data.find((r) => r.key === key); + if (record) { + this.setState({ isRequestPending: true }); + this.props.onSaveEdit({ ...record, ...values }).then(() => { + this.setState({ editingKey: '', isRequestPending: false }); + }); + } + } + }); + }; + + edit = (key) => { + this.setState({ editingKey: key }); + }; + + render() { + const components = { + body: { + cell: EditableCell, + }, + }; + const { data, form } = this.props; + return ( + +
+ { + const { metricsList } = ownProps; + return {metricRanges: ExperimentViewUtil.computeMetricRanges(metricsList)}; +}; + +export default connect(mapStateToProps)(ExperimentRunsTableMultiColumnView); diff --git a/mlflow/server/js/src/components/ExperimentView.css b/mlflow/server/js/src/components/ExperimentView.css index c75232f2d30b1..953ea07680ad9 100644 --- a/mlflow/server/js/src/components/ExperimentView.css +++ b/mlflow/server/js/src/components/ExperimentView.css @@ -9,17 +9,17 @@ } .ExperimentView td, .ExperimentView th { - border-top: 1px solid #e2e2e2; - border-bottom: 1px solid #e2e2e2; + border-top: 1px solid #e2e2e2; + border-bottom: 1px solid #e2e2e2; } -.ExperimentView th.top-row { +.ExperimentView .top-row { text-align: center; border-bottom: none; border-top: none; } -.ExperimentView th.bottom-row { +.ExperimentView .bottom-row { text-align: left; border-top: none; } @@ -54,11 +54,11 @@ } .ExperimentView-search-controls { - margin-top: 30px; + margin-top: 8px; } .ExperimentView-run-buttons{ - margin-top: 30px; + margin-top: 16px; margin-bottom: 16px; } @@ -66,22 +66,27 @@ display: inline-block; width: 50%; min-width: 210px; - margin-top: 16px; + margin-top: 8px; } .ExperimentView-search { - display: table; - width: 100%; + display: flex; + align-items: center; } .ExperimentView-search-input { - display: table-cell; + flex: 1; width: auto; } +.ExperimentView-search-help { + margin-right: 10px; + cursor: pointer; +} + .ExperimentView-lifecycle-input { - display: table-cell; - width: 100px; + width: 156px; + padding-right: 8px; } .ExperimentView-lifecycle-button { @@ -89,7 +94,7 @@ } .ExperimentView-paramKeyFilter, .ExperimentView-metricKeyFilter, .ExperimentView-search-input, .ExperimentView-lifecycle-input { - padding-right: 16px; + padding-right: 8px; } .ExperimentView-search-buttons { @@ -100,7 +105,7 @@ .ExperimentView-search-buttons .btn { display: block; width: 100%; - margin-bottom: 12px; + margin-bottom: 8px; } .ExperimentView-search-inputs { @@ -108,13 +113,13 @@ } .ExperimentView-search-controls .filter-label { - width: 110px; + width: 92px; float: left; margin-top: 6px; } .ExperimentView-search-controls .filter-wrapper { - margin-left: 110px; + margin-left: 92px; } .ExperimentView-search-controls input { @@ -125,12 +130,18 @@ margin-right: 30px; } +.clear-button { + margin-bottom: 0; + height: 32px; +} + div.error-message { - margin-left: 100px; + margin-top: 8px; + margin-left: 0; /*width: auto;*/ } span.error-message { - color: red; + color: #db1905; } .metric-filler-bg { @@ -152,7 +163,7 @@ span.error-message { position: relative; } -.ExperimentView th.sortable { +.ExperimentView .sortable { white-space: nowrap; cursor: pointer; @@ -173,6 +184,10 @@ span.error-message { font-weight: bold; } +.ExperimentView .underline-on-hover:hover { + text-decoration: underline; +} + .ExperimentView .metric-param-value { margin-left: 4px; } @@ -190,9 +205,9 @@ span.error-message { text-overflow: ellipsis; white-space: nowrap; vertical-align: top; + height: 100%; } - .ExperimentView .metric-param-container-cell { min-width: 280px; } @@ -200,13 +215,66 @@ span.error-message { .ExperimentView .metric-param-cell { display: inline-block; width: 250px; - padding: 2px; + padding: 4px; } .ExperimentView .metric-param-content { - padding: 4px; + padding-top: 0px; } .ExperimentView-expander:hover { cursor: pointer; } + +.ExperimentView .ReactVirtualized__Table__headerRow { + overflow: visible; + text-transform: inherit; + font-weight: inherit; +} + +.ExperimentView 
.ReactVirtualized__Table__Grid:focus { + outline: 0; +} + + /* Workaround as per https://github.com/bvaughn/react-virtualized/issues/876#issuecomment-367029529 */ +.ExperimentView .ReactVirtualized__Table__rowColumn { + overflow: visible !important; +} + +.ExperimentView .ReactVirtualized__Table__headerColumn { + color: rgb(136, 136, 136); +} + +.ExperimentView .ReactVirtualized__Table .run-table-container { + padding: 8px; +} + +.ExperimentView .ReactVirtualized__Table .BaggedCell .run-table-container { + padding: 4px; +} + +/** + * Style for ancestor elements of the runs table. Arranges its children along a vertical axis + * via (flex-direction: column). If the parent element has 'display: flex' set, will fill up + * 100% of the available vertical space (via 'flex: 1 1 auto') + */ +.runs-table-flex-container { + flex: 1 1 auto; + flex-direction: column; + display: flex; +} + +.ExperimentView .ReactVirtualized__Table__row:hover { + background-color: #f5f5f5; +} + +.search-input-tooltip .ant-popover-inner-content { + background-color: rgba(0, 0, 0, 0.75); + color: white; + border-radius: 4px; +} + +.search-input-tooltip.ant-popover-placement-bottom > .ant-popover-content > .ant-popover-arrow { + border-top-color: rgba(0, 0, 0, 0.75); + border-left-color: rgba(0, 0, 0, 0.75); +} diff --git a/mlflow/server/js/src/components/ExperimentView.js b/mlflow/server/js/src/components/ExperimentView.js index c1ac5efc64c89..6b9e6d44a43f9 100644 --- a/mlflow/server/js/src/components/ExperimentView.js +++ b/mlflow/server/js/src/components/ExperimentView.js @@ -2,30 +2,32 @@ import React, { Component } from 'react'; import PropTypes from 'prop-types'; import { connect } from 'react-redux'; import './ExperimentView.css'; -import { getApis, getExperiment, getParams, getRunInfos, getRunTags } from '../reducers/Reducers'; -import 'react-virtualized/styles.css'; +import { getExperiment, getParams, getRunInfo, getRunTags } from '../reducers/Reducers'; import { withRouter } from 'react-router-dom'; import Routes from '../Routes'; -import { Button, ButtonGroup, DropdownButton, MenuItem } from 'react-bootstrap'; +import { Button, DropdownButton, MenuItem } from 'react-bootstrap'; import { Experiment, RunInfo } from '../sdk/MlflowMessages'; -import { SearchUtils } from '../utils/SearchUtils'; -import classNames from 'classnames'; import { saveAs } from 'file-saver'; import { getLatestMetrics } from '../reducers/MetricReducer'; import KeyFilter from '../utils/KeyFilter'; -import ExperimentRunsTableMultiColumnView from "./ExperimentRunsTableMultiColumnView"; import ExperimentRunsTableCompactView from "./ExperimentRunsTableCompactView"; import { LIFECYCLE_FILTER } from './ExperimentPage'; import ExperimentViewUtil from './ExperimentViewUtil'; import DeleteRunModal from './modals/DeleteRunModal'; import RestoreRunModal from './modals/RestoreRunModal'; +import LocalStorageUtils from "../utils/LocalStorageUtils"; +import { ExperimentViewPersistedState } from "../sdk/MlflowLocalStorageMessages"; +import { Icon, Popover } from 'antd'; -export const DEFAULT_EXPANDED_VALUE = true; +import Utils from '../utils/Utils'; +import {Spinner} from "./Spinner"; +export const DEFAULT_EXPANDED_VALUE = false; -class ExperimentView extends Component { + +export class ExperimentView extends Component { constructor(props) { super(props); this.onCheckbox = this.onCheckbox.bind(this); @@ -40,13 +42,21 @@ class ExperimentView extends Component { this.isAllChecked = this.isAllChecked.bind(this); this.onCheckbox = this.onCheckbox.bind(this); 
 this.onCheckAll = this.onCheckAll.bind(this);
- this.setSortBy = this.setSortBy.bind(this);
+ this.initiateSearch = this.initiateSearch.bind(this);
 this.onDeleteRun = this.onDeleteRun.bind(this);
 this.onRestoreRun = this.onRestoreRun.bind(this);
 this.onLifecycleFilterInput = this.onLifecycleFilterInput.bind(this);
 this.onCloseDeleteRunModal = this.onCloseDeleteRunModal.bind(this);
 this.onCloseRestoreRunModal = this.onCloseRestoreRunModal.bind(this);
 this.onExpand = this.onExpand.bind(this);
+ this.addBagged = this.addBagged.bind(this);
+ this.removeBagged = this.removeBagged.bind(this);
+ const store = ExperimentView.getLocalStore(this.props.experiment.experiment_id);
+ const persistedState = new ExperimentViewPersistedState(store.loadComponentState());
+ this.state = {
+ ...ExperimentView.getDefaultUnpersistedState(),
+ persistedState: persistedState.toJSON(),
+ };
 }

 static propTypes = {
@@ -75,31 +85,51 @@ class ExperimentView extends Component {
 // Input to the lifecycleFilter field
 lifecycleFilter: PropTypes.string.isRequired,
+ orderByKey: PropTypes.string,
+ orderByAsc: PropTypes.bool.isRequired,
+
 // The initial searchInput
 searchInput: PropTypes.string.isRequired,
- };
+ searchRunsError: PropTypes.string,
+ isLoading: PropTypes.bool.isRequired,

- state = {
- runsHiddenByExpander: {},
- // By default all runs are expanded. In this state, runs are explicitly expanded or unexpanded.
- runsExpanded: {},
- runsSelected: {},
- paramKeyFilterInput: '',
- metricKeyFilterInput: '',
- lifecycleFilterInput: LIFECYCLE_FILTER.ACTIVE,
- searchInput: '',
- searchErrorMessage: undefined,
- sort: {
- ascending: false,
- isMetric: false,
- isParam: false,
- key: "start_time"
- },
- showMultiColumns: true,
- showDeleteRunModal: false,
- showRestoreRunModal: false,
+ nextPageToken: PropTypes.string,
+ handleLoadMoreRuns: PropTypes.func.isRequired,
+ loadingMore: PropTypes.bool.isRequired,
 };

+ /** Returns default values for state attributes that aren't persisted in local storage. */
+ static getDefaultUnpersistedState() {
+ return {
+ // Object mapping from run UUID -> boolean (whether the run is selected)
+ runsSelected: {},
+ // Text entered into the param filter field
+ paramKeyFilterInput: '',
+ // Text entered into the metric filter field
+ metricKeyFilterInput: '',
+ // Lifecycle stage of runs to display
+ lifecycleFilterInput: '',
+ // Text entered into the runs-search field
+ searchInput: '',
+ // String error message, if any, from an attempted search
+ searchErrorMessage: undefined,
+ // True if a modal for deleting one or more runs should be displayed
+ showDeleteRunModal: false,
+ // True if a modal for restoring one or more runs should be displayed
+ showRestoreRunModal: false,
+ };
+ }
+
+ /**
+ * Returns a LocalStorageStore instance that can be used to persist data associated with the
+ * ExperimentView component (e.g. component state such as table sort settings), for the
+ * specified experiment.
+ */
+ static getLocalStore(experimentId) {
+ return LocalStorageUtils.getStoreForComponent("ExperimentView", experimentId);
+ }
+
 shouldComponentUpdate(nextProps, nextState) {
 // Don't update the component if a modal is showing before and after the update try.
 if (this.state.showDeleteRunModal && nextState.showDeleteRunModal) return false;
@@ -107,6 +137,35 @@ class ExperimentView extends Component {
 return true;
 }

+ /**
+ * Returns true if search filter text was updated, e.g. if a user entered new text into the
+ * param filter, metric filter, or search text boxes.
+ */ + filtersDidUpdate(prevState) { + return prevState.paramKeyFilterInput !== this.state.paramKeyFilterInput || + prevState.metricKeyFilterInput !== this.state.metricKeyFilterInput || + prevState.searchInput !== this.state.searchInput; + } + + /** Snapshots desired attributes of the component's current state in local storage. */ + snapshotComponentState() { + const store = ExperimentView.getLocalStore(this.props.experiment.experiment_id); + store.saveComponentState(new ExperimentViewPersistedState(this.state.persistedState)); + } + + componentDidUpdate(prevProps, prevState) { + // Don't snapshot state on changes to search filter text; we only want to save these on search + // in ExperimentPage + if (!this.filtersDidUpdate(prevState)) { + this.snapshotComponentState(); + } + } + + componentWillUnmount() { + // Snapshot component state on unmounts to ensure we've captured component state in cases where + // componentDidUpdate doesn't fire. + this.snapshotComponentState(); + } static getDerivedStateFromProps(nextProps, prevState) { // Compute the actual runs selected. (A run cannot be selected if it is not passed in as a @@ -131,10 +190,6 @@ class ExperimentView extends Component { }; } - setShowMultiColumns(value) { - this.setState({ showMultiColumns: value }); - } - onDeleteRun() { this.setState({ showDeleteRunModal: true }); } @@ -151,23 +206,83 @@ class ExperimentView extends Component { this.setState({ showRestoreRunModal: false }); } + /** + * Mark a column as bagged by removing it from the appropriate array of unbagged columns. + * @param isParam If true, the column is assumed to be a metric column; if false, the column is + * assumed to be a param column. + * @param colName Name of the column (metric or param key). + */ + addBagged(isParam, colName) { + const unbagged = isParam ? this.state.persistedState.unbaggedParams : + this.state.persistedState.unbaggedMetrics; + const idx = unbagged.indexOf(colName); + const newUnbagged = idx >= 0 ? + unbagged.slice(0, idx).concat(unbagged.slice(idx + 1, unbagged.length)) : unbagged; + const stateKey = isParam ? "unbaggedParams" : "unbaggedMetrics"; + this.setState( + { + persistedState: new ExperimentViewPersistedState({ + ...this.state.persistedState, + [stateKey]: newUnbagged, + }).toJSON(), + }); + } + + /** + * Mark a column as unbagged by adding it to the appropriate array of unbagged columns. + * @param isParam If true, the column is assumed to be a metric column; if false, the column is + * assumed to be a param column. + * @param colName Name of the column (metric or param key). + */ + removeBagged(isParam, colName) { + const unbagged = isParam ? this.state.persistedState.unbaggedParams : + this.state.persistedState.unbaggedMetrics; + const stateKey = isParam ? 
"unbaggedParams" : "unbaggedMetrics"; + this.setState( + { + persistedState: new ExperimentViewPersistedState({ + ...this.state.persistedState, + [stateKey]: unbagged.concat([colName]) + }).toJSON() + }); + } + render() { const { experiment_id, name, artifact_location } = this.props.experiment; const { runInfos, paramKeyFilter, metricKeyFilter, + isLoading, + loadingMore, + nextPageToken, + handleLoadMoreRuns, } = this.props; // Apply our parameter and metric key filters to just pass the filtered, sorted lists // of parameter and metric names around later const paramKeyList = paramKeyFilter.apply(this.props.paramKeyList); const metricKeyList = metricKeyFilter.apply(this.props.metricKeyList); + const unbaggedParamKeyList = paramKeyFilter.apply(this.state.persistedState.unbaggedParams); + const unbaggedMetricKeyList = metricKeyFilter.apply(this.state.persistedState.unbaggedMetrics); + const compareDisabled = Object.keys(this.state.runsSelected).length < 2; const deleteDisabled = Object.keys(this.state.runsSelected).length < 1; const restoreDisabled = Object.keys(this.state.runsSelected).length < 1; + const searchInputHelpTooltipContent = ( +
+ Search runs using a simplified version of the SQL WHERE clause.
+ + Learn more + +
+ ); return ( -
+
-
- {this.state.searchErrorMessage !== undefined ? +
+ {this.props.searchRunsError ?
- {this.state.searchErrorMessage} + {this.props.searchRunsError}
: null } @@ -208,15 +323,28 @@ class ExperimentView extends Component {
- +
-
+ + +
@@ -247,7 +375,7 @@ class ExperimentView extends Component {
- +
- +
- {runInfos.length} matching {runInfos.length === 1 ? 'run' : 'runs'} + Showing {runInfos.length} matching {runInfos.length === 1 ? 'run' : 'runs'} - - - - - -
- {this.state.showMultiColumns ? - : ( + : - - } + ) + }
); } - onSortBy(isMetric, isParam, key) { - const sort = this.state.sort; - this.setSortBy(isMetric, isParam, key, !sort.ascending); + onSortBy(orderByKey, orderByAsc) { + this.initiateSearch({orderByKey, orderByAsc}); } - setSortBy(isMetric, isParam, key, ascending) { - this.setState({sort: { - ascending: ascending, - key: key, - isMetric: isMetric, - isParam: isParam - }}); + initiateSearch({ + paramKeyFilterInput, + metricKeyFilterInput, + searchInput, + lifecycleFilterInput, + orderByKey, + orderByAsc, + }) { + const myParamKeyFilterInput = (paramKeyFilterInput !== undefined ? + paramKeyFilterInput : this.state.paramKeyFilterInput); + const myMetricKeyFilterInput = (metricKeyFilterInput !== undefined ? + metricKeyFilterInput : this.state.metricKeyFilterInput); + const mySearchInput = (searchInput !== undefined ? searchInput : this.state.searchInput); + const myLifecycleFilterInput = (lifecycleFilterInput !== undefined ? + lifecycleFilterInput : this.state.lifecycleFilterInput); + const myOrderByKey = (orderByKey !== undefined ? orderByKey : this.props.orderByKey); + const myOrderByAsc = (orderByAsc !== undefined ? orderByAsc : this.props.orderByAsc); + + try { + this.props.onSearch(myParamKeyFilterInput, myMetricKeyFilterInput, mySearchInput, + myLifecycleFilterInput, myOrderByKey, myOrderByAsc); + } catch (ex) { + if (ex.errorMessage !== undefined) { + this.setState({ searchErrorMessage: ex.errorMessage }); + } else { + throw ex; + } + } } onCheckbox(runUuid) { @@ -396,17 +520,24 @@ class ExperimentView extends Component { } onExpand(runId, childrenIds) { - const newExpanderState = !ExperimentViewUtil.isExpanderOpen(this.state.runsExpanded, runId); - const newRunsHiddenByExpander = {...this.state.runsHiddenByExpander}; + const newExpanderState = !ExperimentViewUtil.isExpanderOpen( + this.state.persistedState.runsExpanded, runId); + const newRunsHiddenByExpander = {...this.state.persistedState.runsHiddenByExpander}; childrenIds.forEach((childId) => { newRunsHiddenByExpander[childId] = !newExpanderState; }); - this.setState({ + const newPersistedStateFields = { runsExpanded: { - ...this.state.runsExpanded, + ...this.state.persistedState.runsExpanded, [runId]: newExpanderState, }, runsHiddenByExpander: newRunsHiddenByExpander, + }; + this.setState({ + persistedState: new ExperimentViewPersistedState({ + ...this.state.persistedState, + ...newPersistedStateFields, + }).toJSON(), }); // Deselect the children const newRunsSelected = {...this.state.runsSelected}; @@ -446,23 +577,19 @@ class ExperimentView extends Component { searchInput, lifecycleFilterInput } = this.state; - const paramKeyFilter = new KeyFilter(paramKeyFilterInput); - const metricKeyFilter = new KeyFilter(metricKeyFilterInput); - try { - const andedExpressions = SearchUtils.parseSearchInput(searchInput); - this.props.onSearch(paramKeyFilter, metricKeyFilter, andedExpressions, searchInput, - lifecycleFilterInput); - } catch (ex) { - this.setState({ searchErrorMessage: ex.errorMessage }); - } + this.initiateSearch({paramKeyFilterInput, metricKeyFilterInput, searchInput, + lifecycleFilterInput}); } onClear() { - const paramKeyFilter = new KeyFilter(); - const metricKeyFilter = new KeyFilter(); - const andedExpressions = []; - this.props.onSearch(paramKeyFilter, metricKeyFilter, andedExpressions, "", - LIFECYCLE_FILTER.ACTIVE); + // When user clicks "Clear", reset persisted state attributes to their default values. 
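An aside before the handler body below: onClear leans on the local-storage persistence wired up earlier in this file. A rough sketch of that round-trip follows, with hypothetical helper names and key layout (MLflow's actual LocalStorageUtils API may differ):

// Illustrative sketch only; not part of the patch above.
// Persists per-component, per-experiment UI state in the browser's localStorage.
const storageKey = (componentName, experimentId) =>
  `MLflowLocalStorage/${componentName}/${experimentId}`;

const saveComponentState = (componentName, experimentId, state) => {
  window.localStorage.setItem(
    storageKey(componentName, experimentId),
    JSON.stringify(state),
  );
};

const loadComponentState = (componentName, experimentId) => {
  const raw = window.localStorage.getItem(storageKey(componentName, experimentId));
  return raw ? JSON.parse(raw) : {}; // empty state on a first visit
};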
+ const newPersistedState = new ExperimentViewPersistedState(); + this.setState({persistedState: newPersistedState.toJSON()}, () => { + this.snapshotComponentState(); + this.initiateSearch({paramKeyFilterInput: "", metricKeyFilterInput: "", + searchInput: "", lifecycleFilterInput: LIFECYCLE_FILTER.ACTIVE, + orderByKey: null, orderByAsc: true}); + }); } onCompare() { @@ -477,7 +604,8 @@ class ExperimentView extends Component { this.props.paramKeyFilter.apply(this.props.paramKeyList), this.props.metricKeyFilter.apply(this.props.metricKeyList), this.props.paramsList, - this.props.metricsList); + this.props.metricsList, + this.props.tagsList); const blob = new Blob([csv], { type: 'application/csv;charset=utf-8' }); saveAs(blob, "runs.csv"); } @@ -489,7 +617,7 @@ class ExperimentView extends Component { if (str === undefined) { return ""; } - if (/[,"\r\n]/.test(str)) { + if ((/[,"\r\n]/).test(str)) { return '"' + str.replace(/"/g, '""') + '"'; } return str; @@ -535,7 +663,8 @@ class ExperimentView extends Component { paramKeyList, metricKeyList, paramsList, - metricsList) { + metricsList, + tagsList) { const columns = [ "Run ID", "Name", @@ -554,14 +683,16 @@ class ExperimentView extends Component { const data = runInfos.map((runInfo, index) => { const row = [ runInfo.run_uuid, - runInfo.name, - runInfo.source_type, - runInfo.source_name, - runInfo.user_id, + Utils.getRunName(tagsList[index]), // add run name to csv export row + Utils.getSourceType(tagsList[index]), + Utils.getSourceName(tagsList[index]), + Utils.getUser(runInfo, tagsList[index]), runInfo.status, ]; + const paramsMap = ExperimentViewUtil.toParamsMap(paramsList[index]); const metricsMap = ExperimentViewUtil.toMetricsMap(metricsList[index]); + paramKeyList.forEach((paramKey) => { if (paramsMap[paramKey]) { row.push(paramsMap[paramKey].getValue()); @@ -583,30 +714,29 @@ class ExperimentView extends Component { } } -const mapStateToProps = (state, ownProps) => { - const { lifecycleFilter, searchRunsRequestId } = ownProps; - const searchRunApi = getApis([searchRunsRequestId], state)[0]; +export const mapStateToProps = (state, ownProps) => { + const { lifecycleFilter } = ownProps; + // The runUuids we should serve. 
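Before the selector body below: condensed, the filtering that the reworked mapStateToProps performs amounts to the sketch here. The helper name is hypothetical, and `stage` stands in for the LIFECYCLE_FILTER constants imported from ExperimentPage:

// Illustrative sketch only; not part of the patch above. `stage` is either
// 'active' or 'deleted'.
const selectVisibleRunUuids = (runInfosByUuid, experimentId, stage) =>
  Object.values(runInfosByUuid)
    .filter((r) => r.experiment_id === experimentId.toString())
    .filter((r) => r.lifecycle_stage === stage)
    .map((r) => r.run_uuid);

// Example:
// selectVisibleRunUuids(
//   { run1: { run_uuid: 'run1', experiment_id: '0', lifecycle_stage: 'active' } },
//   0, 'active') // => ['run1']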
- let runUuids; - if (searchRunApi.data.runs) { - runUuids = new Set(searchRunApi.data.runs.map((r) => r.info.run_uuid)); - } else { - runUuids = new Set(); - } - const runInfos = getRunInfos(state).filter((rInfo) => - runUuids.has(rInfo.getRunUuid()) - ).filter((rInfo) => { - if (lifecycleFilter === LIFECYCLE_FILTER.ACTIVE) { - return rInfo.lifecycle_stage === 'active'; - } else { - return rInfo.lifecycle_stage === 'deleted'; - } - }); + const { runInfosByUuid } = state.entities; + const runUuids = Object.values(runInfosByUuid) + .filter((r) => r.experiment_id === ownProps.experimentId.toString()) + .map((r) => r.run_uuid); + + const runInfos = runUuids.map((run_id) => getRunInfo(run_id, state)) + .filter((rInfo) => { + if (lifecycleFilter === LIFECYCLE_FILTER.ACTIVE) { + return rInfo.lifecycle_stage === 'active'; + } else { + return rInfo.lifecycle_stage === 'deleted'; + } + }); const experiment = getExperiment(ownProps.experimentId, state); const metricKeysSet = new Set(); const paramKeysSet = new Set(); const metricsList = runInfos.map((runInfo) => { - const metrics = Object.values(getLatestMetrics(runInfo.getRunUuid(), state)); + const metricsByRunUuid = getLatestMetrics(runInfo.getRunUuid(), state); + const metrics = Object.values(metricsByRunUuid || {}); metrics.forEach((metric) => { metricKeysSet.add(metric.key); }); @@ -634,13 +764,10 @@ const mapStateToProps = (state, ownProps) => { const styles = { lifecycleButtonLabel: { - width: '60px' + width: '32px' }, lifecycleButtonFilterWrapper: { - marginLeft: '60px', - }, - tableToggleButtonGroup: { - marginLeft: '16px', + marginLeft: '48px', }, }; diff --git a/mlflow/server/js/src/components/ExperimentView.test.js b/mlflow/server/js/src/components/ExperimentView.test.js new file mode 100644 index 0000000000000..5767d12a03d3c --- /dev/null +++ b/mlflow/server/js/src/components/ExperimentView.test.js @@ -0,0 +1,95 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { ExperimentView, mapStateToProps } from './ExperimentView'; +import Fixtures from "../test-utils/Fixtures"; +import {LIFECYCLE_FILTER} from "./ExperimentPage"; +import KeyFilter from "../utils/KeyFilter"; +import {addApiToState, addExperimentToState, createPendingApi, emptyState} from "../test-utils/ReduxStoreFixtures"; +import {getUUID} from "../Actions"; +import {Spinner} from "./Spinner"; + +let onSearchSpy; + +beforeEach(() => { + onSearchSpy = jest.fn(); +}); + +const getExperimentViewMock = () => { + return shallow(); +}; +test(`Clearing filter state calls search handler with correct arguments`, () => { + const wrapper = getExperimentViewMock(); + wrapper.instance().onClear(); + expect(onSearchSpy.mock.calls.length).toBe(1); + expect(onSearchSpy.mock.calls[0][0]).toBe(''); + expect(onSearchSpy.mock.calls[0][1]).toBe(''); + expect(onSearchSpy.mock.calls[0][2]).toBe(''); + expect(onSearchSpy.mock.calls[0][3]).toBe(LIFECYCLE_FILTER.ACTIVE); + expect(onSearchSpy.mock.calls[0][4]).toBe(null); + expect(onSearchSpy.mock.calls[0][5]).toBe(true); +}); +test('Entering filter input updates component state', () => { + const wrapper = getExperimentViewMock(); + wrapper.instance().setState = jest.fn(); + // Test entering param filter input + wrapper.find('.ExperimentView-paramKeyFilter input').first().simulate( + 'change', {target: {value: 'param name'}}); + expect(wrapper.instance().setState).toBeCalledWith({paramKeyFilterInput: 'param name'}); + // Test entering metric filter input + wrapper.find('.ExperimentView-metricKeyFilter input').first().simulate( + 
'change', {target: {value: 'metric name'}}); + expect(wrapper.instance().setState).toBeCalledWith({metricKeyFilterInput: 'metric name'}); + // Test entering search input + wrapper.find('.ExperimentView-search-input input').first().simulate( + 'change', {target: {value: 'search input string'}}); + expect(wrapper.instance().setState).toBeCalledWith({searchInput: 'search input string'}); +}); + +test("ExperimentView will show spinner if isLoading prop is true", () => { + const wrapper = getExperimentViewMock(); + expect(wrapper.find(Spinner)).toHaveLength(1); +}); + +// mapStateToProps should only be run after the call to getExperiment from ExperimentPage is +// resolved +test("mapStateToProps doesn't blow up if the searchRunsApi is pending", () => { + const searchRunsId = getUUID(); + let state = emptyState; + const experiment = Fixtures.createExperiment(); + state = addApiToState(state, createPendingApi(searchRunsId)); + state = addExperimentToState(state, experiment); + const newProps = mapStateToProps(state, { + lifecycleFilter: LIFECYCLE_FILTER.ACTIVE, + searchRunsRequestId: searchRunsId, + experimentId: experiment.experiment_id + }); + expect(newProps).toEqual({ + runInfos: [], + experiment, + metricKeyList: [], + paramKeyList: [], + metricsList: [], + paramsList: [], + tagsList: [], + }); +}); diff --git a/mlflow/server/js/src/components/ExperimentViewUtil.js b/mlflow/server/js/src/components/ExperimentViewUtil.js index 1bdc8483035d9..c30d69d935802 100644 --- a/mlflow/server/js/src/components/ExperimentViewUtil.js +++ b/mlflow/server/js/src/components/ExperimentViewUtil.js @@ -1,17 +1,20 @@ +import classNames from 'classnames'; import React from 'react'; import Utils from "../utils/Utils"; import { Link } from 'react-router-dom'; import Routes from '../Routes'; import { DEFAULT_EXPANDED_VALUE } from './ExperimentView'; +import { CollapsibleTagsCell } from './CollapsibleTagsCell'; export default class ExperimentViewUtil { /** Returns checkbox cell for a row. */ - static getCheckboxForRow(selected, checkboxHandler) { - return
+ static getCheckboxForRow(selected, checkboxHandler, cellType) { + const CellComponent = `${cellType}`; + return
-
+
{Utils.formatTimestamp(startTime)}
-
+ , +
{user}
-
+ , +
{runName}
-
+ , +
- {Utils.renderSourceTypeIcon(runInfo.source_type)} + {Utils.renderSourceTypeIcon(Utils.getSourceType(tags))} {sourceType}
-
+ , +
- {Utils.renderVersion(runInfo)} + {Utils.renderVersion(tags)}
-
+ static getSelectAllCheckbox(onCheckAll, isAllCheckedBool, cellType) { + const CellComponent = `${cellType}`; + return - onSortBy(false, false, key)} + className={cellClassName} + onClick={() => (isSortable ? onSortBy(canonicalSortKey, !curOrderByAsc) : null)} > {text} - {sortIcon} - ; } - static isSortedBy(sortState, isMetric, isParam, key) { - return (sortState.isMetric === isMetric && sortState.isParam === isParam - && sortState.key === key); + + /** + * Returns a table cell corresponding to a single metric value. The metric is assumed to be + * unbagged (marked to be displayed in its own column). + * @param metricKey The key of the desired metric + * @param metricsMap Object mapping metric keys to their latest values for a single run + * @param metricRanges Object mapping metric keys to objects of the form {min: ..., max: ...} + * containing min and max values of the metric across all visible runs. + * @param cellType Tag type (string like "div", "td", etc) of containing cell. + */ + static getUnbaggedMetricCell(metricKey, metricsMap, metricRanges, cellType) { + const className = "left-border run-table-container"; + const keyName = "metric-" + metricKey; + const CellComponent = `${cellType}`; + if (metricsMap[metricKey]) { + const metric = metricsMap[metricKey].getValue(); + const range = metricRanges[metricKey]; + let fraction = 1.0; + if (range.max > range.min) { + fraction = (metric - range.min) / (range.max - range.min); + } + const percent = (fraction * 100) + "%"; + return ( + + {/* We need the extra div because metric-filler-bg is inline-block */} +
+
+
+
+ {Utils.formatMetric(metric)} +
+
+
+ + ); + } + return ; + } + + static getUnbaggedParamCell(paramKey, paramsMap, cellType) { + const CellComponent = `${cellType}`; + const className = "left-border run-table-container"; + const keyName = "param-" + paramKey; + if (paramsMap[paramKey]) { + return +
+ {paramsMap[paramKey].getValue()} +
+
; + } else { + return ; + } } static computeMetricRanges(metricsByRun) { @@ -215,9 +291,11 @@ export default class ExperimentViewUtil { const sortValue = (sortState.isMetric ? metricsMap : paramsMap)[sortState.key]; return (sortValue === undefined ? undefined : sortValue.value); } else if (sortState.key === 'user_id') { - return Utils.formatUser(runInfo.user_id); + return Utils.formatUser(Utils.getUser(runInfo, tags)); } else if (sortState.key === 'source') { return Utils.formatSource(runInfo, tags); + } else if (sortState.key === 'run_name') { + return Utils.getRunName(tags); } else { return runInfo[sortState.key]; } @@ -229,27 +307,39 @@ export default class ExperimentViewUtil { return expanderOpen; } - static getExpander(hasExpander, expanderOpen, onExpandBound) { + static getExpander(hasExpander, expanderOpen, onExpandBound, runUuid, cellType) { + const CellComponent = `${cellType}`; if (!hasExpander) { - return
- + - + -
+ {editing ? ( + + {getFieldDecorator(dataIndex, { + rules: [ + { + required: true, + message: `${title} is required.`, + }, + ], + initialValue: record[dataIndex], + })()} + + ) : ( + children + )} +
+ + ); + } +} + +export const EditableFormTable = Form.create()(EditableTable); diff --git a/mlflow/server/js/src/components/tables/EditableFormTable.test.js b/mlflow/server/js/src/components/tables/EditableFormTable.test.js new file mode 100644 index 0000000000000..a818a6ab4fdaa --- /dev/null +++ b/mlflow/server/js/src/components/tables/EditableFormTable.test.js @@ -0,0 +1,34 @@ +import React from 'react'; +import { shallow } from 'enzyme'; +import { EditableTable } from './EditableFormTable'; + +describe('unit tests', () => { + let wrapper; + const minimalProps = { + columns: [ + { + title: 'Name', + dataIndex: 'name', + width: 200, + }, + { + title: 'Value', + dataIndex: 'value', + width: 200, + editable: true, + } + ], + data: [ + { key: 'tag1', name: 'tag1', value: 'value1' }, + { key: 'tag2', name: 'tag2', value: 'value2' }, + ], + // eslint-disable-next-line no-unused-vars + form: { getFieldDecorator: jest.fn(opts => c => c) }, + onSaveEdit: () => {}, + }; + + test('should render with minimal props without exploding', () => { + wrapper = shallow(); + expect(wrapper.length).toBe(1); + }); +}); diff --git a/mlflow/server/js/src/index.css b/mlflow/server/js/src/index.css index b4cc7250b98cb..90b859269c725 100644 --- a/mlflow/server/js/src/index.css +++ b/mlflow/server/js/src/index.css @@ -1,3 +1,5 @@ +@import '~antd/dist/antd.css'; + body { margin: 0; padding: 0; diff --git a/mlflow/server/js/src/index.js b/mlflow/server/js/src/index.js index 63450592f1665..700b703628b4e 100644 --- a/mlflow/server/js/src/index.js +++ b/mlflow/server/js/src/index.js @@ -3,11 +3,11 @@ import ReactDOM from 'react-dom'; import './index.css'; import App from './components/App'; import registerServiceWorker from './registerServiceWorker'; -import { setupCsrf } from './setupCsrf'; +import { setupAjaxHeaders } from './setupAjaxHeaders'; import { Provider } from 'react-redux'; import store from './Store'; -setupCsrf(); +setupAjaxHeaders(); const root = ( diff --git a/mlflow/server/js/src/reducers/MetricReducer.js b/mlflow/server/js/src/reducers/MetricReducer.js index cb7cddadea2b1..04fbf3e8ed25f 100644 --- a/mlflow/server/js/src/reducers/MetricReducer.js +++ b/mlflow/server/js/src/reducers/MetricReducer.js @@ -1,5 +1,11 @@ -import { fulfilled, GET_METRIC_HISTORY_API, GET_RUN_API, SEARCH_RUNS_API } from '../Actions'; -import { Run, RunInfo, Metric } from '../sdk/MlflowMessages'; +import { + fulfilled, + GET_METRIC_HISTORY_API, + GET_RUN_API, + LOAD_MORE_RUNS_API, + SEARCH_RUNS_API +} from '../Actions'; +import { RunInfo, Metric } from '../sdk/MlflowMessages'; export const getMetricsByKey = (runUuid, key, state) => { return state.entities.metricsByRunUuid[runUuid][key]; @@ -35,12 +41,12 @@ export const latestMetricsByRunUuid = (state = {}, action) => { [runUuid]: metricArrToObject(metrics), }; } - case fulfilled(SEARCH_RUNS_API): { + case fulfilled(SEARCH_RUNS_API): + case fulfilled(LOAD_MORE_RUNS_API): { const newState = { ...state }; if (action.payload.runs) { action.payload.runs.forEach((rJson) => { - const run = Run.fromJs(rJson); - const runUuid = run.getInfo().getRunUuid(); + const runUuid = rJson.info.run_uuid; const metrics = rJson.data.metrics || []; newState[runUuid] = metricArrToObject(metrics); }); diff --git a/mlflow/server/js/src/reducers/Reducers.js b/mlflow/server/js/src/reducers/Reducers.js index 0753eeb84e767..f8d5bed0e53a9 100644 --- a/mlflow/server/js/src/reducers/Reducers.js +++ b/mlflow/server/js/src/reducers/Reducers.js @@ -1,14 +1,23 @@ import { combineReducers } from 'redux'; import { 
CLOSE_ERROR_MODAL, - fulfilled, GET_EXPERIMENT_API, GET_RUN_API, isFulfilledApi, isPendingApi, + fulfilled, + GET_EXPERIMENT_API, + GET_RUN_API, + isFulfilledApi, + isPendingApi, isRejectedApi, LIST_ARTIFACTS_API, - LIST_EXPERIMENTS_API, OPEN_ERROR_MODAL, SEARCH_RUNS_API, SET_TAG_API, + LIST_EXPERIMENTS_API, + OPEN_ERROR_MODAL, + SEARCH_RUNS_API, + LOAD_MORE_RUNS_API, + SET_TAG_API, rejected, } from '../Actions'; -import { Experiment, Run, Param, RunInfo, RunTag } from '../sdk/MlflowMessages'; +import {Experiment, Param, RunInfo, RunTag } from '../sdk/MlflowMessages'; import { ArtifactNode } from '../utils/ArtifactUtils'; import { metricsByRunUuid, latestMetricsByRunUuid } from './MetricReducer'; +import _ from 'lodash'; export const getExperiments = (state) => { return Object.values(state.entities.experimentsById); @@ -22,7 +31,7 @@ const experimentsById = (state = {}, action) => { switch (action.type) { case fulfilled(LIST_EXPERIMENTS_API): { let newState = Object.assign({}, state); - if (action.payload) { + if (action.payload && action.payload.experiments) { action.payload.experiments.forEach((eJson) => { const experiment = Experiment.fromJs(eJson); newState = Object.assign(newState, {[experiment.getExperimentId()]: experiment}); @@ -42,39 +51,30 @@ const experimentsById = (state = {}, action) => { } }; -export const getRunInfos = (state) => { - return Object.values(state.entities.runInfosByUuid).sort((a, b) => { - if (a.start_time < b.start_time) { - return 1; - } else if (a.start_time > b.start_time) { - return -1; - } else { - return 0; - } - }); -}; - export const getRunInfo = (runUuid, state) => { return state.entities.runInfosByUuid[runUuid]; }; const runInfosByUuid = (state = {}, action) => { switch (action.type) { - case fulfilled(GET_EXPERIMENT_API): { - let newState = { ...state }; + case fulfilled(GET_RUN_API): { + const runInfo = RunInfo.fromJs(action.payload.run.info); + return amendRunInfosByUuid(state, runInfo); + } + case fulfilled(SEARCH_RUNS_API): { + const newState = {}; if (action.payload && action.payload.runs) { action.payload.runs.forEach((rJson) => { - const runInfo = RunInfo.fromJs(rJson); - newState = amendRunInfosByUuid(newState, runInfo); + const runInfo = RunInfo.fromJs(rJson.info); + newState[runInfo.getRunUuid()] = runInfo; }); } return newState; } - case fulfilled(GET_RUN_API): { - const runInfo = RunInfo.fromJs(action.payload.run.info); - return amendRunInfosByUuid(state, runInfo); + case rejected(SEARCH_RUNS_API): { + return {}; } - case fulfilled(SEARCH_RUNS_API): { + case fulfilled(LOAD_MORE_RUNS_API): { let newState = { ...state }; if (action.payload && action.payload.runs) { action.payload.runs.forEach((rJson) => { @@ -106,19 +106,29 @@ export const getParams = (runUuid, state) => { }; const paramsByRunUuid = (state = {}, action) => { + const paramArrToObject = (params) => { + const paramObj = {}; + params.forEach((p) => paramObj[p.key] = Param.fromJs(p)); + return paramObj; + }; switch (action.type) { case fulfilled(GET_RUN_API): { - const runInfo = RunInfo.fromJs(action.payload.run.info); - return amendParamsByRunUuid(state, action.payload.run.data.params, runInfo.getRunUuid()); + const run = action.payload.run; + const runUuid = run.info.run_uuid; + const params = run.data.params || []; + const newState = { ...state }; + newState[runUuid] = paramArrToObject(params); + return newState; } - case fulfilled(SEARCH_RUNS_API): { + case fulfilled(SEARCH_RUNS_API): + case fulfilled(LOAD_MORE_RUNS_API): { const runs = action.payload.runs; - let newState = 
{ ...state }; + const newState = { ...state }; if (runs) { runs.forEach((rJson) => { - const run = Run.fromJs(rJson); - newState = amendParamsByRunUuid( - newState, rJson.data.params, run.getInfo().getRunUuid()); + const runUuid = rJson.info.run_uuid; + const params = rJson.data.params || []; + newState[runUuid] = paramArrToObject(params); }); } return newState; @@ -128,23 +138,6 @@ const paramsByRunUuid = (state = {}, action) => { } }; -const amendParamsByRunUuid = (state, params, runUuid) => { - let newState = { ...state }; - if (params) { - params.forEach((pJson) => { - const param = Param.fromJs(pJson); - const oldParams = newState[runUuid] ? newState[runUuid] : {}; - newState = { - ...newState, - [runUuid]: { - ...oldParams, - [param.getKey()]: param, - } - }; - }); - } - return newState; -}; export const getRunTags = (runUuid, state) => { const tags = state.entities.tagsByRunUuid[runUuid]; @@ -156,19 +149,29 @@ export const getRunTags = (runUuid, state) => { }; const tagsByRunUuid = (state = {}, action) => { + const tagArrToObject = (tags) => { + const tagObj = {}; + tags.forEach((tag) => tagObj[tag.key] = RunTag.fromJs(tag)); + return tagObj; + }; switch (action.type) { case fulfilled(GET_RUN_API): { const runInfo = RunInfo.fromJs(action.payload.run.info); - return amendTagsByRunUuid(state, action.payload.run.data.tags, runInfo.getRunUuid()); + const tags = action.payload.run.data.tags || []; + const runUuid = runInfo.getRunUuid(); + const newState = {...state}; + newState[runUuid] = tagArrToObject(tags); + return newState; } - case fulfilled(SEARCH_RUNS_API): { + case fulfilled(SEARCH_RUNS_API): + case fulfilled(LOAD_MORE_RUNS_API): { const runs = action.payload.runs; - let newState = { ...state }; + const newState = { ...state }; if (runs) { runs.forEach((rJson) => { - const run = Run.fromJs(rJson); - newState = amendTagsByRunUuid( - newState, rJson.data.tags, run.getInfo().getRunUuid()); + const runUuid = rJson.info.run_uuid; + const tags = rJson.data.tags || []; + newState[runUuid] = tagArrToObject(tags); }); } return newState; @@ -268,9 +271,19 @@ const entities = combineReducers({ artifactRootUriByRunUuid, }); +export const getSharedParamKeysByRunUuids = (runUuids, state) => + _.intersection( + ...runUuids.map((runUuid) => Object.keys(state.entities.paramsByRunUuid[runUuid])), + ); + +export const getSharedMetricKeysByRunUuids = (runUuids, state) => + _.intersection( + ...runUuids.map((runUuid) => Object.keys(state.entities.latestMetricsByRunUuid[runUuid])), + ); + export const getApis = (requestIds, state) => { return requestIds.map((id) => ( - state.apis[id] + state.apis[id] || {} )); }; diff --git a/mlflow/server/js/src/sdk/ErrorCodes.js b/mlflow/server/js/src/sdk/ErrorCodes.js index ced963a9cb93c..2b6456bdb57f1 100644 --- a/mlflow/server/js/src/sdk/ErrorCodes.js +++ b/mlflow/server/js/src/sdk/ErrorCodes.js @@ -4,4 +4,5 @@ export default { INTERNAL_ERROR: 'INTERNAL_ERROR', INVALID_PARAMETER_VALUE: 'INVALID_PARAMETER_VALUE', RESOURCE_DOES_NOT_EXIST: 'RESOURCE_DOES_NOT_EXIST', + PERMISSION_DENIED: 'PERMISSION_DENIED', }; diff --git a/mlflow/server/js/src/sdk/MlflowLocalStorageMessages.js b/mlflow/server/js/src/sdk/MlflowLocalStorageMessages.js new file mode 100644 index 0000000000000..8ce61740e1071 --- /dev/null +++ b/mlflow/server/js/src/sdk/MlflowLocalStorageMessages.js @@ -0,0 +1,52 @@ +/** + * This class contains definitions of message entities corresponding to data stored in LocalStorage. 
+ * The backwards-compatibility behavior of these messages is as follows:
+ *
+ * Backwards-compatible changes:
+ * 1) Adding a new field: Backwards-compatible. New fields that are absent from old data in
+ *    local storage will take on the specified default value.
+ * 2) Removing a field: Backwards-compatible. Unknown fields from old data in local storage will be
+ *    ignored at construction-time.
+ *
+ * Backwards-incompatible changes (AVOID MAKING SUCH CHANGES):
+ * 1) Changing the type of a field. Old data loaded from local storage will be of the wrong type.
+ * 2) Changing the role/usage of a field. It's better to add a new field than to repurpose an
+ *    existing field, since a repurposed field may be populated with unexpected data cached in
+ *    local storage.
+ */
+import Immutable from "immutable";
+
+/**
+ * This class wraps attributes of the ExperimentPage component's state that should be
+ * persisted in / restored from local storage.
+ */
+export const ExperimentPagePersistedState = Immutable.Record({
+  // Comma-separated string containing the keys of parameters to display
+  paramKeyFilterString: "",
+  // Comma-separated string containing the keys of metrics to display
+  metricKeyFilterString: "",
+  // SQL-like query string used to filter runs, e.g. "params.alpha = '0.5'"
+  searchInput: "",
+  // Canonical order_by key like "params.`alpha`". May be null to indicate the table
+  // should use the natural row ordering provided by the server.
+  orderByKey: null,
+  // Whether the order imposed by orderByKey should be ascending or descending.
+  orderByAsc: false,
+}, 'ExperimentPagePersistedState');
+/**
+ * This class wraps attributes of the ExperimentView component's state that should be
+ * persisted in / restored from local storage.
+ */
+export const ExperimentViewPersistedState = Immutable.Record({
+  // Object mapping run UUIDs (strings) to booleans, where a boolean value of true indicates that
+  // a run has been minimized (its child runs are hidden).
+  runsHiddenByExpander: {},
+  // Object mapping run UUIDs (strings) to booleans, where a boolean value of true indicates that
+  // a run has been expanded (its child runs are visible).
+  runsExpanded: {},
+  // Arrays of "unbagged", or split-out metric and param keys (strings). We maintain these as lists
+  // to help keep them ordered (i.e.
splitting out a column shouldn't change the ordering of columns
+  // that have already been split out)
+  unbaggedMetrics: [],
+  unbaggedParams: [],
+}, 'ExperimentViewPersistedState');
diff --git a/mlflow/server/js/src/sdk/MlflowLocalStorageMessages.test.js b/mlflow/server/js/src/sdk/MlflowLocalStorageMessages.test.js
new file mode 100644
index 0000000000000..9ccec2a1aba02
--- /dev/null
+++ b/mlflow/server/js/src/sdk/MlflowLocalStorageMessages.test.js
@@ -0,0 +1,17 @@
+import React from 'react';
+import { ExperimentPagePersistedState, ExperimentViewPersistedState } from '../sdk/MlflowLocalStorageMessages';
+
+test('Local storage messages ignore unknown fields', () => {
+  const persistedState = ExperimentPagePersistedState({heyYallImAnUnknownField: "value"});
+  expect(persistedState.paramKeyFilterString).toEqual("");
+  expect(persistedState.metricKeyFilterString).toEqual("");
+  expect(persistedState.searchInput).toEqual("");
+});
+
+test('Local storage messages set default values for unspecified fields', () => {
+  const persistedState = ExperimentViewPersistedState({});
+  expect(persistedState.runsHiddenByExpander).toEqual({});
+  expect(persistedState.runsExpanded).toEqual({});
+  expect(persistedState.unbaggedMetrics).toEqual([]);
+  expect(persistedState.unbaggedParams).toEqual([]);
+});
diff --git a/mlflow/server/js/src/sdk/MlflowMessages.js b/mlflow/server/js/src/sdk/MlflowMessages.js
index 1debe45732a2a..58bda38f6b572 100644
--- a/mlflow/server/js/src/sdk/MlflowMessages.js
+++ b/mlflow/server/js/src/sdk/MlflowMessages.js
@@ -24,6 +24,9 @@ export const Metric = Immutable.Record({
   // optional INT64
   timestamp: undefined,
+
+  // optional INT64
+  step: undefined,
 }, 'Metric');
 
 /**
@@ -49,6 +52,9 @@ const extended_Metric = ModelBuilder.extend(Metric, {
   getTimestamp() {
     return this.timestamp !== undefined ? this.timestamp : 0;
   },
+  getStep() {
+    return this.step !== undefined ? this.step : 0;
+  },
 });
 
 /**
@@ -112,15 +118,6 @@ export const RunInfo = Immutable.Record({
   // optional INT64
   experiment_id: undefined,
 
-  // optional STRING
-  name: undefined,
-
-  // optional SourceType
-  source_type: undefined,
-
-  // optional STRING
-  source_name: undefined,
-
   // optional STRING
   user_id: undefined,
 
@@ -133,12 +130,6 @@ export const RunInfo = Immutable.Record({
   // optional INT64
   end_time: undefined,
 
-  // optional STRING
-  source_version: undefined,
-
-  // optional STRING
-  entry_point_name: undefined,
-
   // optional STRING
   artifact_uri: undefined,
 
@@ -166,18 +157,6 @@ const extended_RunInfo = ModelBuilder.extend(RunInfo, {
   getExperimentId() {
     return this.experiment_id !== undefined ? this.experiment_id : 0;
   },
-  getName() {
-    return this.name !== undefined ? this.name : '';
-  },
-  getSourceType() {
-    return this.source_type !== undefined ? this.source_type : 'NOTEBOOK';
-  },
-  getSourceName() {
-    return this.source_name !== undefined ? this.source_name : '';
-  },
-  getUserId() {
-    return this.user_id !== undefined ? this.user_id : '';
-  },
   getStatus() {
     return this.status !== undefined ? this.status : 'RUNNING';
   },
@@ -187,12 +166,6 @@ const extended_RunInfo = ModelBuilder.extend(RunInfo, {
   getEndTime() {
     return this.end_time !== undefined ? this.end_time : 0;
   },
-  getSourceVersion() {
-    return this.source_version !== undefined ? this.source_version : '';
-  },
-  getEntryPointName() {
-    return this.entry_point_name !== undefined ? this.entry_point_name : '';
-  },
   getArtifactUri() {
     return this.artifact_uri !== undefined ?
this.artifact_uri : ''; }, @@ -529,48 +502,6 @@ GetRun.fromJs = function fromJs(pojo) { return new extended_GetRun(pojoWithNestedImmutables); }; -export const GetMetric = Immutable.Record({ - // required STRING - run_uuid: undefined, - - // required STRING - metric_key: undefined, -}, 'GetMetric'); - -/** - * By default Immutable.fromJS will translate an object field in JSON into Immutable.Map. - * This reviver allow us to keep the Immutable.Record type when serializing JSON message - * into nested Immutable Record class. - */ -GetMetric.fromJsReviver = function fromJsReviver(key, value) { - switch (key) { - default: - return Immutable.fromJS(value); - } -}; - -const extended_GetMetric = ModelBuilder.extend(GetMetric, { - - getRunUuid() { - return this.run_uuid !== undefined ? this.run_uuid : ''; - }, - getMetricKey() { - return this.metric_key !== undefined ? this.metric_key : ''; - }, -}); - -/** - * This is a customized fromJs function used to translate plain old Javascript - * objects into this Immutable Record. Example usage: - * - * // The pojo is your javascript object - * const record = GetMetric.fromJs(pojo); - */ -GetMetric.fromJs = function fromJs(pojo) { - const pojoWithNestedImmutables = RecordUtils.fromJs(pojo, GetMetric.fromJsReviver); - return new extended_GetMetric(pojoWithNestedImmutables); -}; - export const MetricSearchExpression = Immutable.Record({ // optional STRING key: undefined, @@ -794,9 +725,6 @@ export const SearchRuns = Immutable.Record({ // repeated INT64 experiment_ids: Immutable.List(), - // repeated SearchExpression - anded_expressions: Immutable.List(), - // optional ViewType run_view_type: 'ACTIVE_ONLY', }, 'SearchRuns'); @@ -811,10 +739,6 @@ SearchRuns.fromJsReviver = function fromJsReviver(key, value) { case 'experiment_ids': return Immutable.List(value); - case 'anded_expressions': - return Immutable.List(value.map((element) => - SearchExpression.fromJs(element) - )); default: return Immutable.fromJS(value); } diff --git a/mlflow/server/js/src/sdk/MlflowService.js b/mlflow/server/js/src/sdk/MlflowService.js index 7a000837309fa..88f523dadd5ff 100644 --- a/mlflow/server/js/src/sdk/MlflowService.js +++ b/mlflow/server/js/src/sdk/MlflowService.js @@ -9,6 +9,7 @@ import $ from 'jquery'; import JsonBigInt from 'json-bigint'; +import Utils from "../utils/Utils"; const StrictJsonBigInt = JsonBigInt({ strict: true, storeAsString: true }); @@ -21,7 +22,7 @@ export class MlflowService { * @return {Promise} */ static createExperiment({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/experiments/create', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/experiments/create'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -38,7 +39,7 @@ export class MlflowService { * @return {Promise} */ static listExperiments({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/experiments/list', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/experiments/list'), { type: 'GET', dataType: 'json', converters: { @@ -58,7 +59,7 @@ export class MlflowService { * @return {Promise} */ static getExperiment({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/experiments/get', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/experiments/get'), { type: 'GET', dataType: 'json', converters: { @@ -78,7 +79,7 @@ export class MlflowService { * @return {Promise} */ static createRun({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/create', { 
+ return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/create'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -95,7 +96,7 @@ export class MlflowService { * @return {Promise} */ static deleteRun({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/delete', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/delete'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -112,7 +113,7 @@ export class MlflowService { * @return {Promise} */ static restoreRun({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/restore', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/restore'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -129,7 +130,7 @@ export class MlflowService { * @return {Promise} */ static updateRun({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/update', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/update'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -146,7 +147,7 @@ export class MlflowService { * @return {Promise} */ static logMetric({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/log-metric', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/log-metric'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -163,7 +164,7 @@ export class MlflowService { * @return {Promise} */ static logParam({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/log-parameter', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/log-parameter'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -180,47 +181,7 @@ export class MlflowService { * @return {Promise} */ static getRun({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/get', { - type: 'GET', - dataType: 'json', - converters: { - 'text json': StrictJsonBigInt.parse, - }, - data: data, - jsonp: false, - success: success, - error: error, - }); - } - - /** - * @param {GetMetric} data: Immutable Record - * @param {function} success - * @param {function} error - * @return {Promise} - */ - static getMetric({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/metrics/get', { - type: 'GET', - dataType: 'json', - converters: { - 'text json': StrictJsonBigInt.parse, - }, - data: data, - jsonp: false, - success: success, - error: error, - }); - } - - /** - * @param {GetParam} data: Immutable Record - * @param {function} success - * @param {function} error - * @return {Promise} - */ - static getParam({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/params/get', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/get'), { type: 'GET', dataType: 'json', converters: { @@ -240,7 +201,7 @@ export class MlflowService { * @return {Promise} */ static searchRuns({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/runs/search', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/search'), { type: 'POST', dataType: 'json', data: JSON.stringify(data), @@ -257,7 +218,7 @@ export class MlflowService { * @return {Promise} */ static listArtifacts({ data, success, error }) { - return $.ajax('/ajax-api/2.0/preview/mlflow/artifacts/list', { + return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/artifacts/list'), { type: 'GET', dataType: 'json', converters: { @@ -277,7 +238,7 @@ export class 
MlflowService {
    * @return {Promise}
    */
   static getMetricHistory({ data, success, error }) {
-    return $.ajax('/ajax-api/2.0/preview/mlflow/metrics/get-history', {
+    return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/metrics/get-history'), {
       type: 'GET',
       dataType: 'json',
       converters: {
@@ -297,7 +258,7 @@ export class MlflowService {
    * @return {Promise}
    */
   static setTag({ data, success, error }) {
-    return $.ajax('/ajax-api/2.0/preview/mlflow/runs/set-tag', {
+    return $.ajax(Utils.getAjaxUrl('ajax-api/2.0/preview/mlflow/runs/set-tag'), {
       type: 'POST',
       dataType: 'json',
       data: JSON.stringify(data),
diff --git a/mlflow/server/js/src/setupAjaxHeaders.js b/mlflow/server/js/src/setupAjaxHeaders.js
new file mode 100644
index 0000000000000..a873e3999e1d6
--- /dev/null
+++ b/mlflow/server/js/src/setupAjaxHeaders.js
@@ -0,0 +1,29 @@
+import $ from 'jquery';
+import cookie from 'cookie';
+
+// To enable running behind applications that require specific headers
+// to be set during HTTP requests (e.g., CSRF tokens), we support parsing
+// a set of cookies with a key prefix of "mlflow-request-header-$HeaderName",
+// which will be added as an HTTP header to all AJAX requests.
+export const setupAjaxHeaders = () => {
+  const requestHeaders = getRequestHeaders(document.cookie);
+  $(document).ajaxSend((event, jqXHR) => {
+    if (requestHeaders) {
+      for (const [headerKey, headerValue] of Object.entries(requestHeaders)) {
+        jqXHR.setRequestHeader(headerKey, headerValue);
+      }
+    }
+  });
+};
+
+export const getRequestHeaders = (documentCookie) => {
+  const headerCookiePrefix = "mlflow-request-header-";
+  const parsedCookie = cookie.parse(documentCookie);
+  const headers = {};
+  for (const cookieName in parsedCookie) {
+    if (cookieName.startsWith(headerCookiePrefix)) {
+      headers[cookieName.substring(headerCookiePrefix.length)] = parsedCookie[cookieName];
+    }
+  }
+  return headers;
+};
diff --git a/mlflow/server/js/src/setupAjaxHeaders.test.js b/mlflow/server/js/src/setupAjaxHeaders.test.js
new file mode 100644
index 0000000000000..cd69bf6bf2753
--- /dev/null
+++ b/mlflow/server/js/src/setupAjaxHeaders.test.js
@@ -0,0 +1,12 @@
+import { getRequestHeaders } from './setupAjaxHeaders';
+
+test('empty cookie should result in no headers', () => {
+  const headers = getRequestHeaders("");
+  expect(headers).toEqual({});
+});
+
+test('cookies prefixed with mlflow-request-header- should be returned', () => {
+  const headers = getRequestHeaders(
+    "a=b; mlflow-request-header-My-CSRF=1; mlflow-request-header-Hello=World; c=d");
+  expect(headers).toEqual({"My-CSRF": "1", "Hello": "World"});
+});
diff --git a/mlflow/server/js/src/setupCsrf.js b/mlflow/server/js/src/setupCsrf.js
deleted file mode 100644
index 8808a07084c9e..0000000000000
--- a/mlflow/server/js/src/setupCsrf.js
+++ /dev/null
@@ -1,23 +0,0 @@
-import $ from 'jquery';
-import cookie from 'cookie';
-
-// To enable running behind applications that require CSRF tokens, we
-// support parsing an optional "mlflow-csrf-token" cookie, which we will
-// add as an 'X-CSRF-Token' header to all AJAX requests.
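To make the new header-forwarding convention concrete, here is a small sketch of how a deployment behind a CSRF-protecting proxy might use it (the header name after the prefix and all values are invented; the mapping itself matches the setupAjaxHeaders.test.js cases above):

```javascript
// Hypothetical cookies set by a proxy or the serving application (values invented).
document.cookie = "mlflow-request-header-X-CSRF-Token=abc123";
document.cookie = "session=opaque-value"; // no "mlflow-request-header-" prefix: ignored

// getRequestHeaders(document.cookie) would return { "X-CSRF-Token": "abc123" },
// and setupAjaxHeaders() then attaches that header to every jQuery AJAX request.
```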
-export const setupCsrf = () => { - const csrfToken = getCsrfToken(); - $.ajaxSetup({ - beforeSend(xhr) { - if (csrfToken) { - xhr.setRequestHeader(CSRF_HEADER_NAME, csrfToken); - } - } - }); -}; - -export const getCsrfToken = () => { - const parsedCookie = cookie.parse(document.cookie); - return parsedCookie['mlflow-csrf-token']; -}; - -export const CSRF_HEADER_NAME = 'X-CSRF-Token'; diff --git a/mlflow/server/js/src/setupTests.js b/mlflow/server/js/src/setupTests.js index 82edfc9e5adea..5f3516c38c6d9 100644 --- a/mlflow/server/js/src/setupTests.js +++ b/mlflow/server/js/src/setupTests.js @@ -2,3 +2,10 @@ import { configure } from 'enzyme'; import Adapter from 'enzyme-adapter-react-16'; configure({ adapter: new Adapter() }); +// Included to mock local storage in JS tests, see docs at +// https://www.npmjs.com/package/jest-localstorage-mock#in-create-react-app +require('jest-localstorage-mock'); + +// for plotly.js to work +// +window.URL.createObjectURL = function createObjectURL() {}; diff --git a/mlflow/server/js/src/static/no-experiments.svg b/mlflow/server/js/src/static/no-experiments.svg new file mode 100644 index 0000000000000..cfe714d89bcee --- /dev/null +++ b/mlflow/server/js/src/static/no-experiments.svg @@ -0,0 +1,22 @@ + + + + Empty + Created with Sketch. + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mlflow/server/js/src/test-utils/Fixtures.js b/mlflow/server/js/src/test-utils/Fixtures.js new file mode 100644 index 0000000000000..06eecb5ec5a1e --- /dev/null +++ b/mlflow/server/js/src/test-utils/Fixtures.js @@ -0,0 +1,16 @@ +import { Experiment } from '../sdk/MlflowMessages'; + +const createExperiment = ({ + experiment_id = '0', + name = 'Default', + lifecycle_stage = 'active' } = {} +) => ( + Experiment.fromJs({ experiment_id, name, lifecycle_stage }) +); +export default { + createExperiment, + experiments: [ + createExperiment(), + createExperiment({ experiment_id: '1', name: 'Test'}), + ] +}; diff --git a/mlflow/server/js/src/test-utils/ReduxStoreFixtures.js b/mlflow/server/js/src/test-utils/ReduxStoreFixtures.js new file mode 100644 index 0000000000000..820086fd04ea0 --- /dev/null +++ b/mlflow/server/js/src/test-utils/ReduxStoreFixtures.js @@ -0,0 +1,37 @@ +export const emptyState = { + apis: {}, + entities: { + runInfosByUuid: {}, + experimentsById: {}, + } +}; + +export const addApiToState = (state, api) => { + const oldApi = state.apis || {}; + return { + ...state, + apis: { + ...oldApi, + [api.id]: api, + } + }; +}; + +export const addExperimentToState = (state, experiment) => { + const oldExperiments = state.entities.experimentsById; + return { + ...state, + entities: { + ...state.entities, + experimentsById: { + ...oldExperiments, + [experiment.experiment_id]: experiment, + } + } + }; +}; + +export const createPendingApi = (id) => { + return { id, active: true }; +}; + diff --git a/mlflow/server/js/src/utils/LocalStorageUtils.js b/mlflow/server/js/src/utils/LocalStorageUtils.js new file mode 100644 index 0000000000000..562a0ba33fdde --- /dev/null +++ b/mlflow/server/js/src/utils/LocalStorageUtils.js @@ -0,0 +1,69 @@ +/** + * Utils for working with local storage. + */ +export default class LocalStorageUtils { + /** + * Protocol version of MLflow's local storage. Should be incremented on any breaking change in how + * data persisted in local storage is used, to prevent old (invalid) cached data from being loaded + * and breaking the application. 
+ */ + static version = "1.0"; + + /** + * Return a LocalStorageStore corresponding to the specified component and ID, where the ID + * can be used to disambiguate between multiple instances of cached data for the same component + * (e.g. cached data for multiple experiments). + */ + static getStoreForComponent(componentName, id) { + return new LocalStorageStore([componentName, id].join("-")); + } +} + +/** + * Interface to browser local storage that allows for setting key-value pairs under the specified + * "scope". + */ +class LocalStorageStore { + constructor(scope) { + this.scope = scope; + } + static reactComponentStateKey = "ReactComponentState"; + + /** + * Loads React component state cached in local storage into a vanilla JS object. + */ + loadComponentState() { + const storedVal = this.getItem(LocalStorageStore.reactComponentStateKey); + if (storedVal) { + return JSON.parse(storedVal); + } + return {}; + } + + /** + * Save React component state in local storage. + * @param stateRecord: Immutable.Record instance containing component state. + */ + saveComponentState(stateRecord) { + this.setItem( + LocalStorageStore.reactComponentStateKey, JSON.stringify(stateRecord.toJSON())); + } + + /** + * Helper method for constructing a scoped key to use for setting/getting values in + * local storage. + */ + withScopePrefix(key) { + return ["MLflowLocalStorage", LocalStorageUtils.version, this.scope, key].join("-"); + } + + /** Save the specified key-value pair in local storage. */ + setItem(key, value) { + window.localStorage.setItem(this.withScopePrefix(key), value); + } + + /** Fetch the value corresponding to the passed-in key from local storage. */ + getItem(key) { + return window.localStorage.getItem(this.withScopePrefix(key)); + } +} diff --git a/mlflow/server/js/src/utils/LocalStorageUtils.test.js b/mlflow/server/js/src/utils/LocalStorageUtils.test.js new file mode 100644 index 0000000000000..f9eb7679053c4 --- /dev/null +++ b/mlflow/server/js/src/utils/LocalStorageUtils.test.js @@ -0,0 +1,34 @@ +import LocalStorageUtils from './LocalStorageUtils'; +import { ExperimentPagePersistedState } from "../sdk/MlflowLocalStorageMessages"; + +test('Setting key-value pairs in one scope does not affect the other', () => { + const store0 = LocalStorageUtils.getStoreForComponent("SomeTestComponent", 1); + const store1 = LocalStorageUtils.getStoreForComponent("AnotherTestComponent", 1); + const store2 = LocalStorageUtils.getStoreForComponent("SomeTestComponent", 2); + const persistedState0 = new ExperimentPagePersistedState({searchInput: "params.ollKorrect"}); + const persistedState1 = new ExperimentPagePersistedState({searchInput: "metrics.ok"}); + [store1, store2].forEach((otherStore) => { + store0.setItem("myKey", "myCoolVal"); + otherStore.setItem("myKey", "thisValIsBetterYo"); + expect(store0.getItem("myKey")).toEqual("myCoolVal"); + expect(otherStore.getItem("myKey")).toEqual("thisValIsBetterYo"); + + store0.saveComponentState(persistedState0); + otherStore.saveComponentState(persistedState1); + expect(store0.loadComponentState().searchInput).toEqual("params.ollKorrect"); + expect(otherStore.loadComponentState().searchInput).toEqual("metrics.ok"); + }); +}); + +test('Overwriting key-value pairs is possible', () => { + const store = LocalStorageUtils.getStoreForComponent("SomeTestComponent", 1); + store.setItem("a", "b"); + expect(store.getItem("a")).toEqual("b"); + store.setItem("a", "c"); + expect(store.getItem("a")).toEqual("c"); + store.saveComponentState(new 
ExperimentPagePersistedState({searchInput: "params.ollKorrect"})); + expect(store.loadComponentState().searchInput).toEqual("params.ollKorrect"); + store.saveComponentState(new ExperimentPagePersistedState({searchInput: "params.okay"})); + expect(store.loadComponentState().searchInput).toEqual("params.okay"); +}); + diff --git a/mlflow/server/js/src/utils/SearchUtils.js b/mlflow/server/js/src/utils/SearchUtils.js deleted file mode 100644 index a975e69122c6d..0000000000000 --- a/mlflow/server/js/src/utils/SearchUtils.js +++ /dev/null @@ -1,51 +0,0 @@ -export class SearchUtils { - static parseSearchInput(searchInput) { - const trimmedInput = searchInput.trim(); - if (trimmedInput === '') { - return []; - } - const searchClauses = searchInput.split("and"); - return searchClauses.map((clause) => Private.parseSearchClause(clause)); - } -} - -const METRIC_CLAUSE_REGEX = /metrics\.([a-zA-z0-9]+)\s{0,}(=|!=|>|>=|<=|<)\s{0,}(\d+\.{0,}\d{0,})/; -const PARAM_CLAUSE_REGEX = /params\.([a-zA-z0-9]+)\s{0,}(=|!=)\s{0,}"([a-zA-Z0-9.-]+)"/; -class Private { - static parseSearchClause(searchClauseString) { - const trimmedInput = searchClauseString.trim(); - const metricMatches = METRIC_CLAUSE_REGEX.exec(trimmedInput); - if (metricMatches) { - return { - metric: { - key: metricMatches[1], - double: { - comparator: metricMatches[2], - value: parseFloat(metricMatches[3]), - } - } - }; - } - const paramMatches = PARAM_CLAUSE_REGEX.exec(trimmedInput); - if (paramMatches) { - return { - parameter: { - key: paramMatches[1], - string: { - comparator: paramMatches[2], - value: paramMatches[3], - } - } - }; - } - throw new SearchError("The search input should be like 'metrics.alpha >= 0.9' or " + - "'params.file = \"test.txt\"'."); - } -} - -export class SearchError { - constructor(errorMessage) { - this.errorMessage = errorMessage; - } -} - diff --git a/mlflow/server/js/src/utils/Utils.js b/mlflow/server/js/src/utils/Utils.js index a464d094ce50d..76c8fc885f0fa 100644 --- a/mlflow/server/js/src/utils/Utils.js +++ b/mlflow/server/js/src/utils/Utils.js @@ -4,6 +4,9 @@ import notebookSvg from '../static/notebook.svg'; import emptySvg from '../static/empty.svg'; import laptopSvg from '../static/laptop.svg'; import projectSvg from '../static/project.svg'; +import qs from 'qs'; +import { MLFLOW_INTERNAL_PREFIX } from './TagUtils'; +import { message } from 'antd'; class Utils { /** @@ -28,6 +31,11 @@ class Utils { } static runNameTag = 'mlflow.runName'; + static sourceNameTag = 'mlflow.source.name'; + static sourceTypeTag = 'mlflow.source.type'; + static gitCommitTag = 'mlflow.source.git.commit'; + static entryPointTag = 'mlflow.project.entryPoint'; + static userTag = 'mlflow.user'; static formatMetric(value) { if (Math.abs(value) < 10) { @@ -69,13 +77,13 @@ class Utils { /** * Format timestamps from millisecond epoch time. 
 */
-  static formatTimestamp(timestamp) {
+  static formatTimestamp(timestamp, format = 'yyyy-mm-dd HH:MM:ss') {
     if (timestamp === undefined) {
       return '(unknown)';
     }
     const d = new Date(0);
     d.setUTCMilliseconds(timestamp);
-    return dateFormat(d, "yyyy-mm-dd HH:MM:ss");
+    return dateFormat(d, format);
   }
 
   /**
@@ -114,31 +122,94 @@ class Utils {
     return /[@/]github.com[:/]([^/.]+)\/([^/#]+)#?(.*)/;
   }
 
+  static getGitLabRegex() {
+    return /[@/]gitlab.com[:/]([^/.]+)\/([^/#]+)#?(.*)/;
+  }
+
+  static getBitbucketRegex() {
+    return /[@/]bitbucket.org[:/]([^/.]+)\/([^/#]+)#?(.*)/;
+  }
+
+  static getGitRepoUrl(sourceName) {
+    const gitHubMatch = sourceName.match(Utils.getGitHubRegex());
+    const gitLabMatch = sourceName.match(Utils.getGitLabRegex());
+    const bitbucketMatch = sourceName.match(Utils.getBitbucketRegex());
+    let url = null;
+    if (gitHubMatch || gitLabMatch) {
+      const baseUrl = gitHubMatch ? "https://github.com/" : "https://gitlab.com/";
+      const match = gitHubMatch || gitLabMatch;
+      url = baseUrl + match[1] + "/" + match[2].replace(/.git/, '');
+      if (match[3]) {
+        url = url + "/tree/master/" + match[3];
+      }
+    } else if (bitbucketMatch) {
+      const baseUrl = "https://bitbucket.org/";
+      url = baseUrl + bitbucketMatch[1] + "/" + bitbucketMatch[2].replace(/.git/, '');
+      if (bitbucketMatch[3]) {
+        url = url + "/src/master/" + bitbucketMatch[3];
+      }
+    }
+    return url;
+  }
+
+  static getGitCommitUrl(sourceName, sourceVersion) {
+    const gitHubMatch = sourceName.match(Utils.getGitHubRegex());
+    const gitLabMatch = sourceName.match(Utils.getGitLabRegex());
+    const bitbucketMatch = sourceName.match(Utils.getBitbucketRegex());
+    let url = null;
+    if (gitHubMatch || gitLabMatch) {
+      const baseUrl = gitHubMatch ? "https://github.com/" : "https://gitlab.com/";
+      const match = gitHubMatch || gitLabMatch;
+      url = (baseUrl + match[1] + "/" + match[2].replace(/.git/, '') +
+        "/tree/" + sourceVersion) + "/" + match[3];
+    } else if (bitbucketMatch) {
+      const baseUrl = "https://bitbucket.org/";
+      url = (baseUrl + bitbucketMatch[1] + "/" + bitbucketMatch[2].replace(/.git/, '') +
+        "/src/" + sourceVersion) + "/" + bitbucketMatch[3];
+    }
+    return url;
+  }
+
+  /**
+   * Returns a copy of the provided URL with its query parameters set to `queryParams`.
+   * @param url URL string like "http://my-mlflow-server.com/#/experiments/9".
+   * @param queryParams Optional query parameter string like "?param=12345". Query params provided
+   *        via this string will override existing query param values in `url`
+   */
+  static setQueryParams(url, queryParams) {
+    const urlObj = new URL(url);
+    urlObj.search = queryParams || "";
+    return urlObj.toString();
+  }
+
   /**
    * Renders the source name and entry point into an HTML element. Used for display.
-   * @param run MlflowMessages.RunInfo
    * @param tags Object containing tag key value pairs.
+   * @param queryParams Query params to add to certain source type links.
*/ - static renderSource(run, tags) { - let res = Utils.formatSource(run); - if (run.source_type === "PROJECT") { - const match = run.source_name.match(Utils.getGitHubRegex()); - if (match) { - let url = "https://github.com/" + match[1] + "/" + match[2].replace(/.git/, ''); - if (match[3]) { - url = url + "/tree/master/" + match[3]; - } - res = {res}; + static renderSource(tags, queryParams) { + const sourceName = Utils.getSourceName(tags); + const sourceType = Utils.getSourceType(tags); + let res = Utils.formatSource(tags); + if (sourceType === "PROJECT") { + const url = Utils.getGitRepoUrl(sourceName); + if (url) { + res = {res}; } return res; - } else if (run.source_type === "NOTEBOOK") { + } else if (sourceType === "NOTEBOOK") { + const revisionIdTag = 'mlflow.databricks.notebookRevisionID'; const notebookIdTag = 'mlflow.databricks.notebookID'; - const webappUrlTag = 'mlflow.databricks.webappURL'; + const revisionId = tags && tags[revisionIdTag] && tags[revisionIdTag].value; const notebookId = tags && tags[notebookIdTag] && tags[notebookIdTag].value; - const webappUrl = tags && tags[webappUrlTag] && tags[webappUrlTag].value; - if (notebookId && webappUrl) { - res = ( - {Utils.baseName(run.source_name)} + if (notebookId) { + let url = Utils.setQueryParams(window.location.origin, queryParams); + url += `#notebook/${notebookId}`; + if (revisionId) { + url += `/revision/${revisionId}`; + } + res = ( + {Utils.baseName(sourceName)} ); } return res; @@ -171,15 +242,17 @@ class Utils { * Renders the source name and entry point into a string. Used for sorting. * @param run MlflowMessages.RunInfo */ - static formatSource(run) { - if (run.source_type === "PROJECT") { - let res = Utils.dropExtension(Utils.baseName(run.source_name)); - if (run.entry_point_name && run.entry_point_name !== "main") { - res += ":" + run.entry_point_name; + static formatSource(tags) { + const sourceName = Utils.getSourceName(tags); + const entryPointName = Utils.getEntryPointName(tags); + if (Utils.getSourceType(tags) === "PROJECT") { + let res = Utils.dropExtension(Utils.baseName(sourceName)); + if (entryPointName && entryPointName !== "main") { + res += ":" + entryPointName; } return res; } else { - return Utils.baseName(run.source_name); + return Utils.baseName(sourceName); } } @@ -199,15 +272,57 @@ class Utils { return ""; } - static renderVersion(run, shortVersion = true) { - if (run.source_version) { - const versionString = shortVersion ? run.source_version.substring(0, 6) : run.source_version; - if (run.source_type === "PROJECT") { - const match = run.source_name.match(Utils.getGitHubRegex()); - if (match) { - const url = ("https://github.com/" + match[1] + "/" + match[2].replace(/.git/, '') + - "/tree/" + run.source_version) + "/" + match[3]; - return {versionString}; + static getSourceName(runTags) { + const sourceNameTag = runTags[Utils.sourceNameTag]; + if (sourceNameTag) { + return sourceNameTag.value; + } + return ""; + } + + static getSourceType(runTags) { + const sourceTypeTag = runTags[Utils.sourceTypeTag]; + if (sourceTypeTag) { + return sourceTypeTag.value; + } + return ""; + } + + static getSourceVersion(runTags) { + const gitCommitTag = runTags[Utils.gitCommitTag]; + if (gitCommitTag) { + return gitCommitTag.value; + } + return ""; + } + + static getEntryPointName(runTags) { + const entryPointTag = runTags[Utils.entryPointTag]; + if (entryPointTag) { + return entryPointTag.value; + } + return ""; + } + + // TODO(aaron) Remove runInfo when user_id deprecation is complete. 
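As a quick illustration of the tag-based lookups introduced in this file: run metadata that previously lived on RunInfo fields is now read from mlflow.* tags, with RunInfo.user_id kept only as a fallback by the getUser helper defined just below (the tag values here are invented):

```javascript
// Hypothetical tags object in the shape the UI receives (values invented).
const tags = {
  "mlflow.source.name": { value: "git@github.com:mlflow/mlflow-apps.git" },
  "mlflow.source.type": { value: "PROJECT" },
  "mlflow.user": { value: "alice" },
};

Utils.getSourceType(tags);               // "PROJECT"
Utils.getUser({ user_id: "bob" }, tags); // "alice" -- the mlflow.user tag wins
Utils.getUser({ user_id: "bob" }, {});   // "bob"   -- falls back to RunInfo.user_id
```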
+ static getUser(runInfo, runTags) { + const userTag = runTags[Utils.userTag]; + if (userTag) { + return userTag.value; + } + return runInfo.user_id; + } + + static renderVersion(tags, shortVersion = true) { + const sourceVersion = Utils.getSourceVersion(tags); + const sourceName = Utils.getSourceName(tags); + const sourceType = Utils.getSourceType(tags); + if (sourceVersion) { + const versionString = shortVersion ? sourceVersion.substring(0, 6) : sourceVersion; + if (sourceType === "PROJECT") { + const url = Utils.getGitCommitUrl(sourceName, sourceVersion); + if (url) { + return {versionString}; } return versionString; } else { @@ -228,6 +343,64 @@ class Utils { static getRequestWithId(requests, requestId) { return requests.find((r) => r.id === requestId); } + + static getPlotMetricKeysFromUrl(search) { + const params = qs.parse(search); + const plotMetricKeysStr = params && params['plot_metric_keys']; + return plotMetricKeysStr ? JSON.parse(plotMetricKeysStr) : []; + } + + static getSearchParamsFromUrl(search) { + const params = qs.parse(search, {ignoreQueryPrefix: true}); + const str = JSON.stringify(params, + function replaceUndefined(key, value) { + return (value === undefined) ? "" : value; + }); + + return params ? JSON.parse(str) : []; + } + + static getSearchUrlFromState(state) { + const replaced = {}; + for (const key in state) { + if (state[key] === undefined) { + replaced[key] = ''; + } else { + replaced[key] = state[key]; + } + } + return qs.stringify(replaced); + } + + static compareByTimestamp(history1, history2) { + return history1.timestamp - history2.timestamp; + } + + static compareByStepAndTimestamp(history1, history2) { + const stepResult = history1.step - history2.step; + return stepResult === 0 ? (history1.timestamp - history2.timestamp) : stepResult; + } + + static getVisibleTagValues(tags) { + // Collate tag objects into list of [key, value] lists and filter MLflow-internal tags + return Object.values(tags).map((t) => + [t.getKey(), t.getValue()] + ).filter(t => + !t[0].startsWith(MLFLOW_INTERNAL_PREFIX) + ); + } + + static getAjaxUrl(relativeUrl) { + if (process.env.USE_ABSOLUTE_AJAX_URLS === "true") { + return '/' + relativeUrl; + } + return relativeUrl; + } + + static logErrorAndNotifyUser(e) { + console.error(e); + message.error(e.getUserVisibleError()); + } } export default Utils; diff --git a/mlflow/server/js/src/utils/Utils.test.js b/mlflow/server/js/src/utils/Utils.test.js index 1bb78bcc23b62..dda0035c0179c 100644 --- a/mlflow/server/js/src/utils/Utils.test.js +++ b/mlflow/server/js/src/utils/Utils.test.js @@ -1,9 +1,7 @@ import Utils from './Utils'; -import { RunInfo } from '../sdk/MlflowMessages'; import React from 'react'; import { shallow } from 'enzyme'; - test("formatMetric", () => { expect(Utils.formatMetric(0)).toEqual("0"); expect(Utils.formatMetric(0.5)).toEqual("0.5"); @@ -75,59 +73,97 @@ test("baseName", () => { }); test("formatSource & renderSource", () => { - const source_with_name = RunInfo.fromJs({ - "source_name": "source", - "entry_point_name": "entry", - "source_type": "PROJECT", - }); + const source_with_name = { + "mlflow.source.name": { value: "source" }, + "mlflow.source.type": { value: "PROJECT" }, + "mlflow.project.entryPoint": { value: "entry" }, + }; expect(Utils.formatSource(source_with_name)).toEqual("source:entry"); expect(Utils.renderSource(source_with_name)).toEqual("source:entry"); - const source_with_main = RunInfo.fromJs({ - "source_name": "source1", - "entry_point_name": "main", - "source_type": "PROJECT", - }); + const 
source_with_main = { + "mlflow.source.name": { value: "source1" }, + "mlflow.source.type": { value: "PROJECT" }, + "mlflow.project.entryPoint": { value: "main" }, + }; expect(Utils.formatSource(source_with_main)).toEqual("source1"); expect(Utils.renderSource(source_with_main)).toEqual("source1"); - const source_no_name = RunInfo.fromJs({ - "source_name": "source2", - "source_type": "PROJECT" - }); + const source_no_name = { + "mlflow.source.name": { value: "source2" }, + "mlflow.source.type": { value: "PROJECT" }, + }; expect(Utils.formatSource(source_no_name)).toEqual("source2"); expect(Utils.renderSource(source_no_name)).toEqual("source2"); - const non_project_source = RunInfo.fromJs({ - "source_name": "source3", - "entry_point_name": "entry", - "source_type": "NOTEBOOK", - }); + const non_project_source = { + "mlflow.source.name": { value: "source3" }, + "mlflow.source.type": { value: "NOTEBOOK" }, + "mlflow.project.entryPoint": { value: "entry" }, + }; expect(Utils.formatSource(non_project_source)).toEqual("source3"); expect(Utils.renderSource(non_project_source)).toEqual("source3"); // formatSource should return a string, renderSource should return an HTML element. - const github_url = RunInfo.fromJs({ - "source_name": "git@github.com:mlflow/mlflow-apps.git", - "entry_point_name": "entry", - "source_type": "PROJECT", - }); + const github_url = { + "mlflow.source.name": { value: "git@github.com:mlflow/mlflow-apps.git" }, + "mlflow.source.type": { value: "PROJECT" }, + "mlflow.project.entryPoint": { value: "entry" }, + }; expect(Utils.formatSource(github_url)).toEqual("mlflow-apps:entry"); expect(Utils.renderSource(github_url)).toEqual( - mlflow-apps:entry); + mlflow-apps:entry); + const gitlab_url = { + "mlflow.source.name": { value: "git@gitlab.com:mlflow/mlflow-apps.git" }, + "mlflow.source.type": { value: "PROJECT" }, + "mlflow.project.entryPoint": { value: "entry" }, + }; + expect(Utils.formatSource(gitlab_url)).toEqual("mlflow-apps:entry"); + expect(Utils.renderSource(gitlab_url)).toEqual( + mlflow-apps:entry); + + const bitbucket_url = { + "mlflow.source.name": { value: "git@bitbucket.org:mlflow/mlflow-apps.git" }, + "mlflow.source.type": { value: "PROJECT" }, + "mlflow.project.entryPoint": { value: "entry" }, + }; + expect(Utils.formatSource(bitbucket_url)).toEqual("mlflow-apps:entry"); + expect(Utils.renderSource(bitbucket_url)).toEqual( + mlflow-apps:entry); - const databricksRun = RunInfo.fromJs({ - "source_name": "/Users/admin/test", - "source_type": "NOTEBOOK" - }); const databricksRunTags = { + "mlflow.source.name": { value: "/Users/admin/test" }, + "mlflow.source.type": { value: "NOTEBOOK" }, "mlflow.databricks.notebookID": { value: "13" }, "mlflow.databricks.webappURL": { value: "https://databricks.com" }, }; - const wrapper = shallow(Utils.renderSource(databricksRun, databricksRunTags)); + const wrapper = shallow(Utils.renderSource(databricksRunTags)); expect(wrapper.is("a")).toEqual(true); - expect(wrapper.props().href).toEqual("https://databricks.com/#notebook/13"); + expect(wrapper.props().href).toEqual("http://localhost/#notebook/13"); + + const databricksRunRevisionTags = { + "mlflow.source.name": { value: "/Users/admin/test" }, + "mlflow.source.type": { value: "NOTEBOOK" }, + "mlflow.databricks.notebookRevisionID": { value: "42" }, + "mlflow.databricks.notebookID": { value: "13" }, + "mlflow.databricks.webappURL": { value: "https://databricks.com" }, + }; + const wrapper2 = shallow(Utils.renderSource(databricksRunRevisionTags)); + 
expect(wrapper2.is("a")).toEqual(true); + expect(wrapper2.props().href).toEqual("http://localhost/#notebook/13/revision/42"); + + const wrapper3 = shallow(Utils.renderSource(databricksRunRevisionTags, "?o=123")); + expect(wrapper3.is("a")).toEqual(true); + // Query params must appear before the hash, see https://tools.ietf.org/html/rfc3986#section-4.2 + // and https://stackoverflow.com/a/34772568 + expect(wrapper3.props().href).toEqual("http://localhost/?o=123#notebook/13/revision/42"); +}); + +test("addQueryParams", () => { + expect(Utils.setQueryParams("http://localhost/foo", "?o=123")).toEqual("http://localhost/foo?o=123"); + expect(Utils.setQueryParams("http://localhost/foo?param=val", "?o=123")).toEqual("http://localhost/foo?o=123"); + expect(Utils.setQueryParams("http://localhost/foo?param=val", "?param=newval")).toEqual("http://localhost/foo?param=newval"); }); test("dropExtension", () => { @@ -166,3 +202,38 @@ test("getGitHubRegex", () => { expect([].concat(match)).toEqual(lst[1]); }); }); + +test('getPlotMetricKeysFromUrl', () => { + const url0 = '?runs=["runUuid1","runUuid2"]&plot_metric_keys=[]'; + const url1 = '?runs=["runUuid1","runUuid2"]&plot_metric_keys=["metric_1"]'; + const url2 = '?runs=["runUuid1","runUuid2"]&plot_metric_keys=["metric_1","metric_2"]'; + expect(Utils.getPlotMetricKeysFromUrl(url0)).toEqual([]); + expect(Utils.getPlotMetricKeysFromUrl(url1)).toEqual(['metric_1']); + expect(Utils.getPlotMetricKeysFromUrl(url2)).toEqual(['metric_1', 'metric_2']); +}); + +test('getSearchParamsFromUrl', () => { + const url0 = '?paramKeyFilterString=filt&metricKeyFilterString=metrics&searchInput='; + const url1 = '?p=&q=&r='; + const url2 = '?'; + const url3 = '?paramKeyFilterString=some=param&metricKeyFilterString=somemetric&searchInput=some-Input'; + expect(Utils.getSearchParamsFromUrl(url0)).toEqual({paramKeyFilterString: "filt", + metricKeyFilterString: "metrics", + searchInput: ""}); + expect(Utils.getSearchParamsFromUrl(url1)).toEqual({p: "", q: "", r: ""}); + expect(Utils.getSearchParamsFromUrl(url2)).toEqual({}); + expect(Utils.getSearchParamsFromUrl(url3)).toEqual({paramKeyFilterString: "some=param", + metricKeyFilterString: "somemetric", + searchInput: "some-Input"}); +}); + +test('getSearchUrlFromState', () => { + const st0 = {}; + const st1 = {a: "example"}; + const st2 = {b: "bbbbbb"}; + const st3 = {param: "params", metrics: undefined, searchInput: "someExpression"}; + expect(Utils.getSearchUrlFromState(st0)).toEqual(""); + expect(Utils.getSearchUrlFromState(st1)).toEqual("a=example"); + expect(Utils.getSearchUrlFromState(st2)).toEqual("b=bbbbbb"); + expect(Utils.getSearchUrlFromState(st3)).toEqual("param=params&metrics=&searchInput=someExpression"); +}); diff --git a/mlflow/sklearn.py b/mlflow/sklearn.py index c5beac6b64975..1006b3b9c2d21 100644 --- a/mlflow/sklearn.py +++ b/mlflow/sklearn.py @@ -11,32 +11,81 @@ from __future__ import absolute_import -import json import os import pickle -import shutil +import yaml -import click -import flask -import pandas -import sklearn - -from mlflow.utils import cli_args +import mlflow from mlflow import pyfunc +from mlflow.exceptions import MlflowException from mlflow.models import Model -import mlflow.tracking +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, INTERNAL_ERROR +from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import 
_get_flavor_configuration + +FLAVOR_NAME = "sklearn" + +SERIALIZATION_FORMAT_PICKLE = "pickle" +SERIALIZATION_FORMAT_CLOUDPICKLE = "cloudpickle" +SUPPORTED_SERIALIZATION_FORMATS = [ + SERIALIZATION_FORMAT_PICKLE, + SERIALIZATION_FORMAT_CLOUDPICKLE +] -def save_model(sk_model, path, conda_env=None, mlflow_model=Model()): + +def get_default_conda_env(include_cloudpickle=False): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. + """ + import sklearn + pip_deps = None + if include_cloudpickle: + import cloudpickle + pip_deps = ["cloudpickle=={}".format(cloudpickle.__version__)] + return _mlflow_conda_env( + additional_conda_deps=[ + "scikit-learn={}".format(sklearn.__version__), + ], + additional_pip_deps=pip_deps, + additional_conda_channels=None + ) + + +def save_model(sk_model, path, conda_env=None, mlflow_model=Model(), + serialization_format=SERIALIZATION_FORMAT_CLOUDPICKLE): """ Save a scikit-learn model to a path on the local file system. :param sk_model: scikit-learn model to be saved. :param path: Local path where the model is to be saved. - :param conda_env: Path to a Conda environment file. If provided, this describes the environment - this model should be run in. At minimum, it should specify python, scikit-learn, - and mlflow with appropriate versions. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this describes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If `None`, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'scikit-learn=0.19.2' + ] + } + :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to. + :param serialization_format: The format in which to serialize the model. This should be one of + the formats listed in + ``mlflow.sklearn.SUPPORTED_SERIALIZATION_FORMATS``. The Cloudpickle + format, ``mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE``, + provides better cross-system compatibility by identifying and + packaging code dependencies with the serialized model. >>> import mlflow.sklearn >>> from sklearn.datasets import load_iris @@ -44,37 +93,86 @@ def save_model(sk_model, path, conda_env=None, mlflow_model=Model()): >>> iris = load_iris() >>> sk_model = tree.DecisionTreeClassifier() >>> sk_model = sk_model.fit(iris.data, iris.target) + >>> #Save the model in cloudpickle format + >>> #set path to location for persistence + >>> sk_path_dir_1 = ... + >>> mlflow.sklearn.save_model( + >>> sk_model, sk_path_dir_1, + >>> serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE) + >>> + >>> #Save the model in pickle format >>> #set path to location for persistence - >>> sk_path_dir = ... - >>> mlflow.sklearn.save_model(sk_model, sk_path_dir) + >>> sk_path_dir_2 = ... + >>> mlflow.sklearn.save_model(sk_model, sk_path_dir_2, + >>> serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE) """ + import sklearn + if serialization_format not in SUPPORTED_SERIALIZATION_FORMATS: + raise MlflowException( + message=( + "Unrecognized serialization format: {serialization_format}.
Please specify one" + " of the following supported formats: {supported_formats}.".format( + serialization_format=serialization_format, + supported_formats=SUPPORTED_SERIALIZATION_FORMATS)), + error_code=INVALID_PARAMETER_VALUE) + if os.path.exists(path): - raise Exception("Path '{}' already exists".format(path)) + raise MlflowException(message="Path '{}' already exists".format(path), + error_code=RESOURCE_ALREADY_EXISTS) os.makedirs(path) - model_file = os.path.join(path, "model.pkl") - with open(model_file, "wb") as out: - pickle.dump(sk_model, out) - model_conda_env = None - if conda_env: - model_conda_env = os.path.basename(os.path.abspath(conda_env)) - shutil.copyfile(conda_env, os.path.join(path, model_conda_env)) - pyfunc.add_to_model(mlflow_model, loader_module="mlflow.sklearn", data="model.pkl", - env=model_conda_env) - mlflow_model.add_flavor("sklearn", - pickled_model="model.pkl", - sklearn_version=sklearn.__version__) + model_data_subpath = "model.pkl" + _save_model(sk_model=sk_model, output_path=os.path.join(path, model_data_subpath), + serialization_format=serialization_format) + + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env( + include_cloudpickle=serialization_format == SERIALIZATION_FORMAT_CLOUDPICKLE) + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + + pyfunc.add_to_model(mlflow_model, loader_module="mlflow.sklearn", data=model_data_subpath, + env=conda_env_subpath) + mlflow_model.add_flavor(FLAVOR_NAME, + pickled_model=model_data_subpath, + sklearn_version=sklearn.__version__, + serialization_format=serialization_format) mlflow_model.save(os.path.join(path, "MLmodel")) -def log_model(sk_model, artifact_path, conda_env=None): +def log_model(sk_model, artifact_path, conda_env=None, + serialization_format=SERIALIZATION_FORMAT_CLOUDPICKLE): """ Log a scikit-learn model as an MLflow artifact for the current run. :param sk_model: scikit-learn model to be saved. :param artifact_path: Run-relative artifact path. - :param conda_env: Path to a Conda environment file. If provided, this describes the environment - this model should be run in. At minimum, it should specify python, scikit-learn, - and mlflow with appropriate versions. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this describes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If `None`, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'scikit-learn=0.19.2' + ] + } + + :param serialization_format: The format in which to serialize the model. This should be one of + the formats listed in + ``mlflow.sklearn.SUPPORTED_SERIALIZATION_FORMATS``. The Cloudpickle + format, ``mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE``, + provides better cross-system compatibility by identifying and + packaging code dependencies with the serialized model.
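The serialization options documented above can be exercised end to end. A minimal sketch, assuming scikit-learn is installed and the two output directories (illustrative paths, not from this diff) do not yet exist:

```python
import mlflow.sklearn
from sklearn import tree
from sklearn.datasets import load_iris

iris = load_iris()
sk_model = tree.DecisionTreeClassifier().fit(iris.data, iris.target)

# Persist the same model once per supported serialization format.
mlflow.sklearn.save_model(
    sk_model, "/tmp/iris_cloudpickle",  # illustrative output path
    serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE)
mlflow.sklearn.save_model(
    sk_model, "/tmp/iris_pickle",  # illustrative output path
    serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE)

# Cloudpickle output is pickle-compatible, so both copies load the same way.
reloaded = mlflow.sklearn.load_model("/tmp/iris_cloudpickle")
print(reloaded.predict(iris.data[:5]))
```

Defaulting to cloudpickle trades a heavier dependency for better portability of custom code, which is why the default conda environment gains a cloudpickle pin only in that case.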
>>> import mlflow >>> import mlflow.sklearn @@ -93,83 +191,74 @@ def log_model(sk_model, artifact_path, conda_env=None): return Model.log(artifact_path=artifact_path, flavor=mlflow.sklearn, sk_model=sk_model, - conda_env=conda_env) + conda_env=conda_env, + serialization_format=serialization_format) def _load_model_from_local_file(path): """Load a scikit-learn model saved as an MLflow artifact on the local file system.""" - # TODO: we could validate the SciKit-Learn version here - model = Model.load(os.path.join(path, "MLmodel")) - assert "sklearn" in model.flavors - params = model.flavors["sklearn"] - with open(os.path.join(path, params["pickled_model"]), "rb") as f: + # TODO: we could validate the scikit-learn version here + with open(path, "rb") as f: + # Models serialized with Cloudpickle can be deserialized using Pickle; in fact, + # Cloudpickle.load() is just a redefinition of pickle.load(). Therefore, we do + # not need to check the serialization format of the model before deserializing. return pickle.load(f) def _load_pyfunc(path): """ Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + + :param path: Local filesystem path to the MLflow Model with the ``sklearn`` flavor. """ - with open(path, "rb") as f: - return pickle.load(f) + return _load_model_from_local_file(path) -def load_model(path, run_id=None): +def _save_model(sk_model, output_path, serialization_format): """ - Load a scikit-learn model from a local file (if ``run_id`` is None) or a run. + :param sk_model: The scikit-learn model to serialize. + :param output_path: The file path to which to write the serialized model. + :param serialization_format: The format in which to serialize the model. This should be one of + the following: ``mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE`` or + ``mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE``. + """ + with open(output_path, "wb") as out: + if serialization_format == SERIALIZATION_FORMAT_PICKLE: + pickle.dump(sk_model, out) + elif serialization_format == SERIALIZATION_FORMAT_CLOUDPICKLE: + import cloudpickle + cloudpickle.dump(sk_model, out) + else: + raise MlflowException( + message="Unrecognized serialization format: {serialization_format}".format( + serialization_format=serialization_format), + error_code=INTERNAL_ERROR) - :param path: Local filesystem path or run-relative artifact path to the model saved - by :py:func:`mlflow.sklearn.save_model`. - :param run_id: Run ID. If provided, combined with ``path`` to identify the model. - >>> import mlflow.sklearn - >>> sk_model = mlflow.sklearn.load_model("sk_models", run_id="96771d893a5e46159d9f3b49bf9013e2") - >>> #use Pandas DataFrame to make predictions - >>> pandas_df = ... - >>> predictions = sk_model.predict(pandas_df) +def load_model(model_uri): """ - if run_id is not None: - path = mlflow.tracking.utils._get_model_log_dir(model_name=path, run_id=run_id) - return _load_model_from_local_file(path) + Load a scikit-learn model from a local file or a run. + :param model_uri: The location, in URI format, of the MLflow model, for example: -@click.group("sklearn") -def commands(): - """ - Serve scikit-learn models locally. + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` - To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI - environment variable to the URL of the desired server.
- """ - pass - - -@commands.command("serve") -@cli_args.MODEL_PATH -@click.option("--run_id", "-r", metavar="RUN_ID", help="Run ID to look for the model in.") -@click.option("--port", "-p", default=5000, help="Server port. [default: 5000]") -@click.option("--host", default="127.0.0.1", - help="The networking interface on which the prediction server listens. Defaults to " - "127.0.0.1. Use 0.0.0.0 to bind to all addresses, which is useful for running " - "inside of docker.") -def serve_model(model_path, run_id=None, port=None, host="127.0.0.1"): - """ - Serve a scikit-learn model saved with MLflow. + For more information about supported URI schemes, see + `Referencing Artifacts `_. + + :return: A scikit-learn model. - If ``run_id`` is specified, ``model_path`` is treated as an artifact path within that run; - otherwise it is treated as a local path. + >>> import mlflow.sklearn + >>> sk_model = mlflow.sklearn.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2/"sk_models") + >>> #use Pandas DataFrame to make predictions + >>> pandas_df = ... + >>> predictions = sk_model.predict(pandas_df) """ - model = load_model(run_id=run_id, path=model_path) - app = flask.Flask(__name__) - - @app.route('/invocations', methods=['POST']) - def predict(): # pylint: disable=unused-variable - if flask.request.content_type != 'application/json': - return flask.Response(status=415, response='JSON data expected', mimetype='text/plain') - data = flask.request.data.decode('utf-8') - records = pandas.read_json(data, orient="records") - predictions = model.predict(records) - result = json.dumps({"predictions": predictions.tolist()}) - return flask.Response(status=200, response=result + "\n", mimetype='application/json') - - app.run(host, port=port) + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) + sklearn_model_artifacts_path = os.path.join(local_model_path, flavor_conf['pickled_model']) + return _load_model_from_local_file(path=sklearn_model_artifacts_path) diff --git a/mlflow/spark.py b/mlflow/spark.py index 9ab226c60767a..e36ef93ff1899 100644 --- a/mlflow/spark.py +++ b/mlflow/spark.py @@ -22,37 +22,70 @@ from __future__ import absolute_import import os -import shutil - -import pyspark -from pyspark import SparkContext -from pyspark.ml.pipeline import PipelineModel +import yaml +import logging import mlflow from mlflow import pyfunc, mleap +from mlflow.exceptions import MlflowException from mlflow.models import Model -from mlflow.utils.logging_utils import eprint +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration +from mlflow.utils.file_utils import TempDir FLAVOR_NAME = "spark" # Default temporary directory on DFS. Used to write / read from Spark ML models. DFS_TMP = "/tmp/mlflow" +_SPARK_MODEL_PATH_SUB = "sparkml" + +_logger = logging.getLogger(__name__) + + +def get_default_conda_env(): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. 
+ """ + import pyspark + + return _mlflow_conda_env( + additional_conda_deps=[ + "pyspark={}".format(pyspark.__version__), + ], + additional_pip_deps=None, + additional_conda_channels=None) -def log_model(spark_model, artifact_path, conda_env=None, jars=None, dfs_tmpdir=None, +def log_model(spark_model, artifact_path, conda_env=None, dfs_tmpdir=None, sample_input=None): """ Log a Spark MLlib model as an MLflow artifact for the current run. This uses the - MLlib persistence format, and the logged model will have the Spark flavor. + MLlib persistence format and produces an MLflow Model with the Spark flavor. - :param spark_model: PipelineModel to be saved. + :param spark_model: Spark model to be saved - MLFlow can only save descendants of + pyspark.ml.Model which implement MLReadable and MLWritable. :param artifact_path: Run relative artifact path. - :param conda_env: Path to a Conda environment file. If provided, defines environment for the - model. At minimum, it should specify python, pyspark, and mlflow with - appropriate versions. - :param jars: List of JARs needed by the model. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this decribes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If `None`, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'pyspark=2.3.0' + ] + } :param dfs_tmpdir: Temporary directory path on Distributed (Hadoop) File System (DFS) or local - filesystem if running in local mode. The model will be writen in this + filesystem if running in local mode. The model is written in this destination and then copied into the model's artifact directory. This is necessary as Spark ML models read from and write to DFS if running on a cluster. If this operation completes successfully, all temporary files @@ -76,9 +109,40 @@ def log_model(spark_model, artifact_path, conda_env=None, jars=None, dfs_tmpdir= >>> model = pipeline.fit(training) >>> mlflow.spark.log_model(model, "spark-model") """ - return Model.log(artifact_path=artifact_path, flavor=mlflow.spark, spark_model=spark_model, - jars=jars, conda_env=conda_env, dfs_tmpdir=dfs_tmpdir, - sample_input=sample_input) + from py4j.protocol import Py4JJavaError + + _validate_model(spark_model) + from pyspark.ml import PipelineModel + if not isinstance(spark_model, PipelineModel): + spark_model = PipelineModel([spark_model]) + run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id + run_root_artifact_uri = mlflow.get_artifact_uri() + # If the artifact URI is a local filesystem path, defer to Model.log() to persist the model, + # since Spark may not be able to write directly to the driver's filesystem. For example, + # writing to `file:/uri` will write to the local filesystem from each executor, which will + # be incorrect on multi-node clusters - to avoid such issues we just use the Model.log() path + # here. 
+ if mlflow.tracking.utils._is_local_uri(run_root_artifact_uri): + return Model.log(artifact_path=artifact_path, flavor=mlflow.spark, spark_model=spark_model, + conda_env=conda_env, dfs_tmpdir=dfs_tmpdir, + sample_input=sample_input) + # If Spark cannot write directly to the artifact repo, defer to Model.log() to persist the + # model + model_dir = os.path.join(run_root_artifact_uri, artifact_path) + try: + spark_model.save(os.path.join(model_dir, _SPARK_MODEL_PATH_SUB)) + except Py4JJavaError: + return Model.log(artifact_path=artifact_path, flavor=mlflow.spark, spark_model=spark_model, + conda_env=conda_env, dfs_tmpdir=dfs_tmpdir, + sample_input=sample_input) + + # Otherwise, override the default model log behavior and save model directly to artifact repo + mlflow_model = Model(artifact_path=artifact_path, run_id=run_id) + with TempDir() as tmp: + tmp_model_metadata_dir = tmp.path() + _save_model_metadata( + tmp_model_metadata_dir, spark_model, mlflow_model, sample_input, conda_env) + mlflow.tracking.fluent.log_artifacts(tmp_model_metadata_dir, artifact_path) def _tmp_path(dfs_tmp): @@ -103,16 +167,23 @@ def __init__(self): @classmethod def _jvm(cls): + from pyspark import SparkContext + return SparkContext._gateway.jvm @classmethod def _fs(cls): if not cls._filesystem: - sc = SparkContext.getOrCreate() - cls._conf = sc._jsc.hadoopConfiguration() - cls._filesystem = cls._jvm().org.apache.hadoop.fs.FileSystem.get(cls._conf) + cls._filesystem = cls._jvm().org.apache.hadoop.fs.FileSystem.get(cls._conf()) return cls._filesystem + @classmethod + def _conf(cls): + from pyspark import SparkContext + + sc = SparkContext.getOrCreate() + return sc._jsc.hadoopConfiguration() + @classmethod def _local_path(cls, path): return cls._jvm().org.apache.hadoop.fs.Path(os.path.abspath(path)) @@ -146,7 +217,7 @@ def maybe_copy_from_local_file(cls, src, dst): if qualified_local_path == "file:" + local_path.toString(): return local_path.toString() cls.copy_from_local_file(src, dst, remove_src=False) - eprint("Copied SparkML model to %s" % dst) + _logger.info("Copied SparkML model to %s", dst) return dst @classmethod @@ -154,27 +225,82 @@ def delete(cls, path): cls._fs().delete(cls._remote_path(path), True) -def save_model(spark_model, path, mlflow_model=Model(), conda_env=None, jars=None, +def _save_model_metadata(dst_dir, spark_model, mlflow_model, sample_input, conda_env): + """ + Saves model metadata into the passed-in directory. The persisted metadata assumes that a + model can be loaded from a relative path to the metadata file (currently hard-coded to + "sparkml"). 
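The branching above is internal to ``log_model``: depending on the artifact URI, the model is either written directly to the artifact repository or persisted via ``Model.log()``. From the caller's side the flow is unchanged. A sketch under the assumption that an active SparkSession and a ``training`` DataFrame with ``text`` and ``label`` columns already exist (both are illustrative, not defined in this diff):

```python
import mlflow
import mlflow.spark
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer

tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashing_tf = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10, regParam=0.001)
model = Pipeline(stages=[tokenizer, hashing_tf, lr]).fit(training)  # `training` assumed

with mlflow.start_run() as run:
    # Either saves directly to the artifact repo or falls back to
    # Model.log(), as described in the comments above.
    mlflow.spark.log_model(model, "spark-model")

# The runs URI scheme introduced in this change resolves the logged model.
reloaded = mlflow.spark.load_model("runs:/{}/spark-model".format(run.info.run_id))
```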
+ """ + import pyspark + + if sample_input is not None: + mleap.add_to_model(mlflow_model=mlflow_model, path=dst_dir, spark_model=spark_model, + sample_input=sample_input) + + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env() + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(dst_dir, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + + mlflow_model.add_flavor(FLAVOR_NAME, pyspark_version=pyspark.__version__, + model_data=_SPARK_MODEL_PATH_SUB) + pyfunc.add_to_model(mlflow_model, loader_module="mlflow.spark", data=_SPARK_MODEL_PATH_SUB, + env=conda_env_subpath) + mlflow_model.save(os.path.join(dst_dir, "MLmodel")) + + +def _validate_model(spark_model): + from pyspark.ml.util import MLReadable, MLWritable + from pyspark.ml import Model as PySparkModel + if not isinstance(spark_model, PySparkModel) \ + or not isinstance(spark_model, MLReadable) \ + or not isinstance(spark_model, MLWritable): + raise MlflowException( + "Cannot serialize this model. MLFlow can only save descendants of pyspark.Model" + "that implement MLWritable and MLReadable.", + INVALID_PARAMETER_VALUE) + + +def save_model(spark_model, path, mlflow_model=Model(), conda_env=None, dfs_tmpdir=None, sample_input=None): """ - Save a Spark MLlib PipelineModel to a local path. + Save a Spark MLlib Model to a local path. By default, this function saves models using the Spark MLlib persistence mechanism. Additionally, if a sample input is specified using the ``sample_input`` parameter, the model is also serialized in MLeap format and the MLeap flavor is added. - :param spark_model: Spark PipelineModel to be saved. Can save only PipelineModels. + :param spark_model: Spark model to be saved - MLFlow can only save descendants of + pyspark.ml.Model which implement MLReadable and MLWritable. :param path: Local path where the model is to be saved. :param mlflow_model: MLflow model config this flavor is being added to. - :param conda_env: Conda environment this model depends on. - :param jars: List of JARs needed by the model. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this decribes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If `None`, the default + :func:`get_default_conda_env()` environment is added to the model. + The following is an *example* dictionary representation of a Conda + environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'pyspark=2.3.0' + ] + } :param dfs_tmpdir: Temporary directory path on Distributed (Hadoop) File System (DFS) or local - filesystem if running in local mode. The model will be written in this + filesystem if running in local mode. The model is be written in this destination and then copied to the requested local path. This is necessary as Spark ML models read from and write to DFS if running on a cluster. All - temporary files created on the DFS will be removed if this operation + temporary files created on the DFS are removed if this operation completes successfully. Defaults to ``/tmp/mlflow``. - :param sample_input: A sample input that will be used to add the MLeap flavor to the model. + :param sample_input: A sample input that is used to add the MLeap flavor to the model. 
This must be a PySpark DataFrame that the model can evaluate. If ``sample_input`` is ``None``, the MLeap flavor is not added. @@ -185,37 +311,26 @@ def save_model(spark_model, path, mlflow_model=Model(), conda_env=None, jars=Non >>> model = ... >>> mlflow.spark.save_model(model, "spark-model") """ - if jars: - raise Exception("jar dependencies are not implemented") - - if sample_input is not None: - mleap.add_to_model(mlflow_model, path, spark_model, sample_input) - + _validate_model(spark_model) + from pyspark.ml import PipelineModel if not isinstance(spark_model, PipelineModel): - raise Exception("Not a PipelineModel. SparkML can only save PipelineModels.") - + spark_model = PipelineModel([spark_model]) # Spark ML stores the model on DFS if running on a cluster # Save it to a DFS temp dir first and copy it to local path if dfs_tmpdir is None: dfs_tmpdir = DFS_TMP tmp_path = _tmp_path(dfs_tmpdir) spark_model.save(tmp_path) - sparkml_data_path_sub = "sparkml" - sparkml_data_path = os.path.abspath(os.path.join(path, sparkml_data_path_sub)) + sparkml_data_path = os.path.abspath(os.path.join(path, _SPARK_MODEL_PATH_SUB)) _HadoopFileSystem.copy_to_local_file(tmp_path, sparkml_data_path, remove_src=True) - pyspark_version = pyspark.version.__version__ - model_conda_env = None - if conda_env: - model_conda_env = os.path.basename(os.path.abspath(conda_env)) - shutil.copyfile(conda_env, os.path.join(path, model_conda_env)) - mlflow_model.add_flavor(FLAVOR_NAME, pyspark_version=pyspark_version, - model_data=sparkml_data_path_sub) - pyfunc.add_to_model(mlflow_model, loader_module="mlflow.spark", data=sparkml_data_path_sub, - env=model_conda_env) - mlflow_model.save(os.path.join(path, "MLmodel")) + _save_model_metadata( + dst_dir=path, spark_model=spark_model, mlflow_model=mlflow_model, + sample_input=sample_input, conda_env=conda_env) def _load_model(model_path, dfs_tmpdir=None): + from pyspark.ml.pipeline import PipelineModel + if dfs_tmpdir is None: dfs_tmpdir = DFS_TMP tmp_path = _tmp_path(dfs_tmpdir) @@ -226,14 +341,22 @@ def _load_model(model_path, dfs_tmpdir=None): return PipelineModel.load(model_path) -def load_model(path, run_id=None, dfs_tmpdir=None): +def load_model(model_uri, dfs_tmpdir=None): """ Load the Spark MLlib model from the path. - :param path: Local filesystem path or run-relative artifact path to the model. - :param run_id: Run ID. If provided, combined with ``path`` to identify the model. + :param model_uri: The location, in URI format, of the MLflow model, for example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see the "Referencing Artifacts" + section of the MLflow documentation. :param dfs_tmpdir: Temporary directory path on Distributed (Hadoop) File System (DFS) or local - filesystem if running in local mode. The model will be loaded from this + filesystem if running in local mode. The model is loaded from this destination. Defaults to ``/tmp/mlflow``. :return: pyspark.ml.pipeline.PipelineModel @@ -248,24 +371,24 @@ def load_model(path, run_id=None, dfs_tmpdir=None): >>> # Make predictions on test documents.
>>> prediction = model.transform(test) """ - if run_id is not None: - path = mlflow.tracking.utils._get_model_log_dir(model_name=path, run_id=run_id) - m = Model.load(os.path.join(path, 'MLmodel')) - if FLAVOR_NAME not in m.flavors: - raise Exception("Model does not have {} flavor".format(FLAVOR_NAME)) - conf = m.flavors[FLAVOR_NAME] - model_path = os.path.join(path, conf['model_data']) - return _load_model(model_path=model_path, dfs_tmpdir=dfs_tmpdir) + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) + spark_model_artifacts_path = os.path.join(local_model_path, flavor_conf['model_data']) + return _load_model(model_path=spark_model_artifacts_path, dfs_tmpdir=dfs_tmpdir) def _load_pyfunc(path): """ Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + + :param path: Local filesystem path to the MLflow Model with the ``spark`` flavor. """ # NOTE: The getOrCreate() call below may change settings of the active session which we do not # intend to do here. In particular, setting master to local[1] can break distributed clusters. # To avoid this problem, we explicitly check for an active session. This is not ideal but there # is no good workaround at the moment. + import pyspark + spark = pyspark.sql.SparkSession._instantiatedSession if spark is None: spark = pyspark.sql.SparkSession.builder.config("spark.python.worker.reuse", True)\ diff --git a/mlflow/store/__init__.py b/mlflow/store/__init__.py index e69de29bb2d1d..889f5d7c43ec1 100644 --- a/mlflow/store/__init__.py +++ b/mlflow/store/__init__.py @@ -0,0 +1,14 @@ +""" +An MLflow tracking server has two properties related to how data is stored: *backend store* to +record ML experiments, runs, parameters, metrics, etc., and *artifact store* to store run +artifacts like models, plots, images, etc. + +Several constants are used by multiple backend store implementations. +""" + +# Path to default location for backend when using local FileStore or ArtifactStore. +# Also used as default location for artifacts, when not provided, in non-local file-based backends +# (e.g., MySQL) +DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH = "./mlruns" +SEARCH_MAX_RESULTS_DEFAULT = 1000 +SEARCH_MAX_RESULTS_THRESHOLD = 50000 diff --git a/mlflow/store/abstract_store.py b/mlflow/store/abstract_store.py index e4da41e0bb077..2b61b4ecd8fde 100644 --- a/mlflow/store/abstract_store.py +++ b/mlflow/store/abstract_store.py @@ -1,12 +1,20 @@ from abc import abstractmethod, ABCMeta from mlflow.entities import ViewType +from mlflow.store import SEARCH_MAX_RESULTS_DEFAULT + + +class PagedList(list): + + def __init__(self, items, token): + super(PagedList, self).__init__(items) + self.token = token class AbstractStore: """ - Abstract class for Backend Storage - This class will define API interface for front ends to connect with various types of backends + Abstract class for Backend Storage. + This class defines the API interface for front ends to connect with various types of backends. """ __metaclass__ = ABCMeta @@ -23,6 +31,7 @@ def list_experiments(self, view_type=ViewType.ACTIVE_ONLY): """ :param view_type: Qualify requested type of experiments. + :return: A list of Experiment objects stored in store for requested view. """ pass @@ -30,33 +39,51 @@ def list_experiments(self, view_type=ViewType.ACTIVE_ONLY): @abstractmethod def create_experiment(self, name, artifact_location): """ - Creates a new experiment. + Create a new experiment.
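``PagedList`` above is deliberately thin: an ordinary ``list`` that also carries a pagination token. A small sketch of the intended contract; the paging loop is hypothetical, standing in for any concrete ``AbstractStore`` implementation:

```python
from mlflow.store.abstract_store import PagedList

# A PagedList behaves like a plain list but exposes the token for the
# next page of results.
page = PagedList(["run_a", "run_b"], token="next-page")
assert list(page) == ["run_a", "run_b"]
assert page.token == "next-page"

# Hypothetical paging loop over a concrete store's search_runs():
#
# all_runs, token = [], None
# while True:
#     page = store.search_runs(["0"], "", ViewType.ACTIVE_ONLY, page_token=token)
#     all_runs.extend(page)
#     token = page.token
#     if token is None:
#         break
```

As the ``search_runs`` docstring below notes, stores that do not support pagination may return a token that is not meaningful, so callers should treat it as opaque.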
If an experiment with the given name already exists, throws exception. :param name: Desired name for an experiment :param artifact_location: Base location for artifacts in runs. May be None. - :return: experiment_id (integer) for the newly created experiment if successful, else None + + :return: experiment_id (string) for the newly created experiment if successful, else None. """ pass @abstractmethod def get_experiment(self, experiment_id): """ - Fetches the experiment by ID from the backend store. - Throws an exception if experiment is not found or permanently deleted. + Fetch the experiment by ID from the backend store. + + :param experiment_id: String id for the experiment + + :return: A single :py:class:`mlflow.entities.Experiment` object if it exists, + otherwise raises an exception. - :param experiment_id: Integer id for the experiment - :return: A single Experiment object if it exists, otherwise raises an Exception. """ pass + def get_experiment_by_name(self, experiment_name): + """ + Fetch the experiment by name from the backend store. + This is a base implementation using ``list_experiments``; derived classes may provide + specialized implementations. + + :param experiment_name: Name of experiment + + :return: A single :py:class:`mlflow.entities.Experiment` object if it exists. + """ + for experiment in self.list_experiments(ViewType.ALL): + if experiment.name == experiment_name: + return experiment + return None + @abstractmethod def delete_experiment(self, experiment_id): """ - Deletes the experiment from the backend store. Deleted experiments can be restored until + Delete the experiment from the backend store. Deleted experiments can be restored until permanently deleted. - :param experiment_id: Integer id for the experiment + :param experiment_id: String id for the experiment """ pass @@ -65,7 +92,7 @@ def restore_experiment(self, experiment_id): """ Restore deleted experiment unless it is permanently deleted. - :param experiment_id: Integer id for the experiment + :param experiment_id: String id for the experiment """ pass @@ -74,36 +101,46 @@ def rename_experiment(self, experiment_id, new_name): """ Update an experiment's name. The new name must be unique. - :param experiment_id: Integer id for the experiment + :param experiment_id: String id for the experiment """ pass @abstractmethod - def get_run(self, run_uuid): + def get_run(self, run_id): """ - Fetches the run from backend store + Fetch the run from backend store. The resulting :py:class:`Run <mlflow.entities.Run>` + contains a collection of run metadata - :py:class:`RunInfo <mlflow.entities.RunInfo>`, + as well as a collection of run parameters, tags, and metrics - + :py:class:`RunData <mlflow.entities.RunData>`. In the case where multiple metrics with the + same key are logged for the run, the :py:class:`RunData <mlflow.entities.RunData>` contains + the value at the latest timestamp for each metric. If there are multiple values with the + latest timestamp for a given metric, the maximum of these values is returned. - :param run_uuid: Unique identifier for the run - :return: A single Run object if it exists, otherwise raises an Exception + :param run_id: Unique identifier for the run. + + :return: A single :py:class:`mlflow.entities.Run` object, if the run exists. Otherwise, + raises an exception. """ pass - def update_run_info(self, run_uuid, run_status, end_time): + @abstractmethod + def update_run_info(self, run_id, run_status, end_time): """ - Updates the metadata of the specified run. - :return: RunInfo describing the updated run. + Update the metadata of the specified run.
+ + :return: :py:class:`mlflow.entities.RunInfo` describing the updated run. """ pass - def create_run(self, experiment_id, user_id, run_name, source_type, source_name, - entry_point_name, start_time, source_version, tags, parent_run_id): + @abstractmethod + def create_run(self, experiment_id, user_id, start_time, tags): """ - Creates a run under the specified experiment ID, setting the run's status to "RUNNING" + Create a run under the specified experiment ID, setting the run's status to "RUNNING" and the start time to the current time. - :param experiment_id: ID of the experiment for this run + :param experiment_id: String id of the experiment for this run :param user_id: ID of the user launching this run - :param source_type: Enum (integer) describing the source of the run + :return: The created Run object """ pass @@ -111,99 +148,123 @@ def create_run(self, experiment_id, user_id, run_name, source_type, source_name, @abstractmethod def delete_run(self, run_id): """ - Deletes a run. - :param run_id: + Delete a run. + + :param run_id: """ pass @abstractmethod def restore_run(self, run_id): """ - Restores a run. - :param run_id: + Restore a run. + + :param run_id: """ pass - def log_metric(self, run_uuid, metric): + def log_metric(self, run_id, metric): """ - Logs a metric for the specified run - :param run_uuid: String id for the run - :param metric: Metric instance to log + Log a metric for the specified run + + :param run_id: String id for the run + :param metric: :py:class:`mlflow.entities.Metric` instance to log """ - pass + self.log_batch(run_id, metrics=[metric], params=[], tags=[]) - def log_param(self, run_uuid, param): + def log_param(self, run_id, param): """ - Logs a param for the specified run - :param run_uuid: String id for the run - :param param: Param instance to log + Log a param for the specified run + + :param run_id: String id for the run + :param param: :py:class:`mlflow.entities.Param` instance to log """ - pass + self.log_batch(run_id, metrics=[], params=[param], tags=[]) - def set_tag(self, run_uuid, tag): + def set_tag(self, run_id, tag): """ - Sets a tag for the specified run - :param run_uuid: String id for the run - :param tag: RunTag instance to set + Set a tag for the specified run + + :param run_id: String id for the run + :param tag: :py:class:`mlflow.entities.RunTag` instance to set """ - pass + self.log_batch(run_id, metrics=[], params=[], tags=[tag]) @abstractmethod - def get_metric(self, run_uuid, metric_key): + def get_metric_history(self, run_id, metric_key): """ - Returns the last logged value for a given metric. + Return a list of metric objects corresponding to all values logged for a given metric. - :param run_uuid: Unique identifier for run + :param run_id: Unique identifier for run :param metric_key: Metric name within the run - :return: A single float value for the given metric if logged, else None + :return: A list of :py:class:`mlflow.entities.Metric` entities if logged, else empty list """ pass - @abstractmethod - def get_param(self, run_uuid, param_name): + def search_runs(self, experiment_ids, filter_string, run_view_type, + max_results=SEARCH_MAX_RESULTS_DEFAULT, order_by=None, page_token=None): """ - Returns the value of the specified parameter. - - :param run_uuid: Unique identifier for run - :param param_name: Parameter name within the run + Return runs that match the given list of search expressions within the experiments.
- :return: Value of the given parameter if logged, else None - """ - pass + :param experiment_ids: List of experiment ids to scope the search + :param filter_string: A search filter string. + :param run_view_type: ACTIVE_ONLY, DELETED_ONLY, or ALL runs + :param max_results: Maximum number of runs desired. + :param order_by: List of order_by clauses. + :param page_token: Token specifying the next page of results. It should be obtained from + a ``search_runs`` call. + + :return: A list of :py:class:`mlflow.entities.Run` objects that satisfy the search + expressions. The pagination token for the next page can be obtained via the ``token`` + attribute of the object; however, some store implementations may not support pagination + and thus the returned token would not be meaningful in such cases. + """ + runs, token = self._search_runs(experiment_ids, filter_string, run_view_type, max_results, + order_by, page_token) + return PagedList(runs, token) @abstractmethod - def get_metric_history(self, run_uuid, metric_key): + def _search_runs(self, experiment_ids, filter_string, run_view_type, max_results, order_by, + page_token): """ - Returns all logged value for a given metric. + Return runs that match the given list of search expressions within the experiments, as + well as a pagination token (indicating where the next page should start). Subclasses of + ``AbstractStore`` should implement this method to support pagination instead of + ``search_runs``. - :param run_uuid: Unique identifier for run - :param metric_key: Metric name within the run + See ``search_runs`` for parameter descriptions. - :return: A list of float values logged for the give metric if logged, else empty list + :return: A tuple of ``runs`` and ``token`` where ``runs`` is a list of + :py:class:`mlflow.entities.Run` objects that satisfy the search expressions, + and ``token`` is the pagination token for the next page of results. """ pass - @abstractmethod - def search_runs(self, experiment_ids, search_expressions, run_view_type): + def list_run_infos(self, experiment_id, run_view_type): """ - Returns runs that match the given list of search expressions within the experiments. - Given multiple search expressions, all these expressions are ANDed together for search. + Return run information for runs which belong to the experiment_id. - :param experiment_ids: List of experiment ids to scope the search - :param search_expression: list of search expressions + :param experiment_id: The experiment id to search - :return: A list of Run objects that satisfy the search expressions + :return: A list of :py:class:`mlflow.entities.RunInfo` objects that satisfy the + search expressions - """ - pass + runs = self.search_runs([experiment_id], None, run_view_type) + return [run.info for run in runs] @abstractmethod - def list_run_infos(self, experiment_id, run_view_type): + def log_batch(self, run_id, metrics, params, tags): """ - Returns run information for runs which belong to the experiment_id + Log multiple metrics, params, and tags for the specified run - :param experiment_id: The experiment id which to search. + :param run_id: String id for the run + :param metrics: List of :py:class:`mlflow.entities.Metric` instances to log + :param params: List of :py:class:`mlflow.entities.Param` instances to log + :param tags: List of :py:class:`mlflow.entities.RunTag` instances to log - :return: A list of RunInfo objects that satisfy the search expressions + :return: None.
""" pass diff --git a/mlflow/store/artifact_repo.py b/mlflow/store/artifact_repo.py index 5b0aaf9dadf5f..ef8b84cd868dd 100644 --- a/mlflow/store/artifact_repo.py +++ b/mlflow/store/artifact_repo.py @@ -1,15 +1,18 @@ import os +import posixpath +import tempfile from abc import abstractmethod, ABCMeta -from mlflow.store.rest_store import RestStore +from mlflow.utils.validation import path_not_unique, bad_path_message + from mlflow.exceptions import MlflowException -from mlflow.utils.file_utils import build_path, TempDir +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_DOES_NOT_EXIST class ArtifactRepository: """ - Defines how to upload (log) and download potentially large artifacts from different - storage backends. + Abstract artifact repo that defines how to upload (log) and download potentially large + artifacts from different storage backends. """ __metaclass__ = ABCMeta @@ -20,20 +23,22 @@ def __init__(self, artifact_uri): @abstractmethod def log_artifact(self, local_file, artifact_path=None): """ - Logs a local file as an artifact, optionally taking an ``artifact_path`` to place it in + Log a local file as an artifact, optionally taking an ``artifact_path`` to place it in within the run's artifacts. Run artifacts can be organized into directories, so you can place the artifact in a directory this way. + :param local_file: Path to artifact to log :param artifact_path: Directory within the run's artifact directory in which to log the - artifact + artifact. """ pass @abstractmethod def log_artifacts(self, local_dir, artifact_path=None): """ - Logs the files in the specified local directory as artifacts, optionally taking + Log the files in the specified local directory as artifacts, optionally taking an ``artifact_path`` to place them in within the run's artifacts. + :param local_dir: Directory of local artifacts to log :param artifact_path: Directory within the run's artifact directory in which to log the artifacts @@ -43,28 +48,37 @@ def log_artifacts(self, local_dir, artifact_path=None): @abstractmethod def list_artifacts(self, path): """ - Return all the artifacts for this run_uuid directly under path. If path is a file, returns + Return all the artifacts for this run_id directly under path. If path is a file, returns an empty list. Will error if path is neither a file nor directory. - :param path: Relative source path that contain desired artifacts + :param path: Relative source path that contains desired artifacts + :return: List of artifacts as FileInfo listed directly under path. """ pass - def download_artifacts(self, artifact_path): + def download_artifacts(self, artifact_path, dst_path=None): """ Download an artifact file or directory to a local directory if applicable, and return a local path for it. The caller is responsible for managing the lifecycle of the downloaded artifacts. - :param path: Relative source path to the desired artifact - :return: Full path desired artifact. + + :param artifact_path: Relative source path to the desired artifacts. + :param dst_path: Absolute path of the local filesystem destination directory to which to + download the specified artifacts. This directory must already exist. + If unspecified, the artifacts will either be downloaded to a new + uniquely-named directory on the local filesystem or will be returned + directly in the case of the LocalArtifactRepository. + + :return: Absolute path of the local filesystem location containing the desired artifacts. 
""" + # TODO: Probably need to add a more efficient method to stream just a single artifact # without downloading it, or to get a pre-signed URL for cloud storage. def download_artifacts_into(artifact_path, dest_dir): - basename = os.path.basename(artifact_path) - local_path = build_path(dest_dir, basename) + basename = posixpath.basename(artifact_path) + local_path = os.path.join(dest_dir, basename) listing = self.list_artifacts(artifact_path) if len(listing) > 0: # Artifact_path is a directory, so make a directory for it and download everything @@ -76,13 +90,29 @@ def download_artifacts_into(artifact_path, dest_dir): self._download_file(remote_file_path=artifact_path, local_path=local_path) return local_path - with TempDir(remove_on_exit=False) as tmp: - return download_artifacts_into(artifact_path, tmp.path()) + if dst_path is None: + dst_path = tempfile.mkdtemp() + dst_path = os.path.abspath(dst_path) + + if not os.path.exists(dst_path): + raise MlflowException( + message=( + "The destination path for downloaded artifacts does not" + " exist! Destination path: {dst_path}".format(dst_path=dst_path)), + error_code=RESOURCE_DOES_NOT_EXIST) + elif not os.path.isdir(dst_path): + raise MlflowException( + message=( + "The destination path for downloaded artifacts must be a directory!" + " Destination path: {dst_path}".format(dst_path=dst_path)), + error_code=INVALID_PARAMETER_VALUE) + + return download_artifacts_into(artifact_path, dst_path) @abstractmethod def _download_file(self, remote_file_path, local_path): """ - Downloads the file at the specified relative remote path and saves + Download the file at the specified relative remote path and saves it at the specified local path. :param remote_file_path: Source path to the remote file, relative to the root @@ -91,32 +121,8 @@ def _download_file(self, remote_file_path, local_path): """ pass - @staticmethod - def from_artifact_uri(artifact_uri, store): - """ - Given an artifact URI for an Experiment Run (e.g., /local/file/path or s3://my/bucket), - returns an ArtifactReposistory instance capable of logging and downloading artifacts - on behalf of this URI. - :param store: An instance of AbstractStore which the artifacts are registered in. - """ - if artifact_uri.startswith("s3:/"): - # Import these locally to avoid creating a circular import loop - from mlflow.store.s3_artifact_repo import S3ArtifactRepository - return S3ArtifactRepository(artifact_uri) - elif artifact_uri.startswith("gs:/"): - from mlflow.store.gcs_artifact_repo import GCSArtifactRepository - return GCSArtifactRepository(artifact_uri) - elif artifact_uri.startswith("wasbs:/"): - from mlflow.store.azure_blob_artifact_repo import AzureBlobArtifactRepository - return AzureBlobArtifactRepository(artifact_uri) - elif artifact_uri.startswith("sftp:/"): - from mlflow.store.sftp_artifact_repo import SFTPArtifactRepository - return SFTPArtifactRepository(artifact_uri) - elif artifact_uri.startswith("dbfs:/"): - from mlflow.store.dbfs_artifact_repo import DbfsArtifactRepository - if not isinstance(store, RestStore): - raise MlflowException('`store` must be an instance of RestStore.') - return DbfsArtifactRepository(artifact_uri, store.get_host_creds) - else: - from mlflow.store.local_artifact_repo import LocalArtifactRepository - return LocalArtifactRepository(artifact_uri) + +def verify_artifact_path(artifact_path): + if artifact_path and path_not_unique(artifact_path): + raise MlflowException("Invalid artifact path: '%s'. 
%s" % (artifact_path, + bad_path_message(artifact_path))) diff --git a/mlflow/store/artifact_repository_registry.py b/mlflow/store/artifact_repository_registry.py new file mode 100644 index 0000000000000..7dc5488d5c4ca --- /dev/null +++ b/mlflow/store/artifact_repository_registry.py @@ -0,0 +1,99 @@ +import entrypoints +import warnings + +from mlflow.exceptions import MlflowException +from mlflow.store.azure_blob_artifact_repo import AzureBlobArtifactRepository +from mlflow.store.dbfs_artifact_repo import dbfs_artifact_repo_factory +from mlflow.store.ftp_artifact_repo import FTPArtifactRepository +from mlflow.store.gcs_artifact_repo import GCSArtifactRepository +from mlflow.store.hdfs_artifact_repo import HdfsArtifactRepository +from mlflow.store.local_artifact_repo import LocalArtifactRepository +from mlflow.store.runs_artifact_repo import RunsArtifactRepository +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.store.sftp_artifact_repo import SFTPArtifactRepository + +from mlflow.utils import get_uri_scheme + + +class ArtifactRepositoryRegistry: + """Scheme-based registry for artifact repository implementations + + This class allows the registration of a function or class to provide an implementation for a + given scheme of `artifact_uri` through the `register` method. Implementations declared though + the entrypoints `mlflow.artifact_repository` group can be automatically registered through the + `register_entrypoints` method. + + When instantiating an artifact repository through the `get_artifact_repository` method, the + scheme of the artifact URI provided will be used to select which implementation to instantiate, + which will be called with same arguments passed to the `get_artifact_repository` method. + """ + + def __init__(self): + self._registry = {} + + def register(self, scheme, repository): + """Register artifact repositories provided by other packages""" + self._registry[scheme] = repository + + def register_entrypoints(self): + # Register artifact repositories provided by other packages + for entrypoint in entrypoints.get_group_all("mlflow.artifact_repository"): + try: + self.register(entrypoint.name, entrypoint.load()) + except (AttributeError, ImportError) as exc: + warnings.warn( + 'Failure attempting to register artifact repository for scheme "{}": {}'.format( + entrypoint.name, str(exc) + ), + stacklevel=2 + ) + + def get_artifact_repository(self, artifact_uri): + """Get an artifact repository from the registry based on the scheme of artifact_uri + + :param store_uri: The store URI. This URI is used to select which artifact repository + implementation to instantiate and is passed to the + constructor of the implementation. + + :return: An instance of `mlflow.store.ArtifactRepository` that fulfills the artifact URI + requirements. + """ + scheme = get_uri_scheme(artifact_uri) + repository = self._registry.get(scheme) + if repository is None: + raise MlflowException( + "Could not find a registered artifact repository for: {}. 
" + "Currently registered schemes are: {}".format( + artifact_uri, list(self._registry.keys()) + ) + ) + return repository(artifact_uri) + + +_artifact_repository_registry = ArtifactRepositoryRegistry() + +_artifact_repository_registry.register('', LocalArtifactRepository) +_artifact_repository_registry.register('file', LocalArtifactRepository) +_artifact_repository_registry.register('s3', S3ArtifactRepository) +_artifact_repository_registry.register('gs', GCSArtifactRepository) +_artifact_repository_registry.register('wasbs', AzureBlobArtifactRepository) +_artifact_repository_registry.register('ftp', FTPArtifactRepository) +_artifact_repository_registry.register('sftp', SFTPArtifactRepository) +_artifact_repository_registry.register('dbfs', dbfs_artifact_repo_factory) +_artifact_repository_registry.register('hdfs', HdfsArtifactRepository) +_artifact_repository_registry.register('runs', RunsArtifactRepository) + +_artifact_repository_registry.register_entrypoints() + + +def get_artifact_repository(artifact_uri): + """Get an artifact repository from the registry based on the scheme of artifact_uri + + :param store_uri: The store URI. This URI is used to select which artifact repository + implementation to instantiate and is passed to the + constructor of the implementation. + + :return: An instance of `mlflow.store.ArtifactRepository` that fulfills the artifact URI + requirements. + """ + return _artifact_repository_registry.get_artifact_repository(artifact_uri) diff --git a/mlflow/store/azure_blob_artifact_repo.py b/mlflow/store/azure_blob_artifact_repo.py index 88c5f17b78745..d662b5d8acac4 100644 --- a/mlflow/store/azure_blob_artifact_repo.py +++ b/mlflow/store/azure_blob_artifact_repo.py @@ -1,11 +1,12 @@ import os +import posixpath import re from six.moves import urllib from mlflow.entities import FileInfo +from mlflow.exceptions import MlflowException from mlflow.store.artifact_repo import ArtifactRepository -from mlflow.utils.file_utils import build_path, get_relative_path class AzureBlobArtifactRepository(ArtifactRepository): @@ -60,43 +61,50 @@ def parse_wasbs_uri(uri): def log_artifact(self, local_file, artifact_path=None): (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri) if artifact_path: - dest_path = build_path(dest_path, artifact_path) - dest_path = build_path(dest_path, os.path.basename(local_file)) + dest_path = posixpath.join(dest_path, artifact_path) + dest_path = posixpath.join( + dest_path, os.path.basename(local_file)) self.client.create_blob_from_path(container, dest_path, local_file) def log_artifacts(self, local_dir, artifact_path=None): (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri) if artifact_path: - dest_path = build_path(dest_path, artifact_path) + dest_path = posixpath.join(dest_path, artifact_path) local_dir = os.path.abspath(local_dir) for (root, _, filenames) in os.walk(local_dir): upload_path = dest_path if root != local_dir: - rel_path = get_relative_path(local_dir, root) - upload_path = build_path(dest_path, rel_path) + rel_path = os.path.relpath(root, local_dir) + upload_path = posixpath.join(dest_path, rel_path) for f in filenames: - path = build_path(upload_path, f) - self.client.create_blob_from_path(container, path, build_path(root, f)) + path = posixpath.join(upload_path, f) + self.client.create_blob_from_path( + container, path, os.path.join(root, f)) def list_artifacts(self, path=None): from azure.storage.blob.models import BlobPrefix (container, _, artifact_path) = self.parse_wasbs_uri(self.artifact_uri) 
dest_path = artifact_path if path: - dest_path = build_path(dest_path, path) + dest_path = posixpath.join(dest_path, path) infos = [] prefix = dest_path + "/" marker = None # Used to make next list request if this one exceeded the result limit while True: results = self.client.list_blobs(container, prefix=prefix, delimiter='/', marker=marker) for r in results: + if not r.name.startswith(artifact_path): + raise MlflowException( + "The name of the listed Azure blob does not begin with the specified" + " artifact path. Artifact path: {artifact_path}. Blob name:" + " {blob_name}".format(artifact_path=artifact_path, blob_name=r.name)) if isinstance(r, BlobPrefix): # This is a prefix for items in a subdirectory - subdir = r.name[len(artifact_path)+1:] + subdir = posixpath.relpath(path=r.name, start=artifact_path) if subdir.endswith("/"): subdir = subdir[:-1] infos.append(FileInfo(subdir, True, None)) else: # Just a plain old blob - file_name = r.name[len(artifact_path)+1:] + file_name = posixpath.relpath(path=r.name, start=artifact_path) infos.append(FileInfo(file_name, False, r.properties.content_length)) # Check whether a new marker is returned, meaning we have to make another request if results.next_marker: @@ -107,5 +115,5 @@ def list_artifacts(self, path=None): def _download_file(self, remote_file_path, local_path): (container, _, remote_root_path) = self.parse_wasbs_uri(self.artifact_uri) - remote_full_path = build_path(remote_root_path, remote_file_path) + remote_full_path = posixpath.join(remote_root_path, remote_file_path) self.client.get_blob_to_path(container, remote_full_path, local_path) diff --git a/mlflow/store/cli.py b/mlflow/store/cli.py index 8b28acd85ec5b..7025c67ab6249 100644 --- a/mlflow/store/cli.py +++ b/mlflow/store/cli.py @@ -1,11 +1,15 @@ -from mlflow.utils.logging_utils import eprint +import logging +import sys import click +from mlflow.store.artifact_repository_registry import get_artifact_repository from mlflow.tracking import _get_store -from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.utils.proto_json_utils import message_to_json +_logger = logging.getLogger(__name__) + @click.group("artifacts") def commands(): @@ -28,15 +32,16 @@ def commands(): "run's artifact directory.") def log_artifact(local_file, run_id, artifact_path): """ - Logs a local file as an artifact of a run, optionally within a run-specific + Log a local file as an artifact of a run, optionally within a run-specific artifact path. Run artifacts can be organized into directories, so you can place the artifact in a directory this way. """ store = _get_store() artifact_uri = store.get_run(run_id).info.artifact_uri - artifact_repo = ArtifactRepository.from_artifact_uri(artifact_uri, store) + artifact_repo = get_artifact_repository(artifact_uri) artifact_repo.log_artifact(local_file, artifact_path) - eprint("Logged artifact from local file %s to artifact_path=%s" % (local_file, artifact_path)) + _logger.info("Logged artifact from local file %s to artifact_path=%s", + local_file, artifact_path) @commands.command("log-artifacts") @@ -49,15 +54,15 @@ def log_artifact(local_file, run_id, artifact_path): "run's artifact directory.") def log_artifacts(local_dir, run_id, artifact_path): """ - Logs the files within a local directory as an artifact of a run, optionally + Log the files within a local directory as an artifact of a run, optionally within a run-specific artifact path. 
Run artifacts can be organized into directories, so you can place the artifact in a directory this way. """ store = _get_store() artifact_uri = store.get_run(run_id).info.artifact_uri - artifact_repo = ArtifactRepository.from_artifact_uri(artifact_uri, store) + artifact_repo = get_artifact_repository(artifact_uri) artifact_repo.log_artifacts(local_dir, artifact_path) - eprint("Logged artifact from local dir %s to artifact_path=%s" % (local_dir, artifact_path)) + _logger.info("Logged artifact from local dir %s to artifact_path=%s", local_dir, artifact_path) @commands.command("list") @@ -73,7 +78,7 @@ def list_artifacts(run_id, artifact_path): artifact_path = artifact_path if artifact_path is not None else "" store = _get_store() artifact_uri = store.get_run(run_id).info.artifact_uri - artifact_repo = ArtifactRepository.from_artifact_uri(artifact_uri, store) + artifact_repo = get_artifact_repository(artifact_uri) file_infos = artifact_repo.list_artifacts(artifact_path) print(_file_infos_to_json(file_infos)) @@ -84,18 +89,32 @@ def _file_infos_to_json(file_infos): @commands.command("download") -@click.option("--run-id", "-r", required=True, +@click.option("--run-id", "-r", help="Run ID from which to download") @click.option("--artifact-path", "-a", - help="If specified, a path relative to the run's root directory to download") -def download_artifacts(run_id, artifact_path): + help="For use with Run ID: if specified, a path relative to the run's root " + "directory to download") +@click.option("--artifact-uri", "-u", + help="URI pointing to the artifact file or artifacts directory; use as an " + "alternative to specifying --run-id and --artifact-path") +def download_artifacts(run_id, artifact_path, artifact_uri): """ Download an artifact file or directory to a local directory. The output is the name of the file or directory on the local disk. + + Either ``--run-id`` or ``--artifact-uri`` must be provided. """ + if run_id is None and artifact_uri is None: + _logger.error("Either ``--run-id`` or ``--artifact-uri`` must be provided.") + sys.exit(1) + + if artifact_uri is not None: + print(_download_artifact_from_uri(artifact_uri)) + return + artifact_path = artifact_path if artifact_path is not None else "" store = _get_store() artifact_uri = store.get_run(run_id).info.artifact_uri - artifact_repo = ArtifactRepository.from_artifact_uri(artifact_uri, store) + artifact_repo = get_artifact_repository(artifact_uri) artifact_location = artifact_repo.download_artifacts(artifact_path) print(artifact_location) diff --git a/mlflow/store/db/__init__.py b/mlflow/store/db/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/store/db/utils.py b/mlflow/store/db/utils.py new file mode 100644 index 0000000000000..f044bcc667fdc --- /dev/null +++ b/mlflow/store/db/utils.py @@ -0,0 +1,95 @@ +import os + +import logging + +from alembic.migration import MigrationContext # pylint: disable=import-error +import sqlalchemy + + +_logger = logging.getLogger(__name__) + + +def _get_package_dir(): + """Returns directory containing MLflow python package.""" + current_dir = os.path.dirname(os.path.abspath(__file__)) + return os.path.normpath(os.path.join(current_dir, os.pardir, os.pardir)) + + +def _get_alembic_config(db_url, alembic_dir=None): + """ + Constructs an alembic Config object referencing the specified database and migration script + directory. + + :param db_url: Database URL, like sqlite:///.
See + https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls for a full list of valid + database URLs. + :param alembic_dir: Path to migration script directory. Uses canonical migration script + directory under mlflow/alembic if unspecified. TODO: remove this argument in MLflow 1.1, as + it's only used to run special migrations for pre-1.0 users to remove duplicate constraint + names. + """ + from alembic.config import Config + final_alembic_dir = os.path.join(_get_package_dir(), 'store', 'db_migrations')\ + if alembic_dir is None else alembic_dir + config = Config(os.path.join(final_alembic_dir, 'alembic.ini')) + config.set_main_option('script_location', final_alembic_dir) + config.set_main_option('sqlalchemy.url', db_url) + return config + + +def _upgrade_db(url): + """ + Upgrade the schema of an MLflow tracking database to the latest supported + version. Note that schema migrations can be slow and are not guaranteed to be transactional - + we recommend taking a backup of your database before running migrations. + + :param url: Database URL, like sqlite:///. See + https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls for a full list of valid + database URLs. + """ + # alembic adds significant import time, so we import it lazily + from alembic import command + _logger.info("Updating database tables at %s", url) + config = _get_alembic_config(url) + command.upgrade(config, 'heads') + + +def _get_schema_version(engine): + with engine.connect() as connection: + mc = MigrationContext.configure(connection) + return mc.get_current_revision() + + +def _is_initialized_before_mlflow_1(url): + """ + Returns True if the database at the specified URL was initialized before MLflow 1.0, False + otherwise. + A database is initialized before MLflow 1.0 if and only if its revision ID is set to None. + """ + engine = sqlalchemy.create_engine(url) + return _get_schema_version(engine) is None + + +def _upgrade_db_initialized_before_mlflow_1(url): + """ + Upgrades the schema of an MLflow tracking database created prior to MLflow 1.0, removing + duplicate constraint names. This method performs a one-time update for pre-1.0 users that we + plan to make available in MLflow 1.0 but remove in successive versions (e.g. MLflow 1.1), + after which we will assume that effectively all databases have been initialized using the schema + in mlflow.store.dbmodels.initial_models (with a small number of special-case databases + initialized pre-1.0 and migrated to have the same schema as mlflow.store.dbmodels.initial_models + via this method). + TODO: remove this method in MLflow 1.1. + """ + # alembic adds significant import time, so we import it lazily + from alembic import command + _logger.info("Updating database tables at %s in preparation for MLflow 1.0 schema migrations", + url) + alembic_dir = os.path.join(_get_package_dir(), 'temporary_db_migrations_for_pre_1_users') + config = _get_alembic_config(url, alembic_dir) + command.upgrade(config, 'heads') + # Reset the alembic version to "base" (the 'first' version) so that a) the versioning system + # is unaware that this migration occurred and b) subsequent migrations, like the migration to + # add metric steps, do not need to depend on this one. This allows us to eventually remove this + # method and the associated migration e.g. in MLflow 1.1.
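A sketch of how the helpers in this module compose for a pre-1.0 database. `mlflow db upgrade <url>` is the supported entry point; the private helpers are called directly here only for illustration, and the database URL is hypothetical.

    from mlflow.store.db.utils import (
        _is_initialized_before_mlflow_1,
        _upgrade_db,
        _upgrade_db_initialized_before_mlflow_1,
    )

    db_url = "sqlite:///mlruns.db"
    if _is_initialized_before_mlflow_1(db_url):
        # One-time fixup: rewrite pre-1.0 constraint names, then stamp back to "base".
        _upgrade_db_initialized_before_mlflow_1(db_url)
    _upgrade_db(db_url)  # apply the regular alembic migrations up to 'heads'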
+ command.stamp(config, "base") diff --git a/mlflow/store/db_migrations/README b/mlflow/store/db_migrations/README new file mode 100644 index 0000000000000..32f5fbde0d768 --- /dev/null +++ b/mlflow/store/db_migrations/README @@ -0,0 +1,4 @@ +This directory contains configuration scripts and database migration logic for MLflow tracking +databases, using the Alembic migration library (https://alembic.sqlalchemy.org). To run database +migrations, use the `mlflow db upgrade` CLI command. To add and modify database migration logic, +see the contributor guide at https://github.com/mlflow/mlflow/blob/master/CONTRIBUTING.rst. diff --git a/mlflow/store/db_migrations/__init__.py b/mlflow/store/db_migrations/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/store/db_migrations/alembic.ini b/mlflow/store/db_migrations/alembic.ini new file mode 100644 index 0000000000000..30819efb469a7 --- /dev/null +++ b/mlflow/store/db_migrations/alembic.ini @@ -0,0 +1,74 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +script_location = mlflow/store/db_migrations + +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# timezone to use when rendering the date +# within the migration file as well as the filename. +# string value is passed to dateutil.tz.gettz() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the +# "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; this defaults +# to alembic/versions. When using multiple version +# directories, initial revisions must be specified with --version-path +# version_locations = %(here)s/bar %(here)s/bat alembic/versions + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +sqlalchemy.url = "" + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/mlflow/store/db_migrations/env.py b/mlflow/store/db_migrations/env.py new file mode 100644 index 0000000000000..c7174ed20064b --- /dev/null +++ b/mlflow/store/db_migrations/env.py @@ -0,0 +1,77 @@ + +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +from mlflow.store.dbmodels.models import Base +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + # Try https://stackoverflow.com/questions/30378233/sqlite-lack-of-alter-support-alembic-migration-failing-because-of-this-solutio + context.configure( + url=url, target_metadata=target_metadata, literal_binds=True, render_as_batch=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata, render_as_batch=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/mlflow/store/db_migrations/script.py.mako b/mlflow/store/db_migrations/script.py.mako new file mode 100644 index 0000000000000..2c0156303a8df --- /dev/null +++ b/mlflow/store/db_migrations/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/mlflow/store/db_migrations/versions/451aebb31d03_add_metric_step.py b/mlflow/store/db_migrations/versions/451aebb31d03_add_metric_step.py new file mode 100644 index 0000000000000..d5118e3307391 --- /dev/null +++ b/mlflow/store/db_migrations/versions/451aebb31d03_add_metric_step.py @@ -0,0 +1,35 @@ +"""add metric step + +Revision ID: 451aebb31d03 +Revises: +Create Date: 2019-04-22 15:29:24.921354 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
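For contributors, a sketch of how revision files like this one are generated from script.py.mako, reusing `_get_alembic_config` from mlflow/store/db/utils.py; the database URL and message are hypothetical.

    from alembic import command

    from mlflow.store.db.utils import _get_alembic_config

    config = _get_alembic_config("sqlite:///mlruns.db")
    # Renders script.py.mako into a new file under db_migrations/versions/,
    # auto-filling upgrade()/downgrade() by diffing the models against the DB.
    command.revision(config, message="example migration", autogenerate=True)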
+revision = '451aebb31d03' +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column('metrics', sa.Column('step', sa.BigInteger(), nullable=False, server_default='0')) + # Use batch mode so that we can run "ALTER TABLE" statements against SQLite + # databases (see more info at https://alembic.sqlalchemy.org/en/latest/ + # batch.html#running-batch-migrations-for-sqlite-and-other-databases) + with op.batch_alter_table("metrics") as batch_op: + batch_op.drop_constraint(constraint_name='metric_pk', type_="primary") + batch_op.create_primary_key( + constraint_name='metric_pk', + columns=['key', 'timestamp', 'step', 'run_uuid', 'value']) + + +def downgrade(): + # This migration cannot safely be downgraded; once metric data with the same + # (key, timestamp, run_uuid, value) are inserted (differing only in their `step`), we cannot + # revert to a schema where (key, timestamp, run_uuid, value) is the metric primary key. + pass diff --git a/mlflow/store/db_migrations/versions/90e64c465722_migrate_user_column_to_tags.py b/mlflow/store/db_migrations/versions/90e64c465722_migrate_user_column_to_tags.py new file mode 100644 index 0000000000000..30d71e67f6e07 --- /dev/null +++ b/mlflow/store/db_migrations/versions/90e64c465722_migrate_user_column_to_tags.py @@ -0,0 +1,68 @@ +"""migrate user column to tags + +Revision ID: 90e64c465722 +Revises: 451aebb31d03 +Create Date: 2019-05-29 10:43:52.919427 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy import orm, Column, Integer, String, ForeignKey, PrimaryKeyConstraint +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relationship, backref +from mlflow.utils.mlflow_tags import MLFLOW_USER + +# revision identifiers, used by Alembic. 
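A quick sketch of checking where a database sits in this revision history, reusing `_get_schema_version` from mlflow/store/db/utils.py (the URL is hypothetical):

    import sqlalchemy

    from mlflow.store.db.utils import _get_schema_version

    engine = sqlalchemy.create_engine("sqlite:///mlruns.db")
    # None           -> initialized before MLflow 1.0 (see _is_initialized_before_mlflow_1)
    # '451aebb31d03' -> the add-metric-step migration above has been applied
    print(_get_schema_version(engine))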
+revision = '90e64c465722' +down_revision = '451aebb31d03' +branch_labels = None +depends_on = None + + +Base = declarative_base() + + +class SqlRun(Base): + __tablename__ = 'runs' + run_uuid = Column(String(32), nullable=False) + user_id = Column(String(256), nullable=True, default=None) + experiment_id = Column(Integer) + + __table_args__ = ( + PrimaryKeyConstraint('experiment_id', name='experiment_pk'), + ) + + +class SqlTag(Base): + __tablename__ = 'tags' + key = Column(String(250)) + value = Column(String(250), nullable=True) + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + run = relationship('SqlRun', backref=backref('tags', cascade='all')) + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='tag_pk'), + ) + + +def upgrade(): + bind = op.get_bind() + session = orm.Session(bind=bind) + runs = session.query(SqlRun).all() + for run in runs: + if not run.user_id: + continue + + tag_exists = False + for tag in run.tags: + if tag.key == MLFLOW_USER: + tag_exists = True + if tag_exists: + continue + + session.merge(SqlTag(run_uuid=run.run_uuid, key=MLFLOW_USER, value=run.user_id)) + session.commit() + + +def downgrade(): + pass diff --git a/mlflow/store/db_migrations/versions/__init__.py b/mlflow/store/db_migrations/versions/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/store/dbfs_artifact_repo.py b/mlflow/store/dbfs_artifact_repo.py index d2c2db73312e6..25f7586e1a30e 100644 --- a/mlflow/store/dbfs_artifact_repo.py +++ b/mlflow/store/dbfs_artifact_repo.py @@ -1,31 +1,37 @@ -import json import os +import posixpath +import json from mlflow.entities import FileInfo -from mlflow.exceptions import IllegalArtifactPathError, MlflowException +from mlflow.exceptions import MlflowException from mlflow.store.artifact_repo import ArtifactRepository -from mlflow.utils.file_utils import build_path, get_relative_path +from mlflow.store.rest_store import RestStore +from mlflow.store.local_artifact_repo import LocalArtifactRepository +from mlflow.tracking import utils +from mlflow.utils.file_utils import relative_path_to_artifact_path from mlflow.utils.rest_utils import http_request, http_request_safe, RESOURCE_DOES_NOT_EXIST from mlflow.utils.string_utils import strip_prefix +import mlflow.utils.databricks_utils LIST_API_ENDPOINT = '/api/2.0/dbfs/list' GET_STATUS_ENDPOINT = '/api/2.0/dbfs/get-status' DOWNLOAD_CHUNK_SIZE = 1024 +USE_FUSE_ENV_VAR = "MLFLOW_ENABLE_DBFS_FUSE_ARTIFACT_REPO" -class DbfsArtifactRepository(ArtifactRepository): +class DbfsRestArtifactRepository(ArtifactRepository): """ - Stores artifacts on DBFS. + Stores artifacts on DBFS using the DBFS REST API. This repository is used with URIs of the form ``dbfs:/``. The repository can only be used together with the RestStore. """ - - def __init__(self, artifact_uri, get_host_creds): - cleaned_artifact_uri = artifact_uri.rstrip('/') - super(DbfsArtifactRepository, self).__init__(cleaned_artifact_uri) - self.get_host_creds = get_host_creds - if not cleaned_artifact_uri.startswith('dbfs:/'): + def __init__(self, artifact_uri): + super(DbfsRestArtifactRepository, self).__init__(artifact_uri) + # NOTE: if we ever need to support databricks profiles different from that set for + # tracking, we could pass in the databricks profile name into this class. 
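After the user-column migration above, the `mlflow.user` tag becomes the canonical source of a run's user; a hedged sketch of the reader-side lookup (the helper itself is hypothetical):

    from mlflow.utils.mlflow_tags import MLFLOW_USER

    def run_user(run):
        # Hypothetical helper: prefer the mlflow.user tag written by the
        # migration, falling back to the legacy user_id column for unmigrated rows.
        return run.data.tags.get(MLFLOW_USER, run.info.user_id)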
+ self.get_host_creds = _get_host_creds_from_default_store() + if not artifact_uri.startswith('dbfs:/'): raise MlflowException('DbfsArtifactRepository URI must start with dbfs:/') def _databricks_api_request(self, endpoint, **kwargs): @@ -64,31 +70,33 @@ def _get_dbfs_endpoint(self, artifact_path): def log_artifact(self, local_file, artifact_path=None): basename = os.path.basename(local_file) - if artifact_path == '': - raise IllegalArtifactPathError('artifact_path cannot be the empty string.') if artifact_path: - http_endpoint = self._get_dbfs_endpoint(os.path.join(artifact_path, basename)) + http_endpoint = self._get_dbfs_endpoint( + posixpath.join(artifact_path, basename)) else: - http_endpoint = self._get_dbfs_endpoint(os.path.basename(local_file)) - with open(local_file, 'rb') as f: + http_endpoint = self._get_dbfs_endpoint(basename) + if os.stat(local_file).st_size == 0: + # The API frontend doesn't like it when we post empty files to it using + # `requests.request`, potentially due to the bug described in + # https://github.com/requests/requests/issues/4215 self._databricks_api_request( - endpoint=http_endpoint, method='POST', data=f, allow_redirects=False) + endpoint=http_endpoint, method='POST', data="", allow_redirects=False) + else: + with open(local_file, 'rb') as f: + self._databricks_api_request( + endpoint=http_endpoint, method='POST', data=f, allow_redirects=False) def log_artifacts(self, local_dir, artifact_path=None): - if artifact_path: - root_http_endpoint = self._get_dbfs_endpoint(artifact_path) - else: - root_http_endpoint = self._get_dbfs_endpoint('') + artifact_path = artifact_path or '' for (dirpath, _, filenames) in os.walk(local_dir): - dir_http_endpoint = root_http_endpoint + artifact_subdir = artifact_path if dirpath != local_dir: - rel_path = get_relative_path(local_dir, dirpath) - dir_http_endpoint = build_path(root_http_endpoint, rel_path) + rel_path = os.path.relpath(dirpath, local_dir) + rel_path = relative_path_to_artifact_path(rel_path) + artifact_subdir = posixpath.join(artifact_path, rel_path) for name in filenames: - endpoint = build_path(dir_http_endpoint, name) - with open(build_path(dirpath, name), 'rb') as f: - self._databricks_api_request( - endpoint=endpoint, method='POST', data=f, allow_redirects=False) + file_path = os.path.join(dirpath, name) + self.log_artifact(file_path, artifact_subdir) def list_artifacts(self, path=None): if path: @@ -124,3 +132,31 @@ def list_artifacts(self, path=None): def _download_file(self, remote_file_path, local_path): self._dbfs_download(output_path=local_path, endpoint=self._get_dbfs_endpoint(remote_file_path)) + + +def _get_host_creds_from_default_store(): + store = utils._get_store() + if not isinstance(store, RestStore): + raise MlflowException('Failed to get credentials for DBFS; they are read from the ' + + 'Databricks CLI credentials or MLFLOW_TRACKING* environment ' + + 'variables.') + return store.get_host_creds + + +def dbfs_artifact_repo_factory(artifact_uri): + """ + Returns an ArtifactRepository subclass for storing artifacts on DBFS. + + This factory method is used with URIs of the form ``dbfs:/``. DBFS-backed artifact + storage can only be used together with the RestStore. + :param artifact_uri: DBFS root artifact URI (string). + :return: Subclass of ArtifactRepository capable of storing artifacts on DBFS. 
+ """ + cleaned_artifact_uri = artifact_uri.rstrip('/') + if mlflow.utils.databricks_utils.is_dbfs_fuse_available() \ + and os.environ.get(USE_FUSE_ENV_VAR, "").lower() != "false": + # If the DBFS FUSE mount is available, write artifacts directly to /dbfs/... using + # local filesystem APIs + file_uri = "file:///dbfs/{}".format(strip_prefix(cleaned_artifact_uri, "dbfs:/")) + return LocalArtifactRepository(file_uri) + return DbfsRestArtifactRepository(cleaned_artifact_uri) diff --git a/mlflow/store/dbmodels/__init__.py b/mlflow/store/dbmodels/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/store/dbmodels/db_types.py b/mlflow/store/dbmodels/db_types.py new file mode 100644 index 0000000000000..3c1bc82ed9f4a --- /dev/null +++ b/mlflow/store/dbmodels/db_types.py @@ -0,0 +1,15 @@ +""" +Set of SQLAlchemy database schemas supported in MLflow for tracking server backends. +""" + +POSTGRES = 'postgresql' +MYSQL = 'mysql' +SQLITE = 'sqlite' +MSSQL = 'mssql' + +DATABASE_ENGINES = [ + POSTGRES, + MYSQL, + SQLITE, + MSSQL +] diff --git a/mlflow/store/dbmodels/initial_models.py b/mlflow/store/dbmodels/initial_models.py new file mode 100644 index 0000000000000..9ac0b48f1b21b --- /dev/null +++ b/mlflow/store/dbmodels/initial_models.py @@ -0,0 +1,240 @@ +# Snapshot of MLflow DB models as of the 0.9.1 release, prior to the first database migration. +# Used to standardize initial database state. +# Copied with modifications from +# https://github.com/mlflow/mlflow/blob/v0.9.1/mlflow/store/dbmodels/models.py, which +# is the first database schema that users could be running. In particular, modifications have +# been made to substitute constants from MLflow with hard-coded values (e.g. replacing +# SourceType.to_string(SourceType.NOTEBOOK) with the constant "NOTEBOOK") and ensure +# that all constraint names are unique. Note that pre-1.0 database schemas did not have unique +# constraint names - we provided a one-time migration script for pre-1.0 users so that their +# database schema matched the schema in this file. +import time +from sqlalchemy.orm import relationship, backref +from sqlalchemy import ( + Column, String, Float, ForeignKey, Integer, CheckConstraint, + BigInteger, PrimaryKeyConstraint) +from sqlalchemy.ext.declarative import declarative_base + +Base = declarative_base() + + +SourceTypes = [ + "NOTEBOOK", + "JOB", + "LOCAL", + "UNKNOWN", + "PROJECT", +] + +RunStatusTypes = [ + "SCHEDULED", + "FAILED", + "FINISHED", + "RUNNING", +] + + +class SqlExperiment(Base): + """ + DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in ``experiment`` table. + """ + __tablename__ = 'experiments' + + experiment_id = Column(Integer, autoincrement=True) + """ + Experiment ID: `Integer`. *Primary Key* for ``experiment`` table. + """ + name = Column(String(256), unique=True, nullable=False) + """ + Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in + table schema. + """ + artifact_location = Column(String(256), nullable=True) + """ + Default artifact location for this experiment: `String` (limit 256 characters). Defined as + *Non null* in table schema. + """ + lifecycle_stage = Column(String(32), default="active") + """ + Lifecycle Stage of experiment: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. 
+ """ + + __table_args__ = ( + CheckConstraint( + lifecycle_stage.in_(["active", "deleted"]), + name='experiments_lifecycle_stage'), + PrimaryKeyConstraint('experiment_id', name='experiment_pk') + ) + + def __repr__(self): + return ''.format(self.experiment_id, self.name) + + +class SqlRun(Base): + """ + DB model for :py:class:`mlflow.entities.Run`. These are recorded in ``runs`` table. + """ + __tablename__ = 'runs' + + run_uuid = Column(String(32), nullable=False) + """ + Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table. + """ + name = Column(String(250)) + """ + Run name: `String` (limit 250 characters). + """ + source_type = Column(String(20), default="LOCAL") + """ + Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``, + ``LOCAL`` (default), or ``UNKNOWN``. + """ + source_name = Column(String(500)) + """ + Name of source recording the run: `String` (limit 500 characters). + """ + entry_point_name = Column(String(50)) + """ + Entry-point name that launched the run run: `String` (limit 50 characters). + """ + user_id = Column(String(256), nullable=True, default=None) + """ + User ID: `String` (limit 256 characters). Defaults to ``null``. + """ + status = Column(String(20), default="SCHEDULED") + """ + Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default), + ``FINISHED``, ``FAILED``. + """ + start_time = Column(BigInteger, default=int(time.time())) + """ + Run start time: `BigInteger`. Defaults to current system time. + """ + end_time = Column(BigInteger, nullable=True, default=None) + """ + Run end time: `BigInteger`. + """ + source_version = Column(String(50)) + """ + Source version: `String` (limit 50 characters). + """ + lifecycle_stage = Column(String(20), default="active") + """ + Lifecycle Stage of run: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. + """ + artifact_uri = Column(String(200), default=None) + """ + Default artifact location for this run: `String` (limit 200 characters). + """ + experiment_id = Column(Integer, ForeignKey('experiments.experiment_id')) + """ + Experiment ID to which this run belongs to: *Foreign Key* into ``experiment`` table. + """ + experiment = relationship('SqlExperiment', backref=backref('runs', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`. + """ + + __table_args__ = ( + CheckConstraint(source_type.in_(SourceTypes), name='source_type'), + CheckConstraint(status.in_(RunStatusTypes), name='status'), + CheckConstraint(lifecycle_stage.in_(["active", "deleted"]), + name='runs_lifecycle_stage'), + PrimaryKeyConstraint('run_uuid', name='run_pk') + ) + + +class SqlTag(Base): + """ + DB model for :py:class:`mlflow.entities.RunTag`. These are recorded in ``tags`` table. + """ + __tablename__ = 'tags' + + key = Column(String(250)) + """ + Tag key: `String` (limit 250 characters). *Primary Key* for ``tags`` table. + """ + value = Column(String(250), nullable=True) + """ + Value associated with tag: `String` (limit 250 characters). Could be *null*. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this tag belongs to: *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('tags', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. 
+ """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='tag_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value) + + +class SqlMetric(Base): + __tablename__ = 'metrics' + + key = Column(String(250)) + """ + Metric key: `String` (limit 250 characters). Part of *Primary Key* for ``metrics`` table. + """ + value = Column(Float, nullable=False) + """ + Metric value: `Float`. Defined as *Non-null* in schema. + """ + timestamp = Column(BigInteger, default=lambda: int(time.time())) + """ + Timestamp recorded for this metric entry: `BigInteger`. Part of *Primary Key* for + ``metrics`` table. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this metric belongs to: Part of *Primary Key* for ``metrics`` table. + *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('metrics', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'timestamp', 'run_uuid', name='metric_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value, self.timestamp) + + +class SqlParam(Base): + __tablename__ = 'params' + + key = Column(String(250)) + """ + Param key: `String` (limit 250 characters). Part of *Primary Key* for ``params`` table. + """ + value = Column(String(250), nullable=False) + """ + Param value: `String` (limit 250 characters). Defined as *Non-null* in schema. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this metric belongs to: Part of *Primary Key* for ``params`` table. + *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('params', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='param_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value) diff --git a/mlflow/store/dbmodels/models.py b/mlflow/store/dbmodels/models.py new file mode 100644 index 0000000000000..ef75fe2ac51a3 --- /dev/null +++ b/mlflow/store/dbmodels/models.py @@ -0,0 +1,317 @@ +import time +from sqlalchemy.orm import relationship, backref +from sqlalchemy import ( + Column, String, Float, ForeignKey, Integer, CheckConstraint, + BigInteger, PrimaryKeyConstraint) +from sqlalchemy.ext.declarative import declarative_base +from mlflow.entities import ( + Experiment, RunTag, Metric, Param, RunData, RunInfo, + SourceType, RunStatus, Run, ViewType) +from mlflow.entities.lifecycle_stage import LifecycleStage + +Base = declarative_base() + + +SourceTypes = [ + SourceType.to_string(SourceType.NOTEBOOK), + SourceType.to_string(SourceType.JOB), + SourceType.to_string(SourceType.LOCAL), + SourceType.to_string(SourceType.UNKNOWN), + SourceType.to_string(SourceType.PROJECT) +] + +RunStatusTypes = [ + RunStatus.to_string(RunStatus.SCHEDULED), + RunStatus.to_string(RunStatus.FAILED), + RunStatus.to_string(RunStatus.FINISHED), + RunStatus.to_string(RunStatus.RUNNING) +] + + +class SqlExperiment(Base): + """ + DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in ``experiment`` table. + """ + __tablename__ = 'experiments' + + experiment_id = Column(Integer, autoincrement=True) + """ + Experiment ID: `Integer`. *Primary Key* for ``experiment`` table. 
+ """ + name = Column(String(256), unique=True, nullable=False) + """ + Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in + table schema. + """ + artifact_location = Column(String(256), nullable=True) + """ + Default artifact location for this experiment: `String` (limit 256 characters). Defined as + *Non null* in table schema. + """ + lifecycle_stage = Column(String(32), default=LifecycleStage.ACTIVE) + """ + Lifecycle Stage of experiment: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. + """ + + __table_args__ = ( + CheckConstraint( + lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)), + name='experiments_lifecycle_stage'), + PrimaryKeyConstraint('experiment_id', name='experiment_pk') + ) + + def __repr__(self): + return ''.format(self.experiment_id, self.name) + + def to_mlflow_entity(self): + """ + Convert DB model to corresponding MLflow entity. + + :return: :py:class:`mlflow.entities.Experiment`. + """ + return Experiment( + experiment_id=str(self.experiment_id), + name=self.name, + artifact_location=self.artifact_location, + lifecycle_stage=self.lifecycle_stage) + + +class SqlRun(Base): + """ + DB model for :py:class:`mlflow.entities.Run`. These are recorded in ``runs`` table. + """ + __tablename__ = 'runs' + + run_uuid = Column(String(32), nullable=False) + """ + Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table. + """ + name = Column(String(250)) + """ + Run name: `String` (limit 250 characters). + """ + source_type = Column(String(20), default=SourceType.to_string(SourceType.LOCAL)) + """ + Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``, + ``LOCAL`` (default), or ``UNKNOWN``. + """ + source_name = Column(String(500)) + """ + Name of source recording the run: `String` (limit 500 characters). + """ + entry_point_name = Column(String(50)) + """ + Entry-point name that launched the run run: `String` (limit 50 characters). + """ + user_id = Column(String(256), nullable=True, default=None) + """ + User ID: `String` (limit 256 characters). Defaults to ``null``. + """ + status = Column(String(20), default=RunStatus.to_string(RunStatus.SCHEDULED)) + """ + Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default), + ``FINISHED``, ``FAILED``. + """ + start_time = Column(BigInteger, default=int(time.time())) + """ + Run start time: `BigInteger`. Defaults to current system time. + """ + end_time = Column(BigInteger, nullable=True, default=None) + """ + Run end time: `BigInteger`. + """ + source_version = Column(String(50)) + """ + Source version: `String` (limit 50 characters). + """ + lifecycle_stage = Column(String(20), default=LifecycleStage.ACTIVE) + """ + Lifecycle Stage of run: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. + """ + artifact_uri = Column(String(200), default=None) + """ + Default artifact location for this run: `String` (limit 200 characters). + """ + experiment_id = Column(Integer, ForeignKey('experiments.experiment_id')) + """ + Experiment ID to which this run belongs to: *Foreign Key* into ``experiment`` table. + """ + experiment = relationship('SqlExperiment', backref=backref('runs', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`. 
+ """ + + __table_args__ = ( + CheckConstraint(source_type.in_(SourceTypes), name='source_type'), + CheckConstraint(status.in_(RunStatusTypes), name='status'), + CheckConstraint(lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)), + name='runs_lifecycle_stage'), + PrimaryKeyConstraint('run_uuid', name='run_pk') + ) + + def to_mlflow_entity(self): + """ + Convert DB model to corresponding MLflow entity. + + :return: :py:class:`mlflow.entities.Run`. + """ + run_info = RunInfo( + run_uuid=self.run_uuid, + run_id=self.run_uuid, + experiment_id=str(self.experiment_id), + user_id=self.user_id, + status=self.status, + start_time=self.start_time, + end_time=self.end_time, + lifecycle_stage=self.lifecycle_stage, + artifact_uri=self.artifact_uri) + + # only get latest recorded metrics per key + all_metrics = [m.to_mlflow_entity() for m in self.metrics] + metrics = {} + for m in all_metrics: + existing_metric = metrics.get(m.key) + if (existing_metric is None)\ + or ((m.step, m.timestamp, m.value) >= + (existing_metric.step, existing_metric.timestamp, + existing_metric.value)): + metrics[m.key] = m + + run_data = RunData( + metrics=list(metrics.values()), + params=[p.to_mlflow_entity() for p in self.params], + tags=[t.to_mlflow_entity() for t in self.tags]) + + return Run(run_info=run_info, run_data=run_data) + + +class SqlTag(Base): + """ + DB model for :py:class:`mlflow.entities.RunTag`. These are recorded in ``tags`` table. + """ + __tablename__ = 'tags' + + key = Column(String(250)) + """ + Tag key: `String` (limit 250 characters). *Primary Key* for ``tags`` table. + """ + value = Column(String(250), nullable=True) + """ + Value associated with tag: `String` (limit 250 characters). Could be *null*. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this tag belongs to: *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('tags', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='tag_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value) + + def to_mlflow_entity(self): + """ + Convert DB model to corresponding MLflow entity. + + :return: :py:class:`mlflow.entities.RunTag`. + """ + return RunTag( + key=self.key, + value=self.value) + + +class SqlMetric(Base): + __tablename__ = 'metrics' + + key = Column(String(250)) + """ + Metric key: `String` (limit 250 characters). Part of *Primary Key* for ``metrics`` table. + """ + value = Column(Float, nullable=False) + """ + Metric value: `Float`. Defined as *Non-null* in schema. + """ + timestamp = Column(BigInteger, default=lambda: int(time.time())) + """ + Timestamp recorded for this metric entry: `BigInteger`. Part of *Primary Key* for + ``metrics`` table. + """ + step = Column(BigInteger, default=0, nullable=False) + """ + Step recorded for this metric entry: `BigInteger`. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this metric belongs to: Part of *Primary Key* for ``metrics`` table. + *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('metrics', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. 
+ """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'timestamp', 'step', 'run_uuid', 'value', name='metric_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value, self.timestamp, self.step) + + def to_mlflow_entity(self): + """ + Convert DB model to corresponding MLflow entity. + + :return: :py:class:`mlflow.entities.Metric`. + """ + return Metric( + key=self.key, + value=self.value, + timestamp=self.timestamp, + step=self.step) + + +class SqlParam(Base): + __tablename__ = 'params' + + key = Column(String(250)) + """ + Param key: `String` (limit 250 characters). Part of *Primary Key* for ``params`` table. + """ + value = Column(String(250), nullable=False) + """ + Param value: `String` (limit 250 characters). Defined as *Non-null* in schema. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this metric belongs to: Part of *Primary Key* for ``params`` table. + *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('params', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='param_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value) + + def to_mlflow_entity(self): + """ + Convert DB model to corresponding MLflow entity. + + :return: :py:class:`mlflow.entities.Param`. + """ + return Param( + key=self.key, + value=self.value) diff --git a/mlflow/store/file_store.py b/mlflow/store/file_store.py index 492a91bf193d2..6b843df9e2916 100644 --- a/mlflow/store/file_store.py +++ b/mlflow/store/file_store.py @@ -1,32 +1,45 @@ +import logging import os +import posixpath +import sys import uuid import six from mlflow.entities import Experiment, Metric, Param, Run, RunData, RunInfo, RunStatus, RunTag, \ - ViewType -from mlflow.entities.run_info import check_run_is_active, \ - check_run_is_deleted -from mlflow.exceptions import MlflowException + ViewType, SourceType +from mlflow.entities.lifecycle_stage import LifecycleStage +from mlflow.entities.run_info import check_run_is_active, check_run_is_deleted +from mlflow.exceptions import MlflowException, MissingConfigException import mlflow.protos.databricks_pb2 as databricks_pb2 +from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, RESOURCE_DOES_NOT_EXIST +from mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH, SEARCH_MAX_RESULTS_THRESHOLD from mlflow.store.abstract_store import AbstractStore from mlflow.utils.validation import _validate_metric_name, _validate_param_name, _validate_run_id, \ - _validate_tag_name - + _validate_tag_name, _validate_experiment_id, \ + _validate_batch_log_limits, _validate_batch_log_data from mlflow.utils.env import get_env from mlflow.utils.file_utils import (is_directory, list_subdirs, mkdir, exists, write_yaml, - read_yaml, find, read_file_lines, read_file, build_path, + read_yaml, find, read_file_lines, read_file, write_to, append_to, make_containing_dirs, mv, get_parent_dir, - list_all) -from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME, MLFLOW_PARENT_RUN_ID - -from mlflow.utils.search_utils import does_run_match_clause + list_all, local_file_uri_to_path, path_to_local_file_uri) +from mlflow.utils.search_utils import SearchUtils _TRACKING_DIR_ENV_VAR = "MLFLOW_TRACKING_DIR" def _default_root_dir(): - return get_env(_TRACKING_DIR_ENV_VAR) or os.path.abspath("mlruns") + return get_env(_TRACKING_DIR_ENV_VAR) or 
os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH) + + +def _read_persisted_experiment_dict(experiment_dict): + dict_copy = experiment_dict.copy() + + # 'experiment_id' was changed from int to string, so we must cast to string + # when reading legacy experiments + if isinstance(dict_copy['experiment_id'], int): + dict_copy['experiment_id'] = str(dict_copy['experiment_id']) + return Experiment.from_dictionary(dict_copy) def _make_persisted_run_info_dict(run_info): @@ -34,13 +47,33 @@ def _make_persisted_run_info_dict(run_info): # old mlflow versions to read run_info_dict = dict(run_info) run_info_dict['tags'] = [] + run_info_dict['name'] = '' + if 'status' in run_info_dict: + # 'status' is stored as an integer enum in meta file, but RunInfo.status field is a string. + # Convert from string to enum/int before storing. + run_info_dict['status'] = RunStatus.from_string(run_info.status) + else: + run_info_dict['status'] = RunStatus.RUNNING + run_info_dict['source_type'] = SourceType.LOCAL + run_info_dict['source_name'] = '' + run_info_dict['entry_point_name'] = '' + run_info_dict['source_version'] = '' return run_info_dict def _read_persisted_run_info_dict(run_info_dict): dict_copy = run_info_dict.copy() if 'lifecycle_stage' not in dict_copy: - dict_copy['lifecycle_stage'] = RunInfo.ACTIVE_LIFECYCLE + dict_copy['lifecycle_stage'] = LifecycleStage.ACTIVE + # 'status' is stored as an integer enum in meta file, but RunInfo.status field is a string. + # converting to string before hydrating RunInfo. + # If 'status' value not recorded in files, mark it as 'RUNNING' (default) + dict_copy['status'] = RunStatus.to_string(run_info_dict.get('status', RunStatus.RUNNING)) + + # 'experiment_id' was changed from int to string, so we must cast to string + # when reading legacy run_infos + if isinstance(dict_copy["experiment_id"], int): + dict_copy["experiment_id"] = str(dict_copy["experiment_id"]) return RunInfo.from_dictionary(dict_copy) @@ -51,21 +84,21 @@ class FileStore(AbstractStore): PARAMS_FOLDER_NAME = "params" TAGS_FOLDER_NAME = "tags" META_DATA_FILE_NAME = "meta.yaml" + DEFAULT_EXPERIMENT_ID = "0" def __init__(self, root_directory=None, artifact_root_uri=None): """ Create a new FileStore with the given root directory and a given default artifact root URI. 
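The two persistence helpers above round-trip `status` between the string carried on `RunInfo` and the integer enum written to meta.yaml; a quick sketch of that conversion:

    from mlflow.entities import RunStatus

    as_int = RunStatus.from_string("FINISHED")  # integer enum value stored on disk
    as_str = RunStatus.to_string(as_int)        # string form used on RunInfo
    assert as_str == "FINISHED"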
""" super(FileStore, self).__init__() - self.root_directory = root_directory or _default_root_dir() - self.artifact_root_uri = artifact_root_uri or self.root_directory - self.trash_folder = build_path(self.root_directory, FileStore.TRASH_FOLDER_NAME) + self.root_directory = local_file_uri_to_path(root_directory or _default_root_dir()) + self.artifact_root_uri = artifact_root_uri or path_to_local_file_uri(self.root_directory) + self.trash_folder = os.path.join(self.root_directory, FileStore.TRASH_FOLDER_NAME) # Create root directory if needed if not exists(self.root_directory): mkdir(self.root_directory) - print("here") - self._create_experiment_with_id(name="Default", - experiment_id=Experiment.DEFAULT_EXPERIMENT_ID, + self._create_experiment_with_id(name=Experiment.DEFAULT_EXPERIMENT_NAME, + experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, artifact_uri=None) # Create trash folder if needed if not exists(self.trash_folder): @@ -87,7 +120,7 @@ def _get_experiment_path(self, experiment_id, view_type=ViewType.ALL, assert_exi if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL: parents.append(self.trash_folder) for parent in parents: - exp_list = find(parent, str(experiment_id), full_path=True) + exp_list = find(parent, experiment_id, full_path=True) if len(exp_list) > 0: return exp_list[0] if assert_exists: @@ -97,31 +130,36 @@ def _get_experiment_path(self, experiment_id, view_type=ViewType.ALL, assert_exi def _get_run_dir(self, experiment_id, run_uuid): _validate_run_id(run_uuid) - return build_path(self._get_experiment_path(experiment_id, assert_exists=True), run_uuid) + if not self._has_experiment(experiment_id): + return None + return os.path.join(self._get_experiment_path(experiment_id, assert_exists=True), + run_uuid) def _get_metric_path(self, experiment_id, run_uuid, metric_key): _validate_run_id(run_uuid) _validate_metric_name(metric_key) - return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.METRICS_FOLDER_NAME, - metric_key) + return os.path.join(self._get_run_dir(experiment_id, run_uuid), + FileStore.METRICS_FOLDER_NAME, + metric_key) def _get_param_path(self, experiment_id, run_uuid, param_name): _validate_run_id(run_uuid) _validate_param_name(param_name) - return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.PARAMS_FOLDER_NAME, - param_name) + return os.path.join(self._get_run_dir(experiment_id, run_uuid), + FileStore.PARAMS_FOLDER_NAME, + param_name) def _get_tag_path(self, experiment_id, run_uuid, tag_name): _validate_run_id(run_uuid) _validate_tag_name(tag_name) - return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.TAGS_FOLDER_NAME, - tag_name) + return os.path.join(self._get_run_dir(experiment_id, run_uuid), FileStore.TAGS_FOLDER_NAME, + tag_name) def _get_artifact_dir(self, experiment_id, run_uuid): _validate_run_id(run_uuid) - artifacts_dir = build_path(self.get_experiment(experiment_id).artifact_location, - run_uuid, - FileStore.ARTIFACTS_FOLDER_NAME) + artifacts_dir = posixpath.join(self.get_experiment(experiment_id).artifact_location, + run_uuid, + FileStore.ARTIFACTS_FOLDER_NAME) return artifacts_dir def _get_active_experiments(self, full_path=False): @@ -138,13 +176,24 @@ def list_experiments(self, view_type=ViewType.ACTIVE_ONLY): rsl += self._get_active_experiments(full_path=False) if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL: rsl += self._get_deleted_experiments(full_path=False) - return [self._get_experiment(exp_id, view_type) for exp_id in rsl] + experiments = [] + for 
exp_id in rsl: + try: + # trap and warn known issues, will raise unexpected exceptions to caller + experiment = self._get_experiment(exp_id, view_type) + if experiment: + experiments.append(experiment) + except MissingConfigException as rnfe: + # Trap malformed experiments and log warnings. + logging.warning("Malformed experiment '%s'. Detailed error %s", + str(exp_id), str(rnfe), exc_info=True) + return experiments def _create_experiment_with_id(self, name, experiment_id, artifact_uri): + artifact_uri = artifact_uri or posixpath.join(self.artifact_root_uri, str(experiment_id)) self._check_root_dir() meta_dir = mkdir(self.root_directory, str(experiment_id)) - artifact_uri = artifact_uri or build_path(self.artifact_root_uri, str(experiment_id)) - experiment = Experiment(experiment_id, name, artifact_uri, Experiment.ACTIVE_LIFECYCLE) + experiment = Experiment(experiment_id, name, artifact_uri, LifecycleStage.ACTIVE) write_yaml(meta_dir, FileStore.META_DATA_FILE_NAME, dict(experiment)) return experiment_id @@ -155,45 +204,59 @@ def create_experiment(self, name, artifact_location=None): databricks_pb2.INVALID_PARAMETER_VALUE) experiment = self.get_experiment_by_name(name) if experiment is not None: - raise MlflowException("Experiment '%s' already exists." % experiment.name, - databricks_pb2.RESOURCE_ALREADY_EXISTS) + if experiment.lifecycle_stage == LifecycleStage.DELETED: + raise MlflowException( + "Experiment '%s' already exists in deleted state. " + "You can restore the experiment, or permanently delete the experiment " + "from the .trash folder (under tracking server's root folder) before " + "creating a new one with the same name." % experiment.name, + databricks_pb2.RESOURCE_ALREADY_EXISTS) + else: + raise MlflowException("Experiment '%s' already exists." % experiment.name, + databricks_pb2.RESOURCE_ALREADY_EXISTS) # Get all existing experiments and find the one with largest ID. # len(list_all(..)) would not work when experiments are deleted. - experiments_ids = [e.experiment_id for e in self.list_experiments(ViewType.ALL)] - experiment_id = max(experiments_ids) + 1 - return self._create_experiment_with_id(name, experiment_id, artifact_location) + experiments_ids = [int(e.experiment_id) for e in self.list_experiments(ViewType.ALL)] + experiment_id = max(experiments_ids) + 1 if experiments_ids else 0 + return self._create_experiment_with_id(name, str(experiment_id), artifact_location) def _has_experiment(self, experiment_id): return self._get_experiment_path(experiment_id) is not None def _get_experiment(self, experiment_id, view_type=ViewType.ALL): self._check_root_dir() + _validate_experiment_id(experiment_id) experiment_dir = self._get_experiment_path(experiment_id, view_type) if experiment_dir is None: raise MlflowException("Could not find experiment with ID %s" % experiment_id, databricks_pb2.RESOURCE_DOES_NOT_EXIST) meta = read_yaml(experiment_dir, FileStore.META_DATA_FILE_NAME) if experiment_dir.startswith(self.trash_folder): - meta['lifecycle_stage'] = Experiment.DELETED_LIFECYCLE + meta['lifecycle_stage'] = LifecycleStage.DELETED else: - meta['lifecycle_stage'] = Experiment.ACTIVE_LIFECYCLE - return Experiment.from_dictionary(meta) + meta['lifecycle_stage'] = LifecycleStage.ACTIVE + experiment = _read_persisted_experiment_dict(meta) + if experiment_id != experiment.experiment_id: + logging.warning("Experiment ID mismatch for exp %s. ID recorded as '%s' in meta data. 
" + "Experiment will be ignored.", + experiment_id, experiment.experiment_id, exc_info=True) + return None + return experiment def get_experiment(self, experiment_id): """ - Fetches the experiment. This will search for active as well as deleted experiments. + Fetch the experiment. + Note: This API will search for active as well as deleted experiments. :param experiment_id: Integer id for the experiment :return: A single Experiment object if it exists, otherwise raises an Exception. """ - return self._get_experiment(experiment_id) - - def get_experiment_by_name(self, name): - self._check_root_dir() - for experiment in self.list_experiments(ViewType.ALL): - if experiment.name == name: - return experiment - return None + experiment_id = FileStore.DEFAULT_EXPERIMENT_ID if experiment_id is None else experiment_id + experiment = self._get_experiment(experiment_id) + if experiment is None: + raise MlflowException("Experiment '%s' does not exist." % experiment_id, + databricks_pb2.RESOURCE_DOES_NOT_EXIST) + return experiment def delete_experiment(self, experiment_id): experiment_dir = self._get_experiment_path(experiment_id, ViewType.ACTIVE_ONLY) @@ -210,30 +273,40 @@ def restore_experiment(self, experiment_id): conflict_experiment = self._get_experiment_path(experiment_id, ViewType.ACTIVE_ONLY) if conflict_experiment is not None: raise MlflowException( - "Cannot restore eperiment with ID %d. " - "An experiment with same ID already exists." % experiment_id, - databricks_pb2.RESOURCE_ALREADY_EXISTS) + "Cannot restore eperiment with ID %d. " + "An experiment with same ID already exists." % experiment_id, + databricks_pb2.RESOURCE_ALREADY_EXISTS) mv(experiment_dir, self.root_directory) def rename_experiment(self, experiment_id, new_name): - meta_dir = os.path.join(self.root_directory, str(experiment_id)) + meta_dir = os.path.join(self.root_directory, experiment_id) + # if experiment is malformed, will raise error experiment = self._get_experiment(experiment_id) + if experiment is None: + raise MlflowException("Experiment '%s' does not exist." % experiment_id, + databricks_pb2.RESOURCE_DOES_NOT_EXIST) experiment._set_name(new_name) - if experiment.lifecycle_stage != Experiment.ACTIVE_LIFECYCLE: + if experiment.lifecycle_stage != LifecycleStage.ACTIVE: raise Exception("Cannot rename experiment in non-active lifecycle stage." " Current stage: %s" % experiment.lifecycle_stage) write_yaml(meta_dir, FileStore.META_DATA_FILE_NAME, dict(experiment), overwrite=True) def delete_run(self, run_id): run_info = self._get_run_info(run_id) + if run_info is None: + raise MlflowException("Run '%s' metadata is in invalid state." % run_id, + databricks_pb2.INVALID_STATE) check_run_is_active(run_info) - new_info = run_info._copy_with_overrides(lifecycle_stage=RunInfo.DELETED_LIFECYCLE) + new_info = run_info._copy_with_overrides(lifecycle_stage=LifecycleStage.DELETED) self._overwrite_run_info(new_info) def restore_run(self, run_id): run_info = self._get_run_info(run_id) + if run_info is None: + raise MlflowException("Run '%s' metadata is in invalid state." 
% run_id, + databricks_pb2.INVALID_STATE) check_run_is_deleted(run_info) - new_info = run_info._copy_with_overrides(lifecycle_stage=RunInfo.ACTIVE_LIFECYCLE) + new_info = run_info._copy_with_overrides(lifecycle_stage=LifecycleStage.ACTIVE) self._overwrite_run_info(new_info) def _find_experiment_folder(self, run_path): @@ -253,87 +326,90 @@ def _find_run_root(self, run_uuid): runs = find(experiment_dir, run_uuid, full_path=True) if len(runs) == 0: continue - return runs[0] - return None + return os.path.basename(os.path.abspath(experiment_dir)), runs[0] + return None, None - def update_run_info(self, run_uuid, run_status, end_time): - _validate_run_id(run_uuid) - run_info = self.get_run(run_uuid).info + def update_run_info(self, run_id, run_status, end_time): + _validate_run_id(run_id) + run_info = self.get_run(run_id).info check_run_is_active(run_info) new_info = run_info._copy_with_overrides(run_status, end_time) self._overwrite_run_info(new_info) return new_info - def create_run(self, experiment_id, user_id, run_name, source_type, - source_name, entry_point_name, start_time, source_version, tags, parent_run_id): + def create_run(self, experiment_id, user_id, start_time, tags): """ Creates a run with the specified attributes. """ + experiment_id = FileStore.DEFAULT_EXPERIMENT_ID if experiment_id is None else experiment_id experiment = self.get_experiment(experiment_id) if experiment is None: raise MlflowException( - "Could not create run under experiment with ID %s - no such experiment " - "exists." % experiment_id, - databricks_pb2.RESOURCE_DOES_NOT_EXIST) - if experiment.lifecycle_stage != Experiment.ACTIVE_LIFECYCLE: + "Could not create run under experiment with ID %s - no such experiment " + "exists." % experiment_id, + databricks_pb2.RESOURCE_DOES_NOT_EXIST) + if experiment.lifecycle_stage != LifecycleStage.ACTIVE: raise MlflowException( - "Could not create run under non-active experiment with ID " - "%s." % experiment_id, - databricks_pb2.INVALID_STATE) + "Could not create run under non-active experiment with ID " + "%s." 
% experiment_id, + databricks_pb2.INVALID_STATE) run_uuid = uuid.uuid4().hex artifact_uri = self._get_artifact_dir(experiment_id, run_uuid) - run_info = RunInfo(run_uuid=run_uuid, experiment_id=experiment_id, - name="", - artifact_uri=artifact_uri, source_type=source_type, - source_name=source_name, - entry_point_name=entry_point_name, user_id=user_id, - status=RunStatus.RUNNING, start_time=start_time, end_time=None, - source_version=source_version, lifecycle_stage=RunInfo.ACTIVE_LIFECYCLE) + run_info = RunInfo(run_uuid=run_uuid, run_id=run_uuid, experiment_id=experiment_id, + artifact_uri=artifact_uri, user_id=user_id, + status=RunStatus.to_string(RunStatus.RUNNING), + start_time=start_time, end_time=None, + lifecycle_stage=LifecycleStage.ACTIVE) # Persist run metadata and create directories for logging metrics, parameters, artifacts - run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid) + run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_id) mkdir(run_dir) - write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, _make_persisted_run_info_dict(run_info)) + run_info_dict = _make_persisted_run_info_dict(run_info) + write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, run_info_dict) mkdir(run_dir, FileStore.METRICS_FOLDER_NAME) mkdir(run_dir, FileStore.PARAMS_FOLDER_NAME) mkdir(run_dir, FileStore.ARTIFACTS_FOLDER_NAME) for tag in tags: self.set_tag(run_uuid, tag) - if parent_run_id: - self.set_tag(run_uuid, RunTag(key=MLFLOW_PARENT_RUN_ID, value=parent_run_id)) - if run_name: - self.set_tag(run_uuid, RunTag(key=MLFLOW_RUN_NAME, value=run_name)) - return Run(run_info=run_info, run_data=None) - - def _make_experiment_dict(self, experiment): - # Don't persist lifecycle_stage since it's inferred from the ".trash" folder. - experiment_dict = dict(experiment) - del experiment_dict['lifecycle_stage'] - return experiment_dict - - def get_run(self, run_uuid): + return self.get_run(run_id=run_uuid) + + def get_run(self, run_id): """ - Will get both active and deleted runs. + Note: Will get both active and deleted runs. """ - _validate_run_id(run_uuid) - run_info = self._get_run_info(run_uuid) - metrics = self.get_all_metrics(run_uuid) - params = self.get_all_params(run_uuid) - tags = self.get_all_tags(run_uuid) + _validate_run_id(run_id) + run_info = self._get_run_info(run_id) + if run_info is None: + raise MlflowException("Run '%s' metadata is in invalid state." % run_id, + databricks_pb2.INVALID_STATE) + metrics = self.get_all_metrics(run_id) + params = self.get_all_params(run_id) + tags = self.get_all_tags(run_id) return Run(run_info, RunData(metrics, params, tags)) def _get_run_info(self, run_uuid): """ - Will get both active and deleted runs. + Note: Will get both active and deleted runs. """ - run_dir = self._find_run_root(run_uuid) - if run_dir is not None: - meta = read_yaml(run_dir, FileStore.META_DATA_FILE_NAME) - return _read_persisted_run_info_dict(meta) - raise MlflowException("Run '%s' not found" % run_uuid, - databricks_pb2.RESOURCE_DOES_NOT_EXIST) + exp_id, run_dir = self._find_run_root(run_uuid) + if run_dir is None: + raise MlflowException("Run '%s' not found" % run_uuid, + databricks_pb2.RESOURCE_DOES_NOT_EXIST) + + meta = read_yaml(run_dir, FileStore.META_DATA_FILE_NAME) + run_info = _read_persisted_run_info_dict(meta) + if run_info.experiment_id != exp_id: + logging.warning("Wrong experiment ID (%s) recorded for run '%s'. It should be %s. 
" + "Run will be ignored.", str(run_info.experiment_id), + str(run_info.run_id), str(exp_id), exc_info=True) + return None + return run_info def _get_run_files(self, run_uuid, resource_type): _validate_run_id(run_uuid) + run_info = self._get_run_info(run_uuid) + if run_info is None: + raise MlflowException("Run '%s' metadata is in invalid state." % run_uuid, + databricks_pb2.INVALID_STATE) if resource_type == "metric": subfolder_name = FileStore.METRICS_FOLDER_NAME elif resource_type == "param": @@ -342,10 +418,8 @@ def _get_run_files(self, run_uuid, resource_type): subfolder_name = FileStore.TAGS_FOLDER_NAME else: raise Exception("Looking for unknown resource under run.") - run_dir = self._find_run_root(run_uuid) - if run_dir is None: - raise MlflowException("Run '%s' not found" % run_uuid, - databricks_pb2.RESOURCE_DOES_NOT_EXIST) + _, run_dir = self._find_run_root(run_uuid) + # run_dir exists since run validity has been confirmed above. source_dirs = find(run_dir, subfolder_name, full_path=True) if len(source_dirs) == 0: return run_dir, [] @@ -354,26 +428,27 @@ def _get_run_files(self, run_uuid, resource_type): for name in files: abspath = os.path.join(root, name) file_names.append(os.path.relpath(abspath, source_dirs[0])) + if sys.platform == "win32": + # Turn metric relative path into metric name. + # Metrics can have '/' in the name. On windows, '/' is interpreted as a separator. + # When the metric is read back the path will use '\' for separator. + # We need to translate the path into posix path. + from mlflow.utils.file_utils import relative_path_to_artifact_path + file_names = [relative_path_to_artifact_path(x) for x in file_names] return source_dirs[0], file_names @staticmethod def _get_metric_from_file(parent_path, metric_name): _validate_metric_name(metric_name) - metric_data = read_file_lines(parent_path, metric_name) - if len(metric_data) == 0: - raise Exception("Metric '%s' is malformed. No data found." % metric_name) - last_line = metric_data[-1] - timestamp, val = last_line.strip().split(" ") - return Metric(metric_name, float(val), int(timestamp)) - - def get_metric(self, run_uuid, metric_key): - _validate_run_id(run_uuid) - _validate_metric_name(metric_key) - parent_path, metric_files = self._get_run_files(run_uuid, "metric") - if metric_key not in metric_files: - raise MlflowException("Metric '%s' not found under run '%s'" % (metric_key, run_uuid), - databricks_pb2.RESOURCE_DOES_NOT_EXIST) - return self._get_metric_from_file(parent_path, metric_key) + metric_objs = [FileStore._get_metric_from_line(metric_name, line) + for line in read_file_lines(parent_path, metric_name)] + if len(metric_objs) == 0: + raise ValueError("Metric '%s' is malformed. No data found." % metric_name) + # Python performs element-wise comparison of equal-length tuples, ordering them + # based on their first differing element. Therefore, we use max() operator to find the + # largest value at the largest timestamp. 
def get_all_metrics(self, run_uuid): _validate_run_id(run_uuid) @@ -383,30 +458,39 @@ def get_all_metrics(self, run_uuid): metrics.append(self._get_metric_from_file(parent_path, metric_file)) return metrics - def get_metric_history(self, run_uuid, metric_key): - _validate_run_id(run_uuid) + @staticmethod + def _get_metric_from_line(metric_name, metric_line): + metric_parts = metric_line.strip().split(" ") + if len(metric_parts) != 2 and len(metric_parts) != 3: + raise MlflowException("Metric '%s' is malformed; persisted metric data contained %s " + "fields. Expected 2 or 3 fields." % + (metric_name, len(metric_parts)), databricks_pb2.INTERNAL_ERROR) + ts = int(metric_parts[0]) + val = float(metric_parts[1]) + step = int(metric_parts[2]) if len(metric_parts) == 3 else 0 + return Metric(key=metric_name, value=val, timestamp=ts, step=step) + + def get_metric_history(self, run_id, metric_key): + _validate_run_id(run_id) _validate_metric_name(metric_key) - parent_path, metric_files = self._get_run_files(run_uuid, "metric") + parent_path, metric_files = self._get_run_files(run_id, "metric") if metric_key not in metric_files: - raise MlflowException("Metric '%s' not found under run '%s'" % (metric_key, run_uuid), + raise MlflowException("Metric '%s' not found under run '%s'" % (metric_key, run_id), databricks_pb2.RESOURCE_DOES_NOT_EXIST) - metric_data = read_file_lines(parent_path, metric_key) - rsl = [] - for pair in metric_data: - ts, val = pair.strip().split(" ") - rsl.append(Metric(metric_key, float(val), int(ts))) - return rsl + return [FileStore._get_metric_from_line(metric_key, line) + for line in read_file_lines(parent_path, metric_key)] @staticmethod def _get_param_from_file(parent_path, param_name): _validate_param_name(param_name) param_data = read_file_lines(parent_path, param_name) - if len(param_data) == 0: - raise Exception("Param '%s' is malformed. No data found." % param_name) if len(param_data) > 1: raise Exception("Unexpected data for param '%s'. 
Param recorded more than once" % param_name) - return Param(param_name, str(param_data[0].strip())) + # The only cause for param_data's length to be zero is the param's + # value is an empty string + value = '' if len(param_data) == 0 else str(param_data[0].strip()) + return Param(param_name, value) @staticmethod def _get_tag_from_file(parent_path, tag_name): @@ -414,15 +498,6 @@ def _get_tag_from_file(parent_path, tag_name): tag_data = read_file(parent_path, tag_name) return RunTag(tag_name, tag_data) - def get_param(self, run_uuid, param_name): - _validate_run_id(run_uuid) - _validate_param_name(param_name) - parent_path, param_files = self._get_run_files(run_uuid, "param") - if param_name not in param_files: - raise MlflowException("Param '%s' not found under run '%s'" % (param_name, run_uuid), - databricks_pb2.RESOURCE_DOES_NOT_EXIST) - return self._get_param_from_file(parent_path, param_name) - def get_all_params(self, run_uuid): parent_path, param_files = self._get_run_files(run_uuid, "param") params = [] @@ -437,46 +512,51 @@ def get_all_tags(self, run_uuid): tags.append(self._get_tag_from_file(parent_path, tag_file)) return tags - def _list_run_uuids(self, experiment_id, run_view_type): + def _list_run_infos(self, experiment_id, view_type): self._check_root_dir() + if not self._has_experiment(experiment_id): + return [] experiment_dir = self._get_experiment_path(experiment_id, assert_exists=True) run_uuids = list_all(experiment_dir, os.path.isdir, full_path=False) - if run_view_type == ViewType.ALL: - return run_uuids - elif run_view_type == ViewType.ACTIVE_ONLY: - return [r_id for r_id in run_uuids - if self._get_run_info(r_id).lifecycle_stage == RunInfo.ACTIVE_LIFECYCLE] - else: - return [r_id for r_id in run_uuids - if self._get_run_info(r_id).lifecycle_stage == RunInfo.DELETED_LIFECYCLE] - - def search_runs(self, experiment_ids, search_expressions, run_view_type): - run_uuids = [] - if len(search_expressions) == 0: - for experiment_id in experiment_ids: - run_uuids.extend(self._list_run_uuids(experiment_id, run_view_type)) - else: - for experiment_id in experiment_ids: - for run_uuid in self._list_run_uuids(experiment_id, run_view_type): - run = self.get_run(run_uuid) - if all([does_run_match_clause(run, s) for s in search_expressions]): - run_uuids.append(run_uuid) - return [self.get_run(run_uuid) for run_uuid in run_uuids] - - def list_run_infos(self, experiment_id, run_view_type): run_infos = [] - for run_uuid in self._list_run_uuids(experiment_id, run_view_type): - run_infos.append(self._get_run_info(run_uuid)) + for r_id in run_uuids: + try: + # trap and warn known issues, will raise unexpected exceptions to caller + run_info = self._get_run_info(r_id) + if run_info is None: + continue + if LifecycleStage.matches_view_type(view_type, run_info.lifecycle_stage): + run_infos.append(run_info) + except MissingConfigException as rnfe: + # trap malformed run exception and log warning + logging.warning("Malformed run '%s'. Detailed error %s", r_id, str(rnfe), + exc_info=True) return run_infos - def log_metric(self, run_uuid, metric): - _validate_run_id(run_uuid) + def _search_runs(self, experiment_ids, filter_string, run_view_type, max_results, order_by, + page_token): + if max_results > SEARCH_MAX_RESULTS_THRESHOLD: + raise MlflowException("Invalid value for request parameter max_results. 
It must be at " + "most {}, but got value {}".format(SEARCH_MAX_RESULTS_THRESHOLD, + max_results), + databricks_pb2.INVALID_PARAMETER_VALUE) + runs = [] + for experiment_id in experiment_ids: + run_infos = self._list_run_infos(experiment_id, run_view_type) + runs.extend(self.get_run(r.run_id) for r in run_infos) + filtered = SearchUtils.filter(runs, filter_string) + sorted_runs = SearchUtils.sort(filtered, order_by) + runs, next_page_token = SearchUtils.paginate(sorted_runs, page_token, max_results) + return runs, next_page_token + + def log_metric(self, run_id, metric): + _validate_run_id(run_id) _validate_metric_name(metric.key) - run = self.get_run(run_uuid) + run = self.get_run(run_id) check_run_is_active(run.info) - metric_path = self._get_metric_path(run.info.experiment_id, run_uuid, metric.key) + metric_path = self._get_metric_path(run.info.experiment_id, run_id, metric.key) make_containing_dirs(metric_path) - append_to(metric_path, "%s %s\n" % (metric.timestamp, metric.value)) + append_to(metric_path, "%s %s %s\n" % (metric.timestamp, metric.value, metric.step)) def _writeable_value(self, tag_value): if tag_value is None: @@ -486,26 +566,57 @@ def _writeable_value(self, tag_value): else: return "%s" % tag_value - def log_param(self, run_uuid, param): - _validate_run_id(run_uuid) + def log_param(self, run_id, param): + _validate_run_id(run_id) _validate_param_name(param.key) - run = self.get_run(run_uuid) + run = self.get_run(run_id) check_run_is_active(run.info) - param_path = self._get_param_path(run.info.experiment_id, run_uuid, param.key) + param_path = self._get_param_path(run.info.experiment_id, run_id, param.key) make_containing_dirs(param_path) write_to(param_path, self._writeable_value(param.value)) - def set_tag(self, run_uuid, tag): - _validate_run_id(run_uuid) + def set_tag(self, run_id, tag): + _validate_run_id(run_id) _validate_tag_name(tag.key) - run = self.get_run(run_uuid) + run = self.get_run(run_id) check_run_is_active(run.info) - tag_path = self._get_tag_path(run.info.experiment_id, run_uuid, tag.key) + tag_path = self._get_tag_path(run.info.experiment_id, run_id, tag.key) make_containing_dirs(tag_path) # Don't add trailing newline write_to(tag_path, self._writeable_value(tag.value)) + def delete_tag(self, run_id, key): + """ + Delete a tag from a run. This is irreversible. 
+ :param run_id: String ID of the run + :param key: Name of the tag + """ + _validate_run_id(run_id) + run = self.get_run(run_id) + check_run_is_active(run.info) + if key not in run.data.tags.keys(): + raise MlflowException("No tag with name: {} in run with id {}".format(key, run_id), + error_code=RESOURCE_DOES_NOT_EXIST) + tag_path = self._get_tag_path(run.info.experiment_id, run_id, key) + os.remove(tag_path) + def _overwrite_run_info(self, run_info): - run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid) + run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_id) run_info_dict = _make_persisted_run_info_dict(run_info) write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, run_info_dict, overwrite=True) + + def log_batch(self, run_id, metrics, params, tags): + _validate_run_id(run_id) + _validate_batch_log_data(metrics, params, tags) + _validate_batch_log_limits(metrics, params, tags) + run = self.get_run(run_id) + check_run_is_active(run.info) + try: + for param in params: + self.log_param(run_id, param) + for metric in metrics: + self.log_metric(run_id, metric) + for tag in tags: + self.set_tag(run_id, tag) + except Exception as e: + raise MlflowException(e, INTERNAL_ERROR) diff --git a/mlflow/store/ftp_artifact_repo.py b/mlflow/store/ftp_artifact_repo.py new file mode 100644 index 0000000000000..139f7fd4d6e6d --- /dev/null +++ b/mlflow/store/ftp_artifact_repo.py @@ -0,0 +1,124 @@ +import os +import ftplib +from ftplib import FTP +from contextlib import contextmanager + +import posixpath +from six.moves import urllib + +from mlflow.entities.file_info import FileInfo +from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.utils.file_utils import relative_path_to_artifact_path + + +class FTPArtifactRepository(ArtifactRepository): + """Stores artifacts as files in a remote directory, via ftp.""" + + def __init__(self, artifact_uri): + self.uri = artifact_uri + parsed = urllib.parse.urlparse(artifact_uri) + self.config = { + 'host': parsed.hostname, + 'port': 21 if parsed.port is None else parsed.port, + 'username': parsed.username, + 'password': parsed.password + } + self.path = parsed.path + + if self.config['host'] is None: + self.config['host'] = 'localhost' + + super(FTPArtifactRepository, self).__init__(artifact_uri) + + @contextmanager + def get_ftp_client(self): + ftp = FTP() + ftp.connect(self.config['host'], self.config['port']) + ftp.login(self.config['username'], self.config['password']) + yield ftp + ftp.close() + + @staticmethod + def _is_dir(ftp, full_file_path): + try: + ftp.cwd(full_file_path) + return True + except ftplib.error_perm: + return False + + @staticmethod + def _mkdir(ftp, artifact_dir): + try: + if not FTPArtifactRepository._is_dir(ftp, artifact_dir): + ftp.mkd(artifact_dir) + except ftplib.error_perm: + head, _ = posixpath.split(artifact_dir) + FTPArtifactRepository._mkdir(ftp, head) + FTPArtifactRepository._mkdir(ftp, artifact_dir) + + @staticmethod + def _size(ftp, full_file_path): + ftp.voidcmd('TYPE I') + size = ftp.size(full_file_path) + ftp.voidcmd('TYPE A') + return size + + def log_artifact(self, local_file, artifact_path=None): + with self.get_ftp_client() as ftp: + artifact_dir = posixpath.join(self.path, artifact_path) \ + if artifact_path else self.path + self._mkdir(ftp, artifact_dir) + with open(local_file, 'rb') as f: + ftp.cwd(artifact_dir) + ftp.storbinary('STOR ' + os.path.basename(local_file), f) + + def log_artifacts(self, local_dir, artifact_path=None): + dest_path = posixpath.join(self.path, 
artifact_path) \ + if artifact_path else self.path + + dest_path = posixpath.join( + dest_path, os.path.split(local_dir)[1]) + dest_path_re = os.path.split(local_dir)[1] + if artifact_path: + dest_path_re = posixpath.join( + artifact_path, os.path.split(local_dir)[1]) + + local_dir = os.path.abspath(local_dir) + for (root, _, filenames) in os.walk(local_dir): + upload_path = dest_path + if root != local_dir: + rel_path = os.path.relpath(root, local_dir) + rel_path = relative_path_to_artifact_path(rel_path) + upload_path = posixpath.join(dest_path_re, rel_path) + if not filenames: + with self.get_ftp_client() as ftp: + self._mkdir(ftp, posixpath.join(self.path, upload_path)) + for f in filenames: + if os.path.isfile(os.path.join(root, f)): + self.log_artifact(os.path.join(root, f), upload_path) + + def list_artifacts(self, path=None): + with self.get_ftp_client() as ftp: + artifact_dir = self.path + list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir + if not self._is_dir(ftp, list_dir): + return [] + artifact_files = ftp.nlst(list_dir) + infos = [] + for file_name in artifact_files: + file_path = (file_name if path is None + else posixpath.join(path, file_name)) + full_file_path = posixpath.join(list_dir, file_name) + if self._is_dir(ftp, full_file_path): + infos.append(FileInfo(file_path, True, None)) + else: + size = self._size(ftp, full_file_path) + infos.append(FileInfo(file_path, False, size)) + return infos + + def _download_file(self, remote_file_path, local_path): + remote_full_path = posixpath.join(self.path, remote_file_path) \ + if remote_file_path else self.path + with self.get_ftp_client() as ftp: + with open(local_path, 'wb') as f: + ftp.retrbinary('RETR ' + remote_full_path, f.write) diff --git a/mlflow/store/gcs_artifact_repo.py b/mlflow/store/gcs_artifact_repo.py index e403da4030e46..d61df0ae0efcb 100644 --- a/mlflow/store/gcs_artifact_repo.py +++ b/mlflow/store/gcs_artifact_repo.py @@ -1,10 +1,11 @@ import os +import posixpath from six.moves import urllib from mlflow.entities import FileInfo from mlflow.store.artifact_repo import ArtifactRepository -from mlflow.utils.file_utils import build_path, get_relative_path +from mlflow.utils.file_utils import relative_path_to_artifact_path class GCSArtifactRepository(ArtifactRepository): @@ -37,8 +38,9 @@ def parse_gcs_uri(uri): def log_artifact(self, local_file, artifact_path=None): (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri) if artifact_path: - dest_path = build_path(dest_path, artifact_path) - dest_path = build_path(dest_path, os.path.basename(local_file)) + dest_path = posixpath.join(dest_path, artifact_path) + dest_path = posixpath.join( + dest_path, os.path.basename(local_file)) gcs_bucket = self.gcs.Client().get_bucket(bucket) blob = gcs_bucket.blob(dest_path) @@ -47,24 +49,25 @@ def log_artifact(self, local_file, artifact_path=None): def log_artifacts(self, local_dir, artifact_path=None): (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri) if artifact_path: - dest_path = build_path(dest_path, artifact_path) + dest_path = posixpath.join(dest_path, artifact_path) gcs_bucket = self.gcs.Client().get_bucket(bucket) local_dir = os.path.abspath(local_dir) for (root, _, filenames) in os.walk(local_dir): upload_path = dest_path if root != local_dir: - rel_path = get_relative_path(local_dir, root) - upload_path = build_path(dest_path, rel_path) + rel_path = os.path.relpath(root, local_dir) + rel_path = relative_path_to_artifact_path(rel_path) + upload_path = posixpath.join(dest_path, 
rel_path) for f in filenames: - path = build_path(upload_path, f) - gcs_bucket.blob(path).upload_from_filename(build_path(root, f)) + path = posixpath.join(upload_path, f) + gcs_bucket.blob(path).upload_from_filename(os.path.join(root, f)) def list_artifacts(self, path=None): (bucket, artifact_path) = self.parse_gcs_uri(self.artifact_uri) dest_path = artifact_path if path: - dest_path = build_path(dest_path, path) + dest_path = posixpath.join(dest_path, path) prefix = dest_path + "/" bkt = self.gcs.Client().get_bucket(bucket) @@ -73,7 +76,7 @@ def list_artifacts(self, path=None): results = bkt.list_blobs(prefix=prefix, delimiter="/") for result in results: - blob_path = result.name[len(artifact_path)+1:] + blob_path = result.name[len(artifact_path) + 1:] infos.append(FileInfo(blob_path, False, result.size)) return sorted(infos, key=lambda f: f.path) @@ -84,10 +87,10 @@ def _list_folders(self, bkt, prefix, artifact_path): for page in results.pages: dir_paths.update(page.prefixes) - return [FileInfo(path[len(artifact_path)+1:-1], True, None)for path in dir_paths] + return [FileInfo(path[len(artifact_path) + 1:-1], True, None) for path in dir_paths] def _download_file(self, remote_file_path, local_path): (bucket, remote_root_path) = self.parse_gcs_uri(self.artifact_uri) - remote_full_path = build_path(remote_root_path, remote_file_path) + remote_full_path = posixpath.join(remote_root_path, remote_file_path) gcs_bucket = self.gcs.Client().get_bucket(bucket) gcs_bucket.get_blob(remote_full_path).download_to_filename(local_path) diff --git a/mlflow/store/hdfs_artifact_repo.py b/mlflow/store/hdfs_artifact_repo.py new file mode 100644 index 0000000000000..caf6ceb26dfa9 --- /dev/null +++ b/mlflow/store/hdfs_artifact_repo.py @@ -0,0 +1,217 @@ +import os +import posixpath +import tempfile +from contextlib import contextmanager + +from six.moves import urllib + +from mlflow.entities import FileInfo +from mlflow.exceptions import MlflowException +from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.utils.file_utils import mkdir, relative_path_to_artifact_path + + +class HdfsArtifactRepository(ArtifactRepository): + """ + Stores artifacts on HDFS. + + This repository is used with URIs of the form ``hdfs:/<path>``. The repository can only be used + together with the RestStore. + """ + + def __init__(self, artifact_uri): + self.host, self.port, self.path = _resolve_connection_params(artifact_uri) + super(HdfsArtifactRepository, self).__init__(artifact_uri) + + def log_artifact(self, local_file, artifact_path=None): + """ + Log artifact in hdfs. + :param local_file: source file path + :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path + """ + hdfs_base_path = _resolve_base_path(self.path, artifact_path) + + with hdfs_system(host=self.host, port=self.port) as hdfs: + _, file_name = os.path.split(local_file) + destination = posixpath.join(hdfs_base_path, file_name) + with hdfs.open(destination, 'wb') as output: + output.write(open(local_file, "rb").read())
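# Editor's sketch (not part of the patch): why this change swaps os.path for posixpath when
# building artifact paths. Artifact paths are always '/'-separated, while os.path.relpath
# produces '\\'-separated paths on Windows, so relative local paths must be translated before
# they are used as artifact paths. mlflow.utils.file_utils.relative_path_to_artifact_path does
# this in the patch; the helper below is an illustrative stand-in using ntpath so the behavior
# can be demonstrated on any platform.
import ntpath
import posixpath

def to_artifact_path(windows_rel_path):
    # Split on the Windows separator and re-join with '/'.
    return posixpath.join(*windows_rel_path.split(ntpath.sep))

# On Windows, os.path.relpath("C:/a/b/c", "C:/a") returns "b\\c":
assert to_artifact_path("b\\c") == "b/c"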
+ def log_artifacts(self, local_dir, artifact_path=None): + """ + Log artifacts in hdfs. + Missing remote sub-directories will be created if needed. + :param local_dir: source dir path + :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path + """ + hdfs_base_path = _resolve_base_path(self.path, artifact_path) + + with hdfs_system(host=self.host, port=self.port) as hdfs: + + if not hdfs.exists(hdfs_base_path): + hdfs.mkdir(hdfs_base_path) + + for subdir_path, _, files in os.walk(local_dir): + + relative_path = _relative_path_local(local_dir, subdir_path) + + hdfs_subdir_path = posixpath.join(hdfs_base_path, relative_path) \ + if relative_path else hdfs_base_path + + if not hdfs.exists(hdfs_subdir_path): + hdfs.mkdir(hdfs_subdir_path) + + for each_file in files: + source = os.path.join(subdir_path, each_file) + destination = posixpath.join(hdfs_subdir_path, each_file) + with hdfs.open(destination, 'wb') as output_stream: + output_stream.write(open(source, "rb").read()) + + def list_artifacts(self, path=None): + """ + Lists files and directories under artifacts directory for the current run_id. + (self.path contains the base path - hdfs:/some/path/run_id/artifacts) + + :param path: Relative source path. Possible subdirectory existing under + hdfs:/some/path/run_id/artifacts + :return: List of files and directories under given path - + example: + ['conda.yaml', 'MLmodel', 'model.pkl'] + """ + hdfs_base_path = _resolve_base_path(self.path, path) + base_path_len = len(hdfs_base_path) + 1 + + with hdfs_system(host=self.host, port=self.port) as hdfs: + paths = [] + for path, is_dir, size in self._walk_path(hdfs, hdfs_base_path): + paths.append(FileInfo(path[base_path_len:], is_dir, size)) + return sorted(paths, key=lambda f: f.path) + + def _walk_path(self, hdfs, hdfs_path): + if hdfs.exists(hdfs_path): + if hdfs.isdir(hdfs_path): + for subdir, _, files in hdfs.walk(hdfs_path): + if subdir != hdfs_path: + yield subdir, hdfs.isdir(subdir), hdfs.info(subdir).get("size") + for f in files: + file_path = posixpath.join(subdir, f) + yield file_path, hdfs.isdir(file_path), hdfs.info(file_path).get("size") + else: + yield hdfs_path, False, hdfs.info(hdfs_path).get("size") + + def download_artifacts(self, artifact_path, dst_path=None): + """ + Download an artifact file or directory to a local directory/file if applicable, and + return a local path for it. + The caller is responsible for managing the lifecycle of the downloaded artifacts. + + (self.path contains the base path - hdfs:/some/path/run_id/artifacts) + + :param artifact_path: Relative source path to the desired artifacts file or directory. + :param dst_path: Absolute path of the local filesystem destination directory to which + to download the specified artifacts. This directory must already + exist. If unspecified, the artifacts will be downloaded to a new, + uniquely-named directory on the local filesystem. + + :return: Absolute path of the local filesystem location containing the downloaded + artifacts - file/directory. 
+ """ + + hdfs_base_path = _resolve_base_path(self.path, artifact_path) + local_dir = _tmp_dir(dst_path) + + with hdfs_system(host=self.host, port=self.port) as hdfs: + + if not hdfs.isdir(hdfs_base_path): + local_path = os.path.join(local_dir, os.path.normpath(artifact_path)) + _download_hdfs_file(hdfs, hdfs_base_path, local_path) + return local_path + + for path, is_dir, _ in self._walk_path(hdfs, hdfs_base_path): + + relative_path = _relative_path_remote(hdfs_base_path, path) + local_path = os.path.join(local_dir, relative_path) \ + if relative_path else local_dir + + if is_dir: + mkdir(local_path) + else: + _download_hdfs_file(hdfs, path, local_path) + return local_dir + + def _download_file(self, remote_file_path, local_path): + raise MlflowException('This is not implemented. Should never be called.') + + +@contextmanager +def hdfs_system(host, port): + """ + hdfs system context - Attempt to establish the connection to hdfs + and yields HadoopFileSystem + + :param host: hostname or when relaying on the core-site.xml config use 'default' + :param port: port or when relaying on the core-site.xml config use 0 + """ + import pyarrow as pa + + driver = os.getenv('MLFLOW_HDFS_DRIVER') or 'libhdfs' + kerb_ticket = os.getenv('MLFLOW_KERBEROS_TICKET_CACHE') + kerberos_user = os.getenv('MLFLOW_KERBEROS_USER') + extra_conf = _parse_extra_conf(os.getenv('MLFLOW_PYARROW_EXTRA_CONF')) + + connected = pa.hdfs.connect(host=host or 'default', + port=port or 0, + user=kerberos_user, + driver=driver, + kerb_ticket=kerb_ticket, + extra_conf=extra_conf) + yield connected + connected.close() + + +def _resolve_connection_params(artifact_uri): + parsed = urllib.parse.urlparse(artifact_uri) + return parsed.hostname, parsed.port, parsed.path + + +def _resolve_base_path(path, artifact_path): + if path == artifact_path: + return path + if artifact_path: + return posixpath.join(path, artifact_path) + return path + + +def _relative_path(base_dir, subdir_path, path_module): + relative_path = path_module.relpath(subdir_path, base_dir) + return relative_path if relative_path is not '.' 
+ + +def _relative_path_local(base_dir, subdir_path): + rel_path = _relative_path(base_dir, subdir_path, os.path) + return relative_path_to_artifact_path(rel_path) if rel_path is not None else None + + +def _relative_path_remote(base_dir, subdir_path): + return _relative_path(base_dir, subdir_path, posixpath) + + +def _tmp_dir(local_path): + return os.path.abspath(tempfile.mkdtemp(dir=local_path)) + + +def _download_hdfs_file(hdfs, remote_file_path, local_file_path): + with open(local_file_path, 'wb') as f: + f.write(hdfs.open(remote_file_path, 'rb').read()) + + +def _parse_extra_conf(extra_conf): + if extra_conf: + def as_pair(config): + key, val = config.split('=') + return key, val + + list_of_key_val = [as_pair(conf) for conf in extra_conf.split(',')] + return dict(list_of_key_val) + return None diff --git a/mlflow/store/local_artifact_repo.py b/mlflow/store/local_artifact_repo.py index be3aa77b40fdf..69d836c488376 100644 --- a/mlflow/store/local_artifact_repo.py +++ b/mlflow/store/local_artifact_repo.py @@ -1,45 +1,88 @@ -import os import distutils.dir_util as dir_util +import os import shutil -from mlflow.store.artifact_repo import ArtifactRepository -from mlflow.utils.file_utils import (build_path, exists, mkdir, list_all, get_file_info, - get_relative_path) -from mlflow.utils.validation import path_not_unique, bad_path_message +from mlflow.store.artifact_repo import ArtifactRepository, verify_artifact_path +from mlflow.utils.file_utils import mkdir, list_all, get_file_info, local_file_uri_to_path, \ + relative_path_to_artifact_path class LocalArtifactRepository(ArtifactRepository): """Stores artifacts as files in a local directory.""" + def __init__(self, *args, **kwargs): + super(LocalArtifactRepository, self).__init__(*args, **kwargs) + self._artifact_dir = local_file_uri_to_path(self.artifact_uri) + + @property + def artifact_dir(self): + return self._artifact_dir + def log_artifact(self, local_file, artifact_path=None): - if artifact_path and path_not_unique(artifact_path): - raise Exception("Invalid artifact path: '%s'. %s" % (artifact_path, - bad_path_message(artifact_path))) - artifact_dir = build_path(self.artifact_uri, artifact_path) \ - if artifact_path else self.artifact_uri - if not exists(artifact_dir): + verify_artifact_path(artifact_path) + # NOTE: The artifact_path is expected to be in posix format. + # Posix paths work fine on windows but just in case we normalize it here. + if artifact_path: + artifact_path = os.path.normpath(artifact_path) + + artifact_dir = os.path.join(self.artifact_dir, artifact_path) if artifact_path else \ + self.artifact_dir + if not os.path.exists(artifact_dir): mkdir(artifact_dir) shutil.copy(local_file, artifact_dir) def log_artifacts(self, local_dir, artifact_path=None): - if artifact_path and path_not_unique(artifact_path): - raise Exception("Invalid artifact path: '%s'. %s" % (artifact_path, - bad_path_message(artifact_path))) - artifact_dir = build_path(self.artifact_uri, artifact_path) \ - if artifact_path else self.artifact_uri - if not exists(artifact_dir): + verify_artifact_path(artifact_path) + # NOTE: The artifact_path is expected to be in posix format. + # Posix paths work fine on windows but just in case we normalize it here. 
+ if artifact_path: + artifact_path = os.path.normpath(artifact_path) + artifact_dir = os.path.join(self.artifact_dir, artifact_path) if artifact_path else \ + self.artifact_dir + if not os.path.exists(artifact_dir): mkdir(artifact_dir) dir_util.copy_tree(src=local_dir, dst=artifact_dir) + def download_artifacts(self, artifact_path, dst_path=None): + """ + Artifacts tracked by ``LocalArtifactRepository`` already exist on the local filesystem. + If ``dst_path`` is ``None``, the absolute filesystem path of the specified artifact is + returned. If ``dst_path`` is not ``None``, the local artifact is copied to ``dst_path``. + + :param artifact_path: Relative source path to the desired artifacts. + :param dst_path: Absolute path of the local filesystem destination directory to which to + download the specified artifacts. This directory must already exist. If + unspecified, the absolute path of the local artifact will be returned. + + :return: Absolute path of the local filesystem location containing the desired artifacts. + """ + if dst_path: + return super(LocalArtifactRepository, self).download_artifacts(artifact_path, dst_path) + # NOTE: The artifact_path is expected to be in posix format. + # Posix paths work fine on windows but just in case we normalize it here. + local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path)) + if not os.path.exists(local_artifact_path): + raise IOError('No such file or directory: \'{}\''.format(local_artifact_path)) + return os.path.abspath(local_artifact_path) + def list_artifacts(self, path=None): - artifact_dir = self.artifact_uri - list_dir = build_path(artifact_dir, path) if path else artifact_dir + # NOTE: The path is expected to be in posix format. + # Posix paths work fine on windows but just in case we normalize it here. + if path: + path = os.path.normpath(path) + list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir if os.path.isdir(list_dir): artifact_files = list_all(list_dir, full_path=True) - infos = [get_file_info(f, get_relative_path(artifact_dir, f)) for f in artifact_files] + infos = [get_file_info(f, + relative_path_to_artifact_path( + os.path.relpath(f, self.artifact_dir))) + for f in artifact_files] return sorted(infos, key=lambda f: f.path) else: return [] def _download_file(self, remote_file_path, local_path): - shutil.copyfile(os.path.join(self.artifact_uri, remote_file_path), local_path) + # NOTE: The remote_file_path is expected to be in posix format. + # Posix paths work fine on windows but just in case we normalize it here. 
+ remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path)) + shutil.copyfile(remote_file_path, local_path) diff --git a/mlflow/store/rest_store.py b/mlflow/store/rest_store.py index 76fe8a84b5ad6..988090a7f19bb 100644 --- a/mlflow/store/rest_store.py +++ b/mlflow/store/rest_store.py @@ -1,19 +1,14 @@ import json -from mlflow.store.abstract_store import AbstractStore - -from mlflow.entities import Experiment, Run, RunInfo, RunTag, Param, Metric, ViewType - -from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME -from mlflow.utils.proto_json_utils import message_to_json, parse_dict -from mlflow.utils.rest_utils import http_request_safe - +from mlflow.entities import Experiment, Run, RunInfo, Metric, ViewType +from mlflow.protos import databricks_pb2 from mlflow.protos.service_pb2 import CreateExperiment, MlflowService, GetExperiment, \ GetRun, SearchRuns, ListExperiments, GetMetricHistory, LogMetric, LogParam, SetTag, \ - UpdateRun, CreateRun, GetMetric, GetParam, DeleteRun, RestoreRun, DeleteExperiment, \ - RestoreExperiment, UpdateExperiment - -from mlflow.protos import databricks_pb2 + UpdateRun, CreateRun, DeleteRun, RestoreRun, DeleteExperiment, RestoreExperiment, \ + UpdateExperiment, LogBatch, DeleteTag +from mlflow.store.abstract_store import AbstractStore +from mlflow.utils.proto_json_utils import message_to_json, parse_dict +from mlflow.utils.rest_utils import http_request, verify_rest_response def _get_path(endpoint_path): @@ -21,7 +16,7 @@ def _get_path(endpoint_path): def _api_method_to_info(): - """ Returns a dictionary mapping each API method to a tuple (path, HTTP method)""" + """ Return a dictionary mapping each API method to a tuple (path, HTTP method)""" service_methods = MlflowService.DESCRIPTOR.methods res = {} for service_method in service_methods: @@ -38,6 +33,7 @@ def _api_method_to_info(): class RestStore(AbstractStore): """ Client for a remote tracking server accessed via REST API calls + :param get_host_creds: Method to be invoked prior to every REST request to get the :py:class:`mlflow.rest_utils.MlflowHostCreds` for the request. Note that this is a function so that we can obtain fresh credentials in the case of expiry. @@ -47,6 +43,9 @@ def __init__(self, get_host_creds): super(RestStore, self).__init__() self.get_host_creds = get_host_creds + def _verify_rest_response(self, response, endpoint): + return verify_rest_response(response, endpoint) + def _call_endpoint(self, api, json_body): endpoint, method = _METHOD_TO_INFO[api] response_proto = api.Response() @@ -54,8 +53,16 @@ def _call_endpoint(self, api, json_body): if json_body: json_body = json.loads(json_body) host_creds = self.get_host_creds() - response = http_request_safe( - host_creds=host_creds, endpoint=endpoint, method=method, json=json_body) + + if method == 'GET': + response = http_request( + host_creds=host_creds, endpoint=endpoint, method=method, params=json_body) + else: + response = http_request( + host_creds=host_creds, endpoint=endpoint, method=method, json=json_body) + + response = self._verify_rest_response(response, endpoint) + js_dict = json.loads(response.text) parse_dict(js_dict=js_dict, message=response_proto) return response_proto @@ -71,11 +78,12 @@ def list_experiments(self, view_type=ViewType.ACTIVE_ONLY): def create_experiment(self, name, artifact_location=None): """ - Creates a new experiment. + Create a new experiment. If an experiment with the given name already exists, throws exception. 
:param name: Desired name for an experiment - :return: experiment_id (integer) for the newly created experiment if successful, else None + + :return: experiment_id (string) for the newly created experiment if successful, else None """ req_body = message_to_json(CreateExperiment( name=name, artifact_location=artifact_location)) @@ -84,170 +92,143 @@ def create_experiment(self, name, artifact_location=None): def get_experiment(self, experiment_id): """ - Fetches the experiment from the backend store. + Fetch the experiment from the backend store. + + :param experiment_id: String id for the experiment - :param experiment_id: Integer id for the experiment - :return: A single Experiment object if it exists, otherwise raises an Exception. + :return: A single :py:class:`mlflow.entities.Experiment` object if it exists, + otherwise raises an Exception. """ - req_body = message_to_json(GetExperiment(experiment_id=experiment_id)) + req_body = message_to_json(GetExperiment(experiment_id=str(experiment_id))) response_proto = self._call_endpoint(GetExperiment, req_body) return Experiment.from_proto(response_proto.experiment) - def get_experiment_by_name(self, name): - for experiment in self.list_experiments(ViewType.ALL): - if experiment.name == name: - return experiment - return None - def delete_experiment(self, experiment_id): - req_body = message_to_json(DeleteExperiment(experiment_id=experiment_id)) + req_body = message_to_json(DeleteExperiment(experiment_id=str(experiment_id))) self._call_endpoint(DeleteExperiment, req_body) def restore_experiment(self, experiment_id): - req_body = message_to_json(RestoreExperiment(experiment_id=experiment_id)) + req_body = message_to_json(RestoreExperiment(experiment_id=str(experiment_id))) self._call_endpoint(RestoreExperiment, req_body) def rename_experiment(self, experiment_id, new_name): req_body = message_to_json(UpdateExperiment( - experiment_id=experiment_id, new_name=new_name)) + experiment_id=str(experiment_id), new_name=new_name)) self._call_endpoint(UpdateExperiment, req_body) - def get_run(self, run_uuid): + def get_run(self, run_id): """ - Fetches the run from backend store + Fetch the run from backend store + + :param run_id: Unique identifier for the run - :param run_uuid: Unique identifier for the run :return: A single Run object if it exists, otherwise raises an Exception """ - req_body = message_to_json(GetRun(run_uuid=run_uuid)) + req_body = message_to_json(GetRun(run_uuid=run_id, run_id=run_id)) response_proto = self._call_endpoint(GetRun, req_body) return Run.from_proto(response_proto.run) - def update_run_info(self, run_uuid, run_status, end_time): + def update_run_info(self, run_id, run_status, end_time): """ Updates the metadata of the specified run. """ - req_body = message_to_json(UpdateRun(run_uuid=run_uuid, status=run_status, + req_body = message_to_json(UpdateRun(run_uuid=run_id, run_id=run_id, status=run_status, end_time=end_time)) response_proto = self._call_endpoint(UpdateRun, req_body) return RunInfo.from_proto(response_proto.run_info) - def create_run(self, experiment_id, user_id, run_name, source_type, source_name, - entry_point_name, start_time, source_version, tags, parent_run_id): + def create_run(self, experiment_id, user_id, start_time, tags): """ - Creates a run under the specified experiment ID, setting the run's status to "RUNNING" + Create a run under the specified experiment ID, setting the run's status to "RUNNING" and the start time to the current time. 
:param experiment_id: ID of the experiment for this run :param user_id: ID of the user launching this run :param source_type: Enum (integer) describing the source of the run + :return: The created Run object """ tag_protos = [tag.to_proto() for tag in tags] req_body = message_to_json(CreateRun( - experiment_id=experiment_id, user_id=user_id, run_name="", - source_type=source_type, source_name=source_name, entry_point_name=entry_point_name, - start_time=start_time, source_version=source_version, tags=tag_protos, - parent_run_id=parent_run_id)) + experiment_id=str(experiment_id), user_id=user_id, + start_time=start_time, tags=tag_protos)) response_proto = self._call_endpoint(CreateRun, req_body) run = Run.from_proto(response_proto.run) - if run_name: - self.set_tag(run.info.run_uuid, RunTag(key=MLFLOW_RUN_NAME, value=run_name)) return run - def log_metric(self, run_uuid, metric): + def log_metric(self, run_id, metric): """ - Logs a metric for the specified run - :param run_uuid: String id for the run + Log a metric for the specified run + + :param run_id: String id for the run :param metric: Metric instance to log """ req_body = message_to_json(LogMetric( - run_uuid=run_uuid, key=metric.key, value=metric.value, timestamp=metric.timestamp)) + run_uuid=run_id, run_id=run_id, + key=metric.key, value=metric.value, timestamp=metric.timestamp, + step=metric.step)) self._call_endpoint(LogMetric, req_body) - def log_param(self, run_uuid, param): + def log_param(self, run_id, param): """ - Logs a param for the specified run - :param run_uuid: String id for the run + Log a param for the specified run + + :param run_id: String id for the run :param param: Param instance to log """ - req_body = message_to_json(LogParam(run_uuid=run_uuid, key=param.key, value=param.value)) + req_body = message_to_json(LogParam( + run_uuid=run_id, run_id=run_id, key=param.key, value=param.value)) self._call_endpoint(LogParam, req_body) - def set_tag(self, run_uuid, tag): + def set_tag(self, run_id, tag): """ - Sets a tag for the specified run - :param run_uuid: String id for the run + Set a tag for the specified run + + :param run_id: String ID of the run :param tag: RunTag instance to log """ - req_body = message_to_json(SetTag(run_uuid=run_uuid, key=tag.key, value=tag.value)) + req_body = message_to_json(SetTag( + run_uuid=run_id, run_id=run_id, key=tag.key, value=tag.value)) self._call_endpoint(SetTag, req_body) - def get_metric(self, run_uuid, metric_key): + def delete_tag(self, run_id, key): """ - Returns the last logged value for a given metric. - - :param run_uuid: Unique identifier for run - :param metric_key: Metric name within the run - - :return: A single float value for the give metric if logged, else None + Delete a tag from a run. This is irreversible. + :param run_id: String ID of the run + :param key: Name of the tag """ - req_body = message_to_json(GetMetric(run_uuid=run_uuid, metric_key=metric_key)) - response_proto = self._call_endpoint(GetMetric, req_body) - return Metric.from_proto(response_proto.metric) + req_body = message_to_json(DeleteTag(run_id=run_id, key=key)) + self._call_endpoint(DeleteTag, req_body) - def get_param(self, run_uuid, param_name): + def get_metric_history(self, run_id, metric_key): """ - Returns the value of the specified parameter. - - :param run_uuid: Unique identifier for run - :param param_name: Parameter name within the run + Return all logged values for a given metric. 
- :return: Value of the given parameter if logged, else None - """ - req_body = message_to_json(GetParam(run_uuid=run_uuid, param_name=param_name)) - response_proto = self._call_endpoint(GetParam, req_body) - return Param.from_proto(response_proto.parameter) - - def get_metric_history(self, run_uuid, metric_key): - """ - Returns all logged value for a given metric. - - :param run_uuid: Unique identifier for run + :param run_id: Unique identifier for run :param metric_key: Metric name within the run - :return: A list of float values logged for the give metric if logged, else empty list + :return: A list of :py:class:`mlflow.entities.Metric` entities if logged, else empty list """ - req_body = message_to_json(GetMetricHistory(run_uuid=run_uuid, metric_key=metric_key)) + req_body = message_to_json(GetMetricHistory( + run_uuid=run_id, run_id=run_id, metric_key=metric_key)) response_proto = self._call_endpoint(GetMetricHistory, req_body) - return [Metric.from_proto(metric).value for metric in response_proto.metrics] - - def search_runs(self, experiment_ids, search_expressions, run_view_type): - """ - Returns runs that match the given list of search expressions within the experiments. - Given multiple search expressions, all these expressions are ANDed together for search. - - :param experiment_ids: List of experiment ids to scope the search - :param search_expression: list of search expressions - - :return: A list of Run objects that satisfy the search expressions - """ - search_expressions_protos = [expr.to_proto() for expr in search_expressions] - req_body = message_to_json(SearchRuns(experiment_ids=experiment_ids, - anded_expressions=search_expressions_protos, - run_view_type=ViewType.to_proto(run_view_type))) + return [Metric.from_proto(metric) for metric in response_proto.metrics] + + def _search_runs(self, experiment_ids, filter_string, run_view_type, max_results, order_by, + page_token): + experiment_ids = [str(experiment_id) for experiment_id in experiment_ids] + sr = SearchRuns(experiment_ids=experiment_ids, + filter=filter_string, + run_view_type=ViewType.to_proto(run_view_type), + max_results=max_results, + order_by=order_by, + page_token=page_token) + req_body = message_to_json(sr) response_proto = self._call_endpoint(SearchRuns, req_body) - return [Run.from_proto(proto_run) for proto_run in response_proto.runs] - - def list_run_infos(self, experiment_id, run_view_type): - """ - Returns run information for runs which belong to the experiment_id - - :param experiment_id: The experiment id which to search. - - :return: A list of RunInfo objects that satisfy the search expressions - """ - runs = self.search_runs(experiment_ids=[experiment_id], search_expressions=[], - run_view_type=run_view_type) - return [run.info for run in runs] + runs = [Run.from_proto(proto_run) for proto_run in response_proto.runs] + # If next_page_token is not set, we will see it as "". We need to convert this to None. 
+ next_page_token = None + if response_proto.next_page_token: + next_page_token = response_proto.next_page_token + return runs, next_page_token def delete_run(self, run_id): req_body = message_to_json(DeleteRun(run_id=run_id)) @@ -256,3 +237,11 @@ def delete_run(self, run_id): def restore_run(self, run_id): req_body = message_to_json(RestoreRun(run_id=run_id)) self._call_endpoint(RestoreRun, req_body) + + def log_batch(self, run_id, metrics, params, tags): + metric_protos = [metric.to_proto() for metric in metrics] + param_protos = [param.to_proto() for param in params] + tag_protos = [tag.to_proto() for tag in tags] + req_body = message_to_json( + LogBatch(metrics=metric_protos, params=param_protos, tags=tag_protos, run_id=run_id)) + self._call_endpoint(LogBatch, req_body) diff --git a/mlflow/store/runs_artifact_repo.py b/mlflow/store/runs_artifact_repo.py new file mode 100644 index 0000000000000..4f371a421ad32 --- /dev/null +++ b/mlflow/store/runs_artifact_repo.py @@ -0,0 +1,115 @@ +from six.moves import urllib + +from mlflow.exceptions import MlflowException +from mlflow.store.artifact_repo import ArtifactRepository + + +class RunsArtifactRepository(ArtifactRepository): + """ + Handles artifacts associated with a Run via URIs of the form + `runs:/<run_id>/run-relative/path/to/artifact`. + It is a light wrapper that resolves the artifact path to an absolute URI then instantiates + and uses the artifact repository for that URI. + + The relative path part of ``artifact_uri`` is expected to be in posixpath format, so Windows + users should take special care when constructing the URI. + """ + + def __init__(self, artifact_uri): + from mlflow.tracking.artifact_utils import get_artifact_uri + from mlflow.store.artifact_repository_registry import get_artifact_repository + (run_id, artifact_path) = RunsArtifactRepository.parse_runs_uri(artifact_uri) + uri = get_artifact_uri(run_id, artifact_path) + assert urllib.parse.urlparse(uri).scheme != "runs" # avoid an infinite loop + super(RunsArtifactRepository, self).__init__(artifact_uri) + self.repo = get_artifact_repository(uri) + + @staticmethod + def parse_runs_uri(run_uri): + parsed = urllib.parse.urlparse(run_uri) + if parsed.scheme != "runs": + raise MlflowException( + "Not a proper runs:/ URI: %s. " % run_uri + + "Runs URIs must be of the form 'runs:/<run_id>/run-relative/path/to/artifact'") + # hostname = parsed.netloc # TODO: support later + + path = parsed.path + if not path.startswith('/') or len(path) <= 1: + raise MlflowException( + "Not a proper runs:/ URI: %s. " % run_uri + + "Runs URIs must be of the form 'runs:/<run_id>/run-relative/path/to/artifact'") + path = path[1:] + + path_parts = path.split('/') + run_id = path_parts[0] + if run_id == '': + raise MlflowException( + "Not a proper runs:/ URI: %s. " % run_uri + + "Runs URIs must be of the form 'runs:/<run_id>/run-relative/path/to/artifact'") + + artifact_path = '/'.join(path_parts[1:]) if len(path_parts) > 1 else None + artifact_path = artifact_path if artifact_path != '' else None + + return run_id, artifact_path
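# Editor's sketch (not part of the patch): expected behaviour of parse_runs_uri on a couple of
# sample URIs. The run ID below is made up; the import assumes this new module is installed.
from mlflow.store.runs_artifact_repo import RunsArtifactRepository

assert RunsArtifactRepository.parse_runs_uri(
    "runs:/1234abcd/path/to/model") == ("1234abcd", "path/to/model")
# Without a run-relative path, artifact_path is None and the URI points at the artifact root.
assert RunsArtifactRepository.parse_runs_uri("runs:/1234abcd") == ("1234abcd", None)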
+ def log_artifact(self, local_file, artifact_path=None): + """ + Log a local file as an artifact, optionally taking an ``artifact_path`` to place it + within the run's artifacts. Run artifacts can be organized into directories, so you can + place the artifact in a directory this way. + + :param local_file: Path to artifact to log + :param artifact_path: Directory within the run's artifact directory in which to log the + artifact + """ + self.repo.log_artifact(local_file, artifact_path) + + def log_artifacts(self, local_dir, artifact_path=None): + """ + Log the files in the specified local directory as artifacts, optionally taking + an ``artifact_path`` to place them within the run's artifacts. + + :param local_dir: Directory of local artifacts to log + :param artifact_path: Directory within the run's artifact directory in which to log the + artifacts + """ + self.repo.log_artifacts(local_dir, artifact_path) + + def list_artifacts(self, path): + """ + Return all the artifacts for this run_id directly under path. If path is a file, returns + an empty list. Will error if path is neither a file nor directory. + + :param path: Relative source path that contains the desired artifacts + + :return: List of artifacts as FileInfo listed directly under path. + """ + return self.repo.list_artifacts(path) + + def download_artifacts(self, artifact_path, dst_path=None): + """ + Download an artifact file or directory to a local directory if applicable, and return a + local path for it. + The caller is responsible for managing the lifecycle of the downloaded artifacts. + + :param artifact_path: Relative source path to the desired artifacts. + :param dst_path: Absolute path of the local filesystem destination directory to which to + download the specified artifacts. This directory must already exist. + If unspecified, the artifacts will either be downloaded to a new + uniquely-named directory on the local filesystem or will be returned + directly in the case of the LocalArtifactRepository. + + :return: Absolute path of the local filesystem location containing the desired artifacts. + """ + return self.repo.download_artifacts(artifact_path, dst_path) + + def _download_file(self, remote_file_path, local_path): + """ + Download the file at the specified relative remote path and save + it at the specified local path. + + :param remote_file_path: Source path to the remote file, relative to the root + directory of the artifact repository. + :param local_path: The path to which to save the downloaded file. 
+ """ + self.repo._download_file(remote_file_path, local_path) diff --git a/mlflow/store/s3_artifact_repo.py b/mlflow/store/s3_artifact_repo.py index 93d0bf829ae37..730f799f67b00 100644 --- a/mlflow/store/s3_artifact_repo.py +++ b/mlflow/store/s3_artifact_repo.py @@ -1,17 +1,17 @@ import os -import boto3 +import posixpath from six.moves import urllib from mlflow import data from mlflow.entities import FileInfo +from mlflow.exceptions import MlflowException from mlflow.store.artifact_repo import ArtifactRepository -from mlflow.utils.file_utils import build_path, get_relative_path +from mlflow.utils.file_utils import relative_path_to_artifact_path class S3ArtifactRepository(ArtifactRepository): """Stores artifacts on Amazon S3.""" - @staticmethod def parse_s3_uri(uri): """Parse an S3 URI, returning (bucket, path)""" @@ -24,36 +24,42 @@ def parse_s3_uri(uri): return parsed.netloc, path def _get_s3_client(self): + import boto3 s3_endpoint_url = os.environ.get('MLFLOW_S3_ENDPOINT_URL') return boto3.client('s3', endpoint_url=s3_endpoint_url) def log_artifact(self, local_file, artifact_path=None): (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri) if artifact_path: - dest_path = build_path(dest_path, artifact_path) - dest_path = build_path(dest_path, os.path.basename(local_file)) + dest_path = posixpath.join(dest_path, artifact_path) + dest_path = posixpath.join( + dest_path, os.path.basename(local_file)) s3_client = self._get_s3_client() s3_client.upload_file(local_file, bucket, dest_path) def log_artifacts(self, local_dir, artifact_path=None): (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri) if artifact_path: - dest_path = build_path(dest_path, artifact_path) + dest_path = posixpath.join(dest_path, artifact_path) s3_client = self._get_s3_client() local_dir = os.path.abspath(local_dir) for (root, _, filenames) in os.walk(local_dir): upload_path = dest_path if root != local_dir: - rel_path = get_relative_path(local_dir, root) - upload_path = build_path(dest_path, rel_path) + rel_path = os.path.relpath(root, local_dir) + rel_path = relative_path_to_artifact_path(rel_path) + upload_path = posixpath.join(dest_path, rel_path) for f in filenames: - s3_client.upload_file(build_path(root, f), bucket, build_path(upload_path, f)) + s3_client.upload_file( + os.path.join(root, f), + bucket, + posixpath.join(upload_path, f)) def list_artifacts(self, path=None): (bucket, artifact_path) = data.parse_s3_uri(self.artifact_uri) dest_path = artifact_path if path: - dest_path = build_path(dest_path, path) + dest_path = posixpath.join(dest_path, path) infos = [] prefix = dest_path + "/" s3_client = self._get_s3_client() @@ -62,19 +68,35 @@ def list_artifacts(self, path=None): for result in results: # Subdirectories will be listed as "common prefixes" due to the way we made the request for obj in result.get("CommonPrefixes", []): - subdir = obj.get("Prefix")[len(artifact_path)+1:] - if subdir.endswith("/"): - subdir = subdir[:-1] - infos.append(FileInfo(subdir, True, None)) + subdir_path = obj.get("Prefix") + self._verify_listed_object_contains_artifact_path_prefix( + listed_object_path=subdir_path, artifact_path=artifact_path) + subdir_rel_path = posixpath.relpath( + path=subdir_path, start=artifact_path) + if subdir_rel_path.endswith("/"): + subdir_rel_path = subdir_rel_path[:-1] + infos.append(FileInfo(subdir_rel_path, True, None)) # Objects listed directly will be files for obj in result.get('Contents', []): - name = obj.get("Key")[len(artifact_path)+1:] - size = int(obj.get('Size')) - 
infos.append(FileInfo(name, False, size)) + file_path = obj.get("Key") + self._verify_listed_object_contains_artifact_path_prefix( + listed_object_path=file_path, artifact_path=artifact_path) + file_rel_path = posixpath.relpath(path=file_path, start=artifact_path) + file_size = int(obj.get('Size')) + infos.append(FileInfo(file_rel_path, False, file_size)) return sorted(infos, key=lambda f: f.path) + @staticmethod + def _verify_listed_object_contains_artifact_path_prefix(listed_object_path, artifact_path): + if not listed_object_path.startswith(artifact_path): + raise MlflowException( + "The path of the listed S3 object does not begin with the specified" + " artifact path. Artifact path: {artifact_path}. Object path:" + " {object_path}.".format( + artifact_path=artifact_path, object_path=listed_object_path)) + def _download_file(self, remote_file_path, local_path): (bucket, s3_root_path) = data.parse_s3_uri(self.artifact_uri) - s3_full_path = build_path(s3_root_path, remote_file_path) + s3_full_path = posixpath.join(s3_root_path, remote_file_path) s3_client = self._get_s3_client() s3_client.download_file(bucket, s3_full_path, local_path) diff --git a/mlflow/store/sftp_artifact_repo.py b/mlflow/store/sftp_artifact_repo.py index 2e762a6fc2262..e5c03daf88981 100644 --- a/mlflow/store/sftp_artifact_repo.py +++ b/mlflow/store/sftp_artifact_repo.py @@ -1,8 +1,10 @@ import os +import posixpath +from six.moves import urllib + from mlflow.entities import FileInfo from mlflow.store.artifact_repo import ArtifactRepository -from six.moves import urllib class SFTPArtifactRepository(ArtifactRepository): @@ -50,25 +52,29 @@ def __init__(self, artifact_uri, client=None): super(SFTPArtifactRepository, self).__init__(artifact_uri) def log_artifact(self, local_file, artifact_path=None): - artifact_dir = os.path.join(self.path, artifact_path) \ + artifact_dir = posixpath.join(self.path, artifact_path) \ if artifact_path else self.path self.sftp.makedirs(artifact_dir) - self.sftp.put(local_file, os.path.join(artifact_dir, os.path.basename(local_file))) + self.sftp.put(local_file, + posixpath.join( + artifact_dir, os.path.basename(local_file))) def log_artifacts(self, local_dir, artifact_path=None): - artifact_dir = os.path.join(self.path, artifact_path) \ + artifact_dir = posixpath.join(self.path, artifact_path) \ if artifact_path else self.path self.sftp.makedirs(artifact_dir) self.sftp.put_r(local_dir, artifact_dir) def list_artifacts(self, path=None): artifact_dir = self.path - list_dir = os.path.join(artifact_dir, path) if path else artifact_dir + list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir + if not self.sftp.isdir(list_dir): + return [] artifact_files = self.sftp.listdir(list_dir) infos = [] for file_name in artifact_files: - file_path = file_name if path is None else os.path.join(path, file_name) - full_file_path = os.path.join(list_dir, file_name) + file_path = file_name if path is None else posixpath.join(path, file_name) + full_file_path = posixpath.join(list_dir, file_name) if self.sftp.isdir(full_file_path): infos.append(FileInfo(file_path, True, None)) else: @@ -76,5 +82,5 @@ def list_artifacts(self, path=None): return infos def _download_file(self, remote_file_path, local_path): - remote_full_path = os.path.join(self.path, remote_file_path) + remote_full_path = posixpath.join(self.path, remote_file_path) self.sftp.get(remote_full_path, local_path)
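# Editor's sketch (not part of the patch): how the S3 listing above maps bucket keys back to
# run-relative FileInfo paths. The bucket layout below is hypothetical.
import posixpath

artifact_path = "0/abc123/artifacts"
listed_key = "0/abc123/artifacts/model/MLmodel"
# _verify_listed_object_contains_artifact_path_prefix requires the artifact-path prefix...
assert listed_key.startswith(artifact_path)
# ...and posixpath.relpath then strips it off to produce the relative file path:
assert posixpath.relpath(listed_key, artifact_path) == "model/MLmodel"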
0000000000000..650f1249d48c2 --- /dev/null +++ b/mlflow/store/sqlalchemy_store.py @@ -0,0 +1,528 @@ +import logging +import uuid +from contextlib import contextmanager + +import posixpath +from alembic.script import ScriptDirectory +import sqlalchemy + +from mlflow.entities.lifecycle_stage import LifecycleStage +from mlflow.store import SEARCH_MAX_RESULTS_THRESHOLD +from mlflow.store.dbmodels.db_types import MYSQL +from mlflow.store.dbmodels.models import Base, SqlExperiment, SqlRun, SqlMetric, SqlParam, SqlTag +from mlflow.entities import RunStatus, SourceType, Experiment +from mlflow.store.abstract_store import AbstractStore +from mlflow.entities import ViewType +from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS, \ + INVALID_STATE, RESOURCE_DOES_NOT_EXIST, INTERNAL_ERROR +from mlflow.tracking.utils import _is_local_uri +from mlflow.utils import extract_db_type_from_uri +from mlflow.utils.file_utils import mkdir, local_file_uri_to_path +from mlflow.utils.search_utils import SearchUtils +from mlflow.utils.validation import _validate_batch_log_limits, _validate_batch_log_data, \ + _validate_run_id, _validate_metric +from mlflow.store.db.utils import _upgrade_db, _get_alembic_config, _get_schema_version +from mlflow.store.dbmodels.initial_models import Base as InitialBase + + +_logger = logging.getLogger(__name__) + + +class SqlAlchemyStore(AbstractStore): + """ + SQLAlchemy compliant backend store for tracking metadata for MLflow entities. MLflow + supports the database dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``. + As specified in the + `SQLAlchemy docs <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_, + the database URI is expected in the format + ``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``. If you do not + specify a driver, SQLAlchemy uses a dialect's default driver. + + This store interacts with the SQL store using SQLAlchemy abstractions defined for MLflow entities: + :py:class:`mlflow.store.dbmodels.models.SqlExperiment`, + :py:class:`mlflow.store.dbmodels.models.SqlRun`, + :py:class:`mlflow.store.dbmodels.models.SqlTag`, + :py:class:`mlflow.store.dbmodels.models.SqlMetric`, and + :py:class:`mlflow.store.dbmodels.models.SqlParam`. + + Run artifacts are stored in a separate location using artifact stores conforming to + :py:class:`mlflow.store.artifact_repo.ArtifactRepository`. Default artifact locations for + user experiments are stored in the database along with metadata. Each run artifact location + is recorded in :py:class:`mlflow.store.dbmodels.models.SqlRun` and stored in the backend DB. + """ + ARTIFACTS_FOLDER_NAME = "artifacts" + DEFAULT_EXPERIMENT_ID = "0" + + def __init__(self, db_uri, default_artifact_root): + """ + Create a database backed store. + + :param db_uri: The SQLAlchemy database URI string to connect to the database. See + the `SQLAlchemy docs + <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_ + for format specifications. MLflow supports the dialects ``mysql``, + ``mssql``, ``sqlite``, and ``postgresql``. + :param default_artifact_root: Path/URI to location suitable for large data (such as a blob + store object, DBFS path, or shared NFS file system).
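To make the URI format above concrete, a few illustrative connection strings (credentials, hosts, and database names are placeholders):

    import sqlalchemy

    # Dialect only: SQLAlchemy falls back to the dialect's default driver.
    engine = sqlalchemy.create_engine("sqlite:///mlflow.db")
    # Dialect+driver with username, password, host, port, and database:
    # "mysql+pymysql://user:password@localhost:3306/mlflowdb"
    # "postgresql://user:password@localhost:5432/mlflowdb"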
+ """ + super(SqlAlchemyStore, self).__init__() + self.db_uri = db_uri + self.db_type = extract_db_type_from_uri(db_uri) + self.artifact_root_uri = default_artifact_root + self.engine = sqlalchemy.create_engine(db_uri) + insp = sqlalchemy.inspect(self.engine) + # On a completely fresh MLflow installation against an empty database (verify database + # emptiness by checking that 'experiments' etc aren't in the list of table names), run all + # DB migrations + expected_tables = set([ + SqlExperiment.__tablename__, + SqlRun.__tablename__, + SqlMetric.__tablename__, + SqlParam.__tablename__, + SqlTag.__tablename__ + ]) + if len(expected_tables & set(insp.get_table_names())) == 0: + SqlAlchemyStore._initialize_tables(self.engine) + Base.metadata.bind = self.engine + SessionMaker = sqlalchemy.orm.sessionmaker(bind=self.engine) + self.ManagedSessionMaker = self._get_managed_session_maker(SessionMaker) + SqlAlchemyStore._verify_schema(self.engine) + + if _is_local_uri(default_artifact_root): + mkdir(local_file_uri_to_path(default_artifact_root)) + + if len(self.list_experiments()) == 0: + with self.ManagedSessionMaker() as session: + self._create_default_experiment(session) + + @staticmethod + def _initialize_tables(engine): + _logger.info("Creating initial MLflow database tables...") + InitialBase.metadata.create_all(engine) + engine_url = str(engine.url) + _upgrade_db(engine_url) + + @staticmethod + def _get_latest_schema_revision(): + """Get latest schema revision as a string.""" + # We aren't executing any commands against a DB, so we leave the DB URL unspecified + config = _get_alembic_config(db_url="") + script = ScriptDirectory.from_config(config) + heads = script.get_heads() + if len(heads) != 1: + raise MlflowException("Migration script directory was in unexpected state. Got %s head " + "database versions but expected only 1. Found versions: %s" + % (len(heads), heads)) + return heads[0] + + @staticmethod + def _verify_schema(engine): + head_revision = SqlAlchemyStore._get_latest_schema_revision() + current_rev = _get_schema_version(engine) + if current_rev != head_revision: + raise MlflowException( + "Detected out-of-date database schema (found version %s, but expected %s). " + "Take a backup of your database, then run 'mlflow db upgrade ' " + "to migrate your database to the latest schema. NOTE: schema migration may " + "result in database downtime - please consult your database's documentation for " + "more detail." % (current_rev, head_revision)) + + @staticmethod + def _get_managed_session_maker(SessionMaker): + """ + Creates a factory for producing exception-safe SQLAlchemy sessions that are made available + using a context manager. Any session produced by this factory is automatically committed + if no exceptions are encountered within its associated context. If an exception is + encountered, the session is rolled back. Finally, any session produced by this factory is + automatically closed when the session's associated context is exited. 
+ """ + + @contextmanager + def make_managed_session(): + """Provide a transactional scope around a series of operations.""" + session = SessionMaker() + try: + yield session + session.commit() + except MlflowException: + session.rollback() + raise + except Exception as e: + session.rollback() + raise MlflowException(message=e, error_code=INTERNAL_ERROR) + finally: + session.close() + + return make_managed_session + + def _set_no_auto_for_zero_values(self, session): + if self.db_type == MYSQL: + session.execute("SET @@SESSION.sql_mode='NO_AUTO_VALUE_ON_ZERO';") + + # DB helper methods to allow zero values for columns with auto increments + def _unset_no_auto_for_zero_values(self, session): + if self.db_type == MYSQL: + session.execute("SET @@SESSION.sql_mode='';") + + def _create_default_experiment(self, session): + """ + MLflow UI and client code expects a default experiment with ID 0. + This method uses SQL insert statement to create the default experiment as a hack, since + experiment table uses 'experiment_id' column is a PK and is also set to auto increment. + MySQL and other implementation do not allow value '0' for such cases. + + ToDo: Identify a less hacky mechanism to create default experiment 0 + """ + table = SqlExperiment.__tablename__ + default_experiment = { + SqlExperiment.experiment_id.name: int(SqlAlchemyStore.DEFAULT_EXPERIMENT_ID), + SqlExperiment.name.name: Experiment.DEFAULT_EXPERIMENT_NAME, + SqlExperiment.artifact_location.name: self._get_artifact_location(0), + SqlExperiment.lifecycle_stage.name: LifecycleStage.ACTIVE + } + + def decorate(s): + if isinstance(s, str): + return "'{}'".format(s) + else: + return "{}".format(s) + + # Get a list of keys to ensure we have a deterministic ordering + columns = list(default_experiment.keys()) + values = ", ".join([decorate(default_experiment.get(c)) for c in columns]) + + try: + self._set_no_auto_for_zero_values(session) + session.execute("INSERT INTO {} ({}) VALUES ({});".format( + table, ", ".join(columns), values)) + finally: + self._unset_no_auto_for_zero_values(session) + + def _save_to_db(self, session, objs): + """ + Store in db + """ + if type(objs) is list: + session.add_all(objs) + else: + # single object + session.add(objs) + + def _get_or_create(self, session, model, **kwargs): + instance = session.query(model).filter_by(**kwargs).first() + created = False + + if instance: + return instance, created + else: + instance = model(**kwargs) + self._save_to_db(objs=instance, session=session) + created = True + + return instance, created + + def _get_artifact_location(self, experiment_id): + return posixpath.join(self.artifact_root_uri, str(experiment_id)) + + def create_experiment(self, name, artifact_location=None): + if name is None or name == '': + raise MlflowException('Invalid experiment name', INVALID_PARAMETER_VALUE) + + with self.ManagedSessionMaker() as session: + try: + experiment = SqlExperiment( + name=name, lifecycle_stage=LifecycleStage.ACTIVE, + artifact_location=artifact_location + ) + session.add(experiment) + if not artifact_location: + # this requires a double write. The first one to generate an autoincrement-ed ID + eid = session.query(SqlExperiment).filter_by(name=name).first().experiment_id + experiment.artifact_location = self._get_artifact_location(eid) + except sqlalchemy.exc.IntegrityError as e: + raise MlflowException('Experiment(name={}) already exists. 
' + 'Error: {}'.format(name, str(e)), RESOURCE_ALREADY_EXISTS) + + session.flush() + return str(experiment.experiment_id) + + def _list_experiments(self, session, ids=None, names=None, view_type=ViewType.ACTIVE_ONLY): + stages = LifecycleStage.view_type_to_stages(view_type) + conditions = [SqlExperiment.lifecycle_stage.in_(stages)] + + if ids and len(ids) > 0: + int_ids = [int(eid) for eid in ids] + conditions.append(SqlExperiment.experiment_id.in_(int_ids)) + + if names and len(names) > 0: + conditions.append(SqlExperiment.name.in_(names)) + + return session.query(SqlExperiment).filter(*conditions) + + def list_experiments(self, view_type=ViewType.ACTIVE_ONLY): + with self.ManagedSessionMaker() as session: + return [exp.to_mlflow_entity() for exp in + self._list_experiments(session=session, view_type=view_type)] + + def _get_experiment(self, session, experiment_id, view_type): + experiment_id = experiment_id or SqlAlchemyStore.DEFAULT_EXPERIMENT_ID + experiments = self._list_experiments( + session=session, ids=[experiment_id], view_type=view_type).all() + if len(experiments) == 0: + raise MlflowException('No Experiment with id={} exists'.format(experiment_id), + RESOURCE_DOES_NOT_EXIST) + if len(experiments) > 1: + raise MlflowException('Expected only 1 experiment with id={}. Found {}.'.format( + experiment_id, len(experiments)), INVALID_STATE) + + return experiments[0] + + def get_experiment(self, experiment_id): + with self.ManagedSessionMaker() as session: + return self._get_experiment(session, experiment_id, ViewType.ALL).to_mlflow_entity() + + def get_experiment_by_name(self, experiment_name): + """ + Specialized implementation for SQL backed store. + """ + with self.ManagedSessionMaker() as session: + experiments = self._list_experiments( + names=[experiment_name], view_type=ViewType.ALL, session=session).all() + if len(experiments) == 0: + return None + + if len(experiments) > 1: + raise MlflowException('Expected only 1 experiment with name={}. 
Found {}.'.format( + experiment_name, len(experiments)), INVALID_STATE) + + return experiments[0].to_mlflow_entity() + + def delete_experiment(self, experiment_id): + with self.ManagedSessionMaker() as session: + experiment = self._get_experiment(session, experiment_id, ViewType.ACTIVE_ONLY) + experiment.lifecycle_stage = LifecycleStage.DELETED + self._save_to_db(objs=experiment, session=session) + + def restore_experiment(self, experiment_id): + with self.ManagedSessionMaker() as session: + experiment = self._get_experiment(session, experiment_id, ViewType.DELETED_ONLY) + experiment.lifecycle_stage = LifecycleStage.ACTIVE + self._save_to_db(objs=experiment, session=session) + + def rename_experiment(self, experiment_id, new_name): + with self.ManagedSessionMaker() as session: + experiment = self._get_experiment(session, experiment_id, ViewType.ALL) + if experiment.lifecycle_stage != LifecycleStage.ACTIVE: + raise MlflowException('Cannot rename a non-active experiment.', INVALID_STATE) + + experiment.name = new_name + self._save_to_db(objs=experiment, session=session) + + def create_run(self, experiment_id, user_id, start_time, tags): + with self.ManagedSessionMaker() as session: + experiment = self.get_experiment(experiment_id) + + if experiment.lifecycle_stage != LifecycleStage.ACTIVE: + raise MlflowException('Experiment id={} must be active'.format(experiment_id), + INVALID_STATE) + + run_id = uuid.uuid4().hex + artifact_location = posixpath.join(experiment.artifact_location, run_id, + SqlAlchemyStore.ARTIFACTS_FOLDER_NAME) + run = SqlRun(name="", artifact_uri=artifact_location, run_uuid=run_id, + experiment_id=experiment_id, + source_type=SourceType.to_string(SourceType.UNKNOWN), + source_name="", entry_point_name="", + user_id=user_id, status=RunStatus.to_string(RunStatus.RUNNING), + start_time=start_time, end_time=None, + source_version="", lifecycle_stage=LifecycleStage.ACTIVE) + + tags_dict = {} + for tag in tags: + tags_dict[tag.key] = tag.value + run.tags = [SqlTag(key=key, value=value) for key, value in tags_dict.items()] + self._save_to_db(objs=run, session=session) + + return run.to_mlflow_entity() + + def _get_run(self, session, run_uuid): + runs = session.query(SqlRun).filter(SqlRun.run_uuid == run_uuid).all() + + if len(runs) == 0: + raise MlflowException('Run with id={} not found'.format(run_uuid), + RESOURCE_DOES_NOT_EXIST) + if len(runs) > 1: + raise MlflowException('Expected only 1 run with id={}. Found {}.'.format(run_uuid, + len(runs)), + INVALID_STATE) + + return runs[0] + + def _check_run_is_active(self, run): + if run.lifecycle_stage != LifecycleStage.ACTIVE: + raise MlflowException("The run {} must be in 'active' state. Current state is {}." + .format(run.run_uuid, run.lifecycle_stage), + INVALID_PARAMETER_VALUE) + + def _check_run_is_deleted(self, run): + if run.lifecycle_stage != LifecycleStage.DELETED: + raise MlflowException("The run {} must be in 'deleted' state. Current state is {}." 
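A hedged usage sketch of the soft-delete lifecycle that the experiment methods above and the run checks below implement (database URI and artifact root are illustrative):

    from mlflow.store.sqlalchemy_store import SqlAlchemyStore

    store = SqlAlchemyStore("sqlite:///mlflow.db", "./mlruns")
    exp_id = store.create_experiment("demo")
    store.delete_experiment(exp_id)     # ACTIVE -> DELETED (rows are kept, not dropped)
    store.restore_experiment(exp_id)    # DELETED -> ACTIVE
    store.rename_experiment(exp_id, "demo-renamed")  # only allowed while ACTIVE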
+ .format(run.run_uuid, run.lifecycle_stage), + INVALID_PARAMETER_VALUE) + + def update_run_info(self, run_id, run_status, end_time): + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + run.status = RunStatus.to_string(run_status) + run.end_time = end_time + + self._save_to_db(objs=run, session=session) + run = run.to_mlflow_entity() + + return run.info + + def get_run(self, run_id): + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + return run.to_mlflow_entity() + + def restore_run(self, run_id): + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_deleted(run) + run.lifecycle_stage = LifecycleStage.ACTIVE + self._save_to_db(objs=run, session=session) + + def delete_run(self, run_id): + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + run.lifecycle_stage = LifecycleStage.DELETED + self._save_to_db(objs=run, session=session) + + def log_metric(self, run_id, metric): + _validate_metric(metric.key, metric.value, metric.timestamp, metric.step) + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + # ToDo: Consider prior checks for null, type, metric name validations, ... etc. + self._get_or_create(model=SqlMetric, run_uuid=run_id, key=metric.key, + value=metric.value, timestamp=metric.timestamp, step=metric.step, + session=session) + + def get_metric_history(self, run_id, metric_key): + with self.ManagedSessionMaker() as session: + metrics = session.query(SqlMetric).filter_by(run_uuid=run_id, key=metric_key).all() + return [metric.to_mlflow_entity() for metric in metrics] + + def log_param(self, run_id, param): + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + # if we try to update the value of an existing param this will fail + # because it will try to create it with same run_uuid, param key + try: + # This will check for various integrity checks for params table. + # ToDo: Consider prior checks for null, type, param name validations, ... etc. + self._get_or_create(model=SqlParam, session=session, run_uuid=run_id, + key=param.key, value=param.value) + # Explicitly commit the session in order to catch potential integrity errors + # while maintaining the current managed session scope ("commit" checks that + # a transaction satisfies uniqueness constraints and throws integrity errors + # when they are violated; "get_or_create()" does not perform these checks). It is + # important that we maintain the same session scope because, in the case of + # an integrity error, we want to examine the uniqueness of parameter values using + # the same database state that the session uses during "commit". Creating a new + # session synchronizes the state with the database. As a result, if the conflicting + # parameter value were to be removed prior to the creation of a new session, + # we would be unable to determine the cause of failure for the first session's + # "commit" operation. + session.commit() + except sqlalchemy.exc.IntegrityError: + # Roll back the current session to make it usable for further transactions. In the + # event of an error during "commit", a rollback is required in order to continue + # using the session. 
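The commit-then-inspect strategy explained in the comment above can be reproduced in isolation; a self-contained sketch with a stand-in ``params`` table (schema and values are illustrative):

    import sqlalchemy
    from sqlalchemy import Column, String, PrimaryKeyConstraint
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Param(Base):
        __tablename__ = "params"
        run_uuid = Column(String(32))
        key = Column(String(250))
        value = Column(String(250))
        __table_args__ = (PrimaryKeyConstraint("run_uuid", "key"),)

    engine = sqlalchemy.create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add(Param(run_uuid="abc", key="lr", value="0.1"))
    session.commit()
    try:
        # Same (run_uuid, key) with a new value violates the composite primary key.
        session.add(Param(run_uuid="abc", key="lr", value="0.2"))
        session.commit()  # the uniqueness check only fires at commit/flush time
    except sqlalchemy.exc.IntegrityError:
        session.rollback()  # the session is unusable until rolled back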
In this case, we re-use the session because the SqlRun, `run`, + is lazily evaluated during the invocation of `run.params`. + session.rollback() + existing_params = [p.value for p in run.params if p.key == param.key] + if len(existing_params) > 0: + old_value = existing_params[0] + raise MlflowException( + "Changing param value is not allowed. Param with key='{}' was already" + " logged with value='{}' for run ID='{}'. Attempted logging new value" + " '{}'.".format( + param.key, old_value, run_id, param.value), INVALID_PARAMETER_VALUE) + else: + raise + + def set_tag(self, run_id, tag): + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + session.merge(SqlTag(run_uuid=run_id, key=tag.key, value=tag.value)) + + def delete_tag(self, run_id, key): + """ + Delete a tag from a run. This is irreversible. + :param run_id: String ID of the run + :param key: Name of the tag + """ + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + filtered_tags = session.query(SqlTag).filter_by(run_uuid=run_id, key=key).all() + if len(filtered_tags) == 0: + raise MlflowException( + "No tag with name: {} in run with id {}".format(key, run_id), + error_code=RESOURCE_DOES_NOT_EXIST) + elif len(filtered_tags) > 1: + raise MlflowException( + "Bad data in database - tags for a specific run must have " + "a single unique value. " + "See https://mlflow.org/docs/latest/tracking.html#adding-tags-to-runs", + error_code=INVALID_STATE) + session.delete(filtered_tags[0]) + + def _search_runs(self, experiment_ids, filter_string, run_view_type, max_results, order_by, + page_token): + # TODO: push search query into backend database layer + if max_results > SEARCH_MAX_RESULTS_THRESHOLD: + raise MlflowException("Invalid value for request parameter max_results. 
It must be at " + "most {}, but got value {}".format(SEARCH_MAX_RESULTS_THRESHOLD, + max_results), + INVALID_PARAMETER_VALUE) + with self.ManagedSessionMaker() as session: + runs = [run.to_mlflow_entity() + for exp in experiment_ids + for run in self._list_runs(session, exp, run_view_type)] + filtered = SearchUtils.filter(runs, filter_string) + sorted_runs = SearchUtils.sort(filtered, order_by) + runs, next_page_token = SearchUtils.paginate(sorted_runs, page_token, max_results) + return runs, next_page_token + + def _list_runs(self, session, experiment_id, run_view_type): + exp = self._list_experiments( + ids=[experiment_id], view_type=ViewType.ALL, session=session).first() + stages = set(LifecycleStage.view_type_to_stages(run_view_type)) + return [run for run in exp.runs if run.lifecycle_stage in stages] + + def log_batch(self, run_id, metrics, params, tags): + _validate_run_id(run_id) + _validate_batch_log_data(metrics, params, tags) + _validate_batch_log_limits(metrics, params, tags) + with self.ManagedSessionMaker() as session: + run = self._get_run(run_uuid=run_id, session=session) + self._check_run_is_active(run) + try: + for param in params: + self.log_param(run_id, param) + for metric in metrics: + self.log_metric(run_id, metric) + for tag in tags: + self.set_tag(run_id, tag) + except MlflowException as e: + raise e + except Exception as e: + raise MlflowException(e, INTERNAL_ERROR) diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/README b/mlflow/temporary_db_migrations_for_pre_1_users/README new file mode 100644 index 0000000000000..b4e657a2da63d --- /dev/null +++ b/mlflow/temporary_db_migrations_for_pre_1_users/README @@ -0,0 +1,4 @@ +This directory contains configuration scripts and database migration logic for upgrading +MLflow tracking databases created before MLflow 1.0 to an MLflow-1.0-compatible schema. +To run database migrations, use the `mlflow db upgrade` CLI command. Note that this directory will +be removed in MLflow 1.1. diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/__init__.py b/mlflow/temporary_db_migrations_for_pre_1_users/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini b/mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini new file mode 100644 index 0000000000000..4cd5bd3bdf17b --- /dev/null +++ b/mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini @@ -0,0 +1,74 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +script_location = alembic + +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# timezone to use when rendering the date +# within the migration file as well as the filename. +# string value is passed to dateutil.tz.gettz() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the +# "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; this defaults +# to alembic/versions. 
When using multiple version +# directories, initial revisions must be specified with --version-path +# version_locations = %(here)s/bar %(here)s/bat alembic/versions + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +sqlalchemy.url = "" + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/env.py b/mlflow/temporary_db_migrations_for_pre_1_users/env.py new file mode 100644 index 0000000000000..c7174ed20064b --- /dev/null +++ b/mlflow/temporary_db_migrations_for_pre_1_users/env.py @@ -0,0 +1,77 @@ + +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +from mlflow.store.dbmodels.models import Base +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + # Try https://stackoverflow.com/questions/30378233/sqlite-lack-of-alter-support-alembic-migration-failing-because-of-this-solutio + context.configure( + url=url, target_metadata=target_metadata, literal_binds=True, render_as_batch=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
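For context, the ``mlflow db upgrade`` command mentioned in the README above drives this same machinery; a bare-bones sketch of invoking Alembic programmatically against a config like the one shown (the ini path and database URL are illustrative, and the ini's ``script_location`` must point at the migration scripts):

    from alembic import command
    from alembic.config import Config

    config = Config("alembic.ini")  # assumes an ini file like the one above exists locally
    config.set_main_option("sqlalchemy.url", "sqlite:///mlflow.db")
    command.upgrade(config, "head")  # apply all migrations up to the latest revision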
+ + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata, render_as_batch=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/script.py.mako b/mlflow/temporary_db_migrations_for_pre_1_users/script.py.mako new file mode 100644 index 0000000000000..2c0156303a8df --- /dev/null +++ b/mlflow/temporary_db_migrations_for_pre_1_users/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/versions/__init__.py b/mlflow/temporary_db_migrations_for_pre_1_users/versions/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/mlflow/temporary_db_migrations_for_pre_1_users/versions/ff01da956556_ensure_unique_constraint_names.py b/mlflow/temporary_db_migrations_for_pre_1_users/versions/ff01da956556_ensure_unique_constraint_names.py new file mode 100644 index 0000000000000..8d5567c9cc7fb --- /dev/null +++ b/mlflow/temporary_db_migrations_for_pre_1_users/versions/ff01da956556_ensure_unique_constraint_names.py @@ -0,0 +1,191 @@ +"""ensure_unique_constraint_names + +Revision ID: ff01da956556 +Revises: +Create Date: 2019-05-18 22:58:06.487489 + +""" +import time + +from alembic import op +from sqlalchemy import column, CheckConstraint +from sqlalchemy.orm import relationship, backref +from sqlalchemy import ( + Column, String, Float, ForeignKey, Integer, CheckConstraint, + BigInteger, PrimaryKeyConstraint) +from sqlalchemy.ext.declarative import declarative_base + +# revision identifiers, used by Alembic. +revision = 'ff01da956556' +down_revision = None +branch_labels = None +depends_on = None + +# Inline initial runs and experiment table schema for use in migration logic +# Copied from https://github.com/mlflow/mlflow/blob/v0.9.1/mlflow/store/dbmodels/models.py, with +# modifications to substitute constants from MLflow with hard-coded values (e.g. replacing +# SourceType.to_string(SourceType.NOTEBOOK) with the constant "NOTEBOOK"). +Base = declarative_base() + + +SourceTypes = [ + "NOTEBOOK", + "JOB", + "LOCAL", + "UNKNOWN", + "PROJECT", +] + +RunStatusTypes = [ + "SCHEDULED", + "FAILED", + "FINISHED", + "RUNNING", +] + + +class SqlExperiment(Base): + """ + DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in ``experiment`` table. + """ + __tablename__ = 'experiments' + + experiment_id = Column(Integer, autoincrement=True) + """ + Experiment ID: `Integer`. *Primary Key* for ``experiment`` table. + """ + name = Column(String(256), unique=True, nullable=False) + """ + Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in + table schema. 
+ """ + artifact_location = Column(String(256), nullable=True) + """ + Default artifact location for this experiment: `String` (limit 256 characters). Defined as + *Non null* in table schema. + """ + lifecycle_stage = Column(String(32), default="active") + """ + Lifecycle Stage of experiment: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. + """ + + __table_args__ = ( + CheckConstraint( + lifecycle_stage.in_(["active", "deleted"]), + name='lifecycle_stage'), + PrimaryKeyConstraint('experiment_id', name='experiment_pk') + ) + + def __repr__(self): + return ''.format(self.experiment_id, self.name) + + +class SqlRun(Base): + """ + DB model for :py:class:`mlflow.entities.Run`. These are recorded in ``runs`` table. + """ + __tablename__ = 'runs' + + run_uuid = Column(String(32), nullable=False) + """ + Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table. + """ + name = Column(String(250)) + """ + Run name: `String` (limit 250 characters). + """ + source_type = Column(String(20), default="LOCAL") + """ + Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``, + ``LOCAL`` (default), or ``UNKNOWN``. + """ + source_name = Column(String(500)) + """ + Name of source recording the run: `String` (limit 500 characters). + """ + entry_point_name = Column(String(50)) + """ + Entry-point name that launched the run run: `String` (limit 50 characters). + """ + user_id = Column(String(256), nullable=True, default=None) + """ + User ID: `String` (limit 256 characters). Defaults to ``null``. + """ + status = Column(String(20), default="SCHEDULED") + """ + Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default), + ``FINISHED``, ``FAILED``. + """ + start_time = Column(BigInteger, default=int(time.time())) + """ + Run start time: `BigInteger`. Defaults to current system time. + """ + end_time = Column(BigInteger, nullable=True, default=None) + """ + Run end time: `BigInteger`. + """ + source_version = Column(String(50)) + """ + Source version: `String` (limit 50 characters). + """ + lifecycle_stage = Column(String(20), default="active") + """ + Lifecycle Stage of run: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. + """ + artifact_uri = Column(String(200), default=None) + """ + Default artifact location for this run: `String` (limit 200 characters). + """ + experiment_id = Column(Integer, ForeignKey('experiments.experiment_id')) + """ + Experiment ID to which this run belongs to: *Foreign Key* into ``experiment`` table. + """ + experiment = relationship('SqlExperiment', backref=backref('runs', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`. + """ + + __table_args__ = ( + CheckConstraint(source_type.in_(SourceTypes), name='source_type'), + CheckConstraint(status.in_(RunStatusTypes), name='status'), + CheckConstraint(lifecycle_stage.in_(["active", "deleted"]), + name='lifecycle_stage'), + PrimaryKeyConstraint('run_uuid', name='run_pk') + ) + + +def upgrade(): + # Use batch mode so that we can run "ALTER TABLE" statements against SQLite + # databases (see more info at https://alembic.sqlalchemy.org/en/latest/ + # batch.html#running-batch-migrations-for-sqlite-and-other-databases). 
+ # Also, we directly pass the schema of the table we're modifying to circumvent shortcomings + # in Alembic's ability to reflect CHECK constraints, as described in + # https://alembic.sqlalchemy.org/en/latest/batch.html#working-in-offline-mode + bind = op.get_bind() + with op.batch_alter_table("experiments", copy_from=SqlExperiment.__table__) as batch_op: + # We skip running drop_constraint for mysql, because it creates an invalid statement + # in alembic<=1.0.10 + if bind.engine.name != 'mysql': + batch_op.drop_constraint(constraint_name='lifecycle_stage', type_="check") + batch_op.create_check_constraint( + constraint_name="experiments_lifecycle_stage", + condition=column('lifecycle_stage').in_(["active", "deleted"]) + ) + with op.batch_alter_table("runs", copy_from=SqlRun.__table__) as batch_op: + # We skip running drop_constraint for mysql, because it creates an invalid statement + # in alembic<=1.0.10 + if bind.engine.name != 'mysql': + batch_op.drop_constraint(constraint_name='lifecycle_stage', type_="check") + batch_op.create_check_constraint( + constraint_name="runs_lifecycle_stage", + condition=column('lifecycle_stage').in_(["active", "deleted"]) + ) + + +def downgrade(): + # Omit downgrade logic for now - we don't currently provide users a command/API for + # reverting a database migration, instead recommending that they take a database backup + # before running the migration. + pass diff --git a/mlflow/tensorflow.py b/mlflow/tensorflow.py index 1a7ed1f98a2cd..a6a38f5ee01b8 100644 --- a/mlflow/tensorflow.py +++ b/mlflow/tensorflow.py @@ -1,88 +1,583 @@ """ -The ``mlflow.tensorflow`` module provides an API for logging and loading TensorFlow models -as :py:mod:`mlflow.pyfunc` models. +The ``mlflow.tensorflow`` module provides an API for logging and loading TensorFlow models. +This module exports TensorFlow models with the following flavors: -You must save your own ``saved_model`` and pass its -path to ``log_saved_model(saved_model_dir)``. To load the model to predict on it, you call -``model = pyfunc.load_pyfunc(saved_model_dir)`` followed by -``prediction = model.predict(pandas DataFrame)`` to obtain a prediction in a pandas DataFrame. - -The loaded :py:mod:`mlflow.pyfunc` model *does not* expose any APIs for model training. +TensorFlow (native) format + This is the main flavor that can be loaded back into TensorFlow. +:py:mod:`mlflow.pyfunc` + Produced for use by generic pyfunc-based deployment tools and batch inference. 
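In practice the two flavors differ mainly in how the model is loaded back; a hedged sketch (the run ID is a placeholder to fill in):

    import mlflow.pyfunc

    # pyfunc flavor: generic DataFrame-in, DataFrame-out inference.
    model = mlflow.pyfunc.load_model("runs:/<run_id>/model")
    # predictions = model.predict(input_dataframe)

The native flavor is instead loaded with ``mlflow.tensorflow.load_model`` inside a TensorFlow graph context, as the ``load_model`` docstring below illustrates.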
""" from __future__ import absolute_import import os +import shutil +import yaml +import logging +import gorilla +import concurrent.futures +import warnings +import atexit +import time +import tempfile import pandas -import tensorflow as tf +import mlflow +import tensorflow +import mlflow.keras +from tensorflow.keras.callbacks import Callback, TensorBoard # pylint: disable=import-error from mlflow import pyfunc +from mlflow.exceptions import MlflowException from mlflow.models import Model -from mlflow.tracking.fluent import _get_or_start_run, log_artifacts +from mlflow.protos.databricks_pb2 import DIRECTORY_NOT_EMPTY +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils import keyword_only, experimental +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.file_utils import _copy_file_or_tree +from mlflow.utils.model_utils import _get_flavor_configuration +from mlflow.entities import Metric + + +FLAVOR_NAME = "tensorflow" + +_logger = logging.getLogger(__name__) + +_MAX_METRIC_QUEUE_SIZE = 500 + +_LOG_EVERY_N_STEPS = 100 + +_metric_queue = [] + +_thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1) + + +def get_default_conda_env(): + """ + :return: The default Conda environment for MLflow Models produced by calls to + :func:`save_model()` and :func:`log_model()`. + """ + return _mlflow_conda_env( + additional_conda_deps=[ + "tensorflow={}".format(tensorflow.__version__), + ], + additional_pip_deps=None, + additional_conda_channels=None) + + +@keyword_only +def log_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key, artifact_path, + conda_env=None): + """ + Log a *serialized* collection of TensorFlow graphs and variables as an MLflow model + for the current run. This method operates on TensorFlow variables and graphs that have been + serialized in TensorFlow's ``SavedModel`` format. For more information about ``SavedModel`` + format, see the TensorFlow documentation: + https://www.tensorflow.org/guide/saved_model#save_and_restore_models. + + :param tf_saved_model_dir: Path to the directory containing serialized TensorFlow variables and + graphs in ``SavedModel`` format. + :param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the + serialized ``SavedModel`` object. For more information, see the + ``tags`` parameter of the + ``tf.saved_model.builder.SavedModelBuilder`` method. + :param tf_signature_def_key: A string identifying the input/output signature associated with the + model. This is a key within the serialized ``SavedModel`` signature + definition mapping. For more information, see the + ``signature_def_map`` parameter of the + ``tf.saved_model.builder.SavedModelBuilder`` method. + :param artifact_path: The run-relative path to which to log model artifacts. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this decribes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If ``None``, the default + :func:`get_default_conda_env()` environment is added to the model. 
The + following is an *example* dictionary representation of a Conda environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'tensorflow=1.8.0' + ] + } + + """ + return Model.log(artifact_path=artifact_path, flavor=mlflow.tensorflow, + tf_saved_model_dir=tf_saved_model_dir, tf_meta_graph_tags=tf_meta_graph_tags, + tf_signature_def_key=tf_signature_def_key, conda_env=conda_env) + + +@keyword_only +def save_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key, path, + mlflow_model=Model(), conda_env=None): + """ + Save a *serialized* collection of TensorFlow graphs and variables as an MLflow model + to a local path. This method operates on TensorFlow variables and graphs that have been + serialized in TensorFlow's ``SavedModel`` format. For more information about ``SavedModel`` + format, see the TensorFlow documentation: + https://www.tensorflow.org/guide/saved_model#save_and_restore_models. + + :param tf_saved_model_dir: Path to the directory containing serialized TensorFlow variables and + graphs in ``SavedModel`` format. + :param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the + serialized ``SavedModel`` object. For more information, see the + ``tags`` parameter of the + ``tf.saved_model.builder.SavedModelBuilder`` method. + :param tf_signature_def_key: A string identifying the input/output signature associated with the + model. This is a key within the serialized ``SavedModel`` + signature definition mapping. For more information, see the + ``signature_def_map`` parameter of the + ``tf.saved_model.builder.SavedModelBuilder`` method. + :param path: Local path where the MLflow model is to be saved. + :param mlflow_model: MLflow model configuration to which to add the ``tensorflow`` flavor. + :param conda_env: Either a dictionary representation of a Conda environment or the path to a + Conda environment yaml file. If provided, this describes the environment + this model should be run in. At minimum, it should specify the dependencies + contained in :func:`get_default_conda_env()`. If ``None``, the default + :func:`get_default_conda_env()` environment is added to the model. 
The + following is an *example* dictionary representation of a Conda environment:: + + { + 'name': 'mlflow-env', + 'channels': ['defaults'], + 'dependencies': [ + 'python=3.7.0', + 'tensorflow=1.8.0' + ] + } + + """ + _logger.info( + "Validating the specified TensorFlow model by attempting to load it in a new TensorFlow" + " graph...") + _validate_saved_model(tf_saved_model_dir=tf_saved_model_dir, + tf_meta_graph_tags=tf_meta_graph_tags, + tf_signature_def_key=tf_signature_def_key) + _logger.info("Validation succeeded!") + + if os.path.exists(path): + raise MlflowException("Path '{}' already exists".format(path), DIRECTORY_NOT_EMPTY) + os.makedirs(path) + root_relative_path = _copy_file_or_tree(src=tf_saved_model_dir, dst=path, dst_dir=None) + model_dir_subpath = "tfmodel" + shutil.move(os.path.join(path, root_relative_path), os.path.join(path, model_dir_subpath)) + + conda_env_subpath = "conda.yaml" + if conda_env is None: + conda_env = get_default_conda_env() + elif not isinstance(conda_env, dict): + with open(conda_env, "r") as f: + conda_env = yaml.safe_load(f) + with open(os.path.join(path, conda_env_subpath), "w") as f: + yaml.safe_dump(conda_env, stream=f, default_flow_style=False) + + mlflow_model.add_flavor(FLAVOR_NAME, saved_model_dir=model_dir_subpath, + meta_graph_tags=tf_meta_graph_tags, + signature_def_key=tf_signature_def_key) + pyfunc.add_to_model(mlflow_model, loader_module="mlflow.tensorflow", env=conda_env_subpath) + mlflow_model.save(os.path.join(path, "MLmodel")) + + +def _validate_saved_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key): + """ + Validate the TensorFlow SavedModel by attempting to load it in a new TensorFlow graph. + If the loading process fails, any exceptions thrown by TensorFlow are propagated. + """ + validation_tf_graph = tensorflow.Graph() + validation_tf_sess = tensorflow.Session(graph=validation_tf_graph) + with validation_tf_graph.as_default(): + _load_tensorflow_saved_model(tf_saved_model_dir=tf_saved_model_dir, + tf_sess=validation_tf_sess, + tf_meta_graph_tags=tf_meta_graph_tags, + tf_signature_def_key=tf_signature_def_key) + + +def load_model(model_uri, tf_sess): + """ + Load an MLflow model that contains the TensorFlow flavor from the specified path. + + *This method must be called within a TensorFlow graph context.* + + :param model_uri: The location, in URI format, of the MLflow model. For example: + + - ``/Users/me/path/to/local/model`` + - ``relative/path/to/local/model`` + - ``s3://my_bucket/path/to/model`` + - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` + + For more information about supported URI schemes, see + `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#artifact-locations>`_. + + :param tf_sess: The TensorFlow session in which to load the model. + :return: A TensorFlow signature definition of type: + ``tensorflow.core.protobuf.meta_graph_pb2.SignatureDef``. This defines the input and + output tensors for model inference. 
+ + >>> import mlflow.tensorflow + >>> import tensorflow as tf + >>> tf_graph = tf.Graph() + >>> tf_sess = tf.Session(graph=tf_graph) + >>> with tf_graph.as_default(): + >>> signature_definition = mlflow.tensorflow.load_model(model_uri="model_uri", + >>> tf_sess=tf_sess) + >>> input_tensors = [tf_graph.get_tensor_by_name(input_signature.name) + >>> for _, input_signature in signature_definition.inputs.items()] + >>> output_tensors = [tf_graph.get_tensor_by_name(output_signature.name) + >>> for _, output_signature in signature_definition.outputs.items()] + """ + local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) + tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key =\ + _get_and_parse_flavor_configuration(model_path=local_model_path) + return _load_tensorflow_saved_model(tf_saved_model_dir=tf_saved_model_dir, tf_sess=tf_sess, + tf_meta_graph_tags=tf_meta_graph_tags, + tf_signature_def_key=tf_signature_def_key) + + +def _load_tensorflow_saved_model(tf_saved_model_dir, tf_sess, tf_meta_graph_tags, + tf_signature_def_key): + """ + Load a specified TensorFlow model consisting of a TensorFlow metagraph and signature definition + from a serialized TensorFlow ``SavedModel`` collection. + + :param tf_saved_model_dir: The local filesystem path or run-relative artifact path to the model. + :param tf_sess: The TensorFlow session in which to load the metagraph. + :param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the + serialized ``SavedModel`` object. For more information, see the + ``tags`` parameter of the `tf.saved_model.builder.SavedModelBuilder + method <https://www.tensorflow.org/api_docs/python/tf/saved_model/builder/SavedModelBuilder>`_. + :param tf_signature_def_key: A string identifying the input/output signature associated with the + model. This is a key within the serialized ``SavedModel``'s + signature definition mapping. For more information, see the + ``signature_def_map`` parameter of the + ``tf.saved_model.builder.SavedModelBuilder`` method. + :return: A TensorFlow signature definition of type: + ``tensorflow.core.protobuf.meta_graph_pb2.SignatureDef``. This defines input and + output tensors within the specified metagraph for inference. + """ + meta_graph_def = tensorflow.saved_model.loader.load( + sess=tf_sess, + tags=tf_meta_graph_tags, + export_dir=tf_saved_model_dir) + if tf_signature_def_key not in meta_graph_def.signature_def: + raise MlflowException("Could not find signature def key %s" % tf_signature_def_key) + return meta_graph_def.signature_def[tf_signature_def_key] + + +def _get_and_parse_flavor_configuration(model_path): + """ + :param model_path: Local filesystem path to the MLflow Model with the ``tensorflow`` flavor. + :return: A triple containing the following elements: + + - ``tf_saved_model_dir``: The local filesystem path to the underlying TensorFlow + SavedModel directory. + - ``tf_meta_graph_tags``: A list of tags identifying the TensorFlow model's metagraph + within the serialized ``SavedModel`` object. + - ``tf_signature_def_key``: A string identifying the input/output signature associated + with the model. This is a key within the serialized + ``SavedModel``'s signature definition mapping. 
+ """ + flavor_conf = _get_flavor_configuration(model_path=model_path, flavor_name=FLAVOR_NAME) + tf_saved_model_dir = os.path.join(model_path, flavor_conf['saved_model_dir']) + tf_meta_graph_tags = flavor_conf['meta_graph_tags'] + tf_signature_def_key = flavor_conf['signature_def_key'] + return tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key + + +def _load_pyfunc(path): + """ + Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. This function loads an MLflow + model with the TensorFlow flavor into a new TensorFlow graph and exposes it behind the + ``pyfunc.predict`` interface. + + :param path: Local filesystem path to the MLflow Model with the ``tensorflow`` flavor. + """ + tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key =\ + _get_and_parse_flavor_configuration(model_path=path) + + tf_graph = tensorflow.Graph() + tf_sess = tensorflow.Session(graph=tf_graph) + with tf_graph.as_default(): + signature_def = _load_tensorflow_saved_model( + tf_saved_model_dir=tf_saved_model_dir, tf_sess=tf_sess, + tf_meta_graph_tags=tf_meta_graph_tags, tf_signature_def_key=tf_signature_def_key) + + return _TFWrapper(tf_sess=tf_sess, tf_graph=tf_graph, signature_def=signature_def) class _TFWrapper(object): """ - Wrapper class that creates a predict function such that - predict(data: pandas.DataFrame) -> pandas.DataFrame + Wrapper class that exposes a TensorFlow model for inference via a ``predict`` function such that + ``predict(data: pandas.DataFrame) -> pandas.DataFrame``. """ - def __init__(self, saved_model_dir): - model = Model.load(os.path.join(saved_model_dir, "MLmodel")) - assert "tensorflow" in model.flavors - if "signature_def_key" not in model.flavors["tensorflow"]: - self._signature_def_key = tf.saved_model.signature_constants \ - .DEFAULT_SERVING_SIGNATURE_DEF_KEY - else: - self._signature_def_key = model.flavors["tensorflow"]["signature_def_key"] - self._saved_model_dir = model.flavors["tensorflow"]["saved_model_dir"] + def __init__(self, tf_sess, tf_graph, signature_def): + """ + :param tf_sess: The TensorFlow session used to evaluate the model. + :param tf_graph: The TensorFlow graph containing the model. + :param signature_def: The TensorFlow signature definition used to transform input dataframes + into tensors and output vectors into dataframes. + """ + self.tf_sess = tf_sess + self.tf_graph = tf_graph + # We assume that input keys in the signature definition correspond to input DataFrame column + # names + self.input_tensor_mapping = { + tensor_column_name: tf_graph.get_tensor_by_name(tensor_info.name) + for tensor_column_name, tensor_info in signature_def.inputs.items() + } + # We assume that output keys in the signature definition correspond to output DataFrame + # column names + self.output_tensors = { + sigdef_output: tf_graph.get_tensor_by_name(tnsr_info.name) + for sigdef_output, tnsr_info in signature_def.outputs.items() + } def predict(self, df): - graph = tf.Graph() - with tf.Session(graph=graph) as sess: - meta_graph_def = tf.saved_model.loader.load(sess, - [tf.saved_model.tag_constants.SERVING], - self._saved_model_dir) - sig_def = tf.contrib.saved_model.get_signature_def_by_key(meta_graph_def, - self._signature_def_key) - - # Determining output tensors. - fetch_mapping = {sigdef_output: graph.get_tensor_by_name(tnsr_info.name) - for sigdef_output, tnsr_info in sig_def.outputs.items()} - + with self.tf_graph.as_default(): # Build the feed dict, mapping input tensors to DataFrame column values. 
- # We assume that input arguments to the signature def correspond to DataFrame column - # names - feed_dict = {graph.get_tensor_by_name(tnsr_info.name): df[sigdef_input].values - for sigdef_input, tnsr_info in sig_def.inputs.items()} - raw_preds = sess.run(fetch_mapping, feed_dict=feed_dict) - pred_dict = {fetch_name: list(values) for fetch_name, values in raw_preds.items()} + feed_dict = { + self.input_tensor_mapping[tensor_column_name]: df[tensor_column_name].values + for tensor_column_name in self.input_tensor_mapping.keys() + } + raw_preds = self.tf_sess.run(self.output_tensors, feed_dict=feed_dict) + pred_dict = {column_name: values.ravel() for column_name, values in raw_preds.items()} return pandas.DataFrame(data=pred_dict) -def log_saved_model(saved_model_dir, signature_def_key, artifact_path): +class __MLflowTfKerasCallback(Callback): + """ + Callback for auto-logging parameters (we rely on TensorBoard for metrics). + Records model structural information as params after training finishes. + """ + def __init__(self): + if mlflow.active_run() is None: + mlflow.start_run() + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def on_epoch_end(self, epoch, logs=None): + pass + + def on_train_end(self, logs=None): # pylint: disable=unused-argument + opt = self.model.optimizer + if hasattr(opt, 'optimizer'): + opt = opt.optimizer + mlflow.log_param('optimizer_name', type(opt).__name__) + if hasattr(opt, '_lr'): + lr = opt._lr if type(opt._lr) is float else tensorflow.keras.backend.eval(opt._lr) + mlflow.log_param('learning_rate', lr) + if hasattr(opt, '_epsilon'): + epsilon = opt._epsilon if type(opt._epsilon) is float \ + else tensorflow.keras.backend.eval(opt._epsilon) + mlflow.log_param('epsilon', epsilon) + l = [] + self.model.summary(print_fn=l.append) + summary = '\n'.join(l) + mlflow.set_tag('summary', summary) + mlflow.keras.log_model(self.model, artifact_path='model') + + +def _log_artifacts_with_warning(**kwargs): + try: + mlflow.log_artifacts(**kwargs) + except MlflowException as e: + warnings.warn("Logging to MLflow failed: " + str(e)) + + +def _assoc_list_to_map(lst): + """ + Convert an association list to a dictionary. + """ + d = {} + for run_id, metric in lst: + d[run_id] = d[run_id] + [metric] if run_id in d else [metric] + return d + + +def _flush_queue(): + """ + Flush the metric queue and log contents in batches to MLflow. + Queue is divided into batches according to run id. + """ + global _metric_queue + try: + client = mlflow.tracking.MlflowClient() + dic = _assoc_list_to_map(_metric_queue) + for key in dic: + client.log_batch(key, metrics=dic[key], params=[], tags=[]) + except MlflowException as e: + warnings.warn("Logging to MLflow failed: " + str(e)) + finally: + _metric_queue = [] + + +atexit.register(_flush_queue) + + +def _add_to_queue(key, value, step, time, run_id): + """ + Add a metric to the metric queue. Flush the queue if it exceeds + max size. """ - Log a TensorFlow model as an MLflow artifact for the current run. + met = Metric(key=key, value=value, timestamp=time, step=step) + _metric_queue.append((run_id, met)) + if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE: + _flush_queue() - :param saved_model_dir: Directory where the TensorFlow model is saved. - :param signature_def_key: The signature definition to use when loading the model again. - See `SignatureDefs in SavedModel for TensorFlow Serving - `_ for details. 
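The per-run grouping performed by ``_flush_queue`` above can be restated as a tiny standalone example (the ``setdefault`` form is equivalent to the conditional expression used in ``_assoc_list_to_map``):

    def assoc_list_to_map(lst):
        d = {}
        for run_id, metric in lst:
            d.setdefault(run_id, []).append(metric)
        return d

    print(assoc_list_to_map([("run-a", 1.0), ("run-b", 2.0), ("run-a", 3.0)]))
    # {'run-a': [1.0, 3.0], 'run-b': [2.0]}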
- :param artifact_path: Path (within the artifact directory for the current run) to which - artifacts of the model are saved. + +def _log_event(event): + """ + Extracts metric information from the event protobuf. """ - run_id = _get_or_start_run().info.run_uuid - mlflow_model = Model(artifact_path=artifact_path, run_id=run_id) - pyfunc.add_to_model(mlflow_model, loader_module="mlflow.tensorflow") - mlflow_model.add_flavor("tensorflow", - saved_model_dir=saved_model_dir, - signature_def_key=signature_def_key) - mlflow_model.save(os.path.join(saved_model_dir, "MLmodel")) - log_artifacts(saved_model_dir, artifact_path) + if mlflow.active_run() is None: + mlflow.start_run() + if event.WhichOneof('what') == 'summary': + summary = event.summary + for v in summary.value: + if v.HasField('simple_value'): + if (event.step-1) % _LOG_EVERY_N_STEPS == 0: + _thread_pool.submit(_add_to_queue, key=v.tag, + value=v.simple_value, step=event.step, + time=int(time.time())*1000, + run_id=mlflow.active_run().info.run_id) + +def _get_tensorboard_callback(lst): + for x in lst: + if isinstance(x, tensorflow.keras.callbacks.TensorBoard): + return x + return None -def _load_pyfunc(saved_model_dir): + +def _setup_callbacks(lst): + """ + Adds TensorBoard and MLflowTfKeras callbacks to the + input list, and returns the new list and appropriate log directory. + """ + tb = _get_tensorboard_callback(lst) + if tb is None: + log_dir = tempfile.mkdtemp() + l = lst + [TensorBoard(log_dir)] + else: + log_dir = tb.log_dir + l = lst + l += [__MLflowTfKerasCallback()] + return l, log_dir + + +@experimental +def autolog(metrics_every_n_steps=100): + # pylint: disable=E0611 """ - Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. + Enable automatic logging from TensorFlow to MLflow. If applicable, + model checkpoints are logged as artifacts to a 'models' directory, along + with any TensorBoard log data. + + Refer to the tracking documentation for + information on what is logged with different TensorFlow workflows. + + :param metrics_every_n_steps: The frequency with which metrics should be logged. + Defaults to 100. Ex: a value of 100 will log metrics + at step 0, 100, 200, etc. + """ - return _TFWrapper(saved_model_dir) + global _LOG_EVERY_N_STEPS + _LOG_EVERY_N_STEPS = metrics_every_n_steps + + from distutils.version import StrictVersion + + if StrictVersion(tensorflow.__version__) < StrictVersion('1.12') \ + or StrictVersion(tensorflow.__version__) >= StrictVersion('2.0'): + warnings.warn("Could not log to MLflow. Only TensorFlow versions " + + "1.12 <= v < 2.0.0 are supported.") + return + + try: + from tensorflow.python.summary.writer.event_file_writer import EventFileWriter + from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2 + from tensorflow.python.saved_model import tag_constants + from tensorflow.python.summary.writer.writer import FileWriter + except ImportError: + warnings.warn("Could not log to MLflow. Only TensorFlow versions " +
+ "1.12 <= v < 2.0.0 are supported.") + return + + @gorilla.patch(tensorflow.estimator.Estimator) + def export_saved_model(self, *args, **kwargs): + original = gorilla.get_original_attribute(tensorflow.estimator.Estimator, + 'export_saved_model') + serialized = original(self, *args, **kwargs) + try: + log_model(tf_saved_model_dir=serialized.decode('utf-8'), + tf_meta_graph_tags=[tag_constants.SERVING], + tf_signature_def_key='predict', + artifact_path='model') + except MlflowException as e: + warnings.warn("Logging to MLflow failed: " + str(e)) + return serialized + + @gorilla.patch(tensorflow.estimator.Estimator) + def export_savedmodel(self, *args, **kwargs): + original = gorilla.get_original_attribute(tensorflow.estimator.Estimator, + 'export_savedmodel') + serialized = original(self, *args, **kwargs) + try: + log_model(tf_saved_model_dir=serialized.decode('utf-8'), + tf_meta_graph_tags=[tag_constants.SERVING], + tf_signature_def_key='predict', + artifact_path='model') + except MlflowException as e: + warnings.warn("Logging to MLflow failed: " + str(e)) + return serialized + + @gorilla.patch(tensorflow.keras.Model) + def fit(self, *args, **kwargs): + original = gorilla.get_original_attribute(tensorflow.keras.Model, 'fit') + if len(args) >= 6: + l = list(args) + l[5], log_dir = _setup_callbacks(l[5]) + args = tuple(l) + elif 'callbacks' in kwargs: + kwargs['callbacks'], log_dir = _setup_callbacks(kwargs['callbacks']) + else: + kwargs['callbacks'], log_dir = _setup_callbacks([]) + result = original(self, *args, **kwargs) + _flush_queue() + _log_artifacts_with_warning(local_dir=log_dir, artifact_path='tensorboard_logs') + shutil.rmtree(log_dir) + return result + + @gorilla.patch(EventFileWriter) + def add_event(self, event): + _log_event(event) + original = gorilla.get_original_attribute(EventFileWriter, 'add_event') + return original(self, event) + + @gorilla.patch(FileWriter) + def add_summary(self, *args, **kwargs): + original = gorilla.get_original_attribute(FileWriter, 'add_summary') + result = original(self, *args, **kwargs) + _flush_queue() + return result + + settings = gorilla.Settings(allow_hit=True, store_hit=True) + patches = [ + gorilla.Patch(EventFileWriter, 'add_event', add_event, settings=settings), + gorilla.Patch(EventFileWriterV2, 'add_event', add_event, settings=settings), + gorilla.Patch(tensorflow.keras.Model, 'fit', fit, settings=settings), + gorilla.Patch(tensorflow.estimator.Estimator, 'export_saved_model', + export_saved_model, settings=settings), + gorilla.Patch(tensorflow.estimator.Estimator, 'export_savedmodel', + export_savedmodel, settings=settings), + gorilla.Patch(FileWriter, 'add_summary', add_summary, settings=settings), + ] + + for x in patches: + gorilla.apply(x) diff --git a/mlflow/tracking/__init__.py b/mlflow/tracking/__init__.py index edf5a2c4605f3..074902b1610ec 100644 --- a/mlflow/tracking/__init__.py +++ b/mlflow/tracking/__init__.py @@ -8,7 +8,7 @@ from mlflow.tracking.client import MlflowClient from mlflow.tracking.utils import set_tracking_uri, get_tracking_uri, _get_store, \ _TRACKING_URI_ENV_VAR -from mlflow.tracking.fluent import _EXPERIMENT_ID_ENV_VAR, _RUN_ID_ENV_VAR +from mlflow.tracking.fluent import _EXPERIMENT_ID_ENV_VAR, _EXPERIMENT_NAME_ENV_VAR, _RUN_ID_ENV_VAR __all__ = [ "MlflowClient", diff --git a/mlflow/tracking/artifact_utils.py b/mlflow/tracking/artifact_utils.py new file mode 100644 index 0000000000000..7d6c95ea834ae --- /dev/null +++ b/mlflow/tracking/artifact_utils.py @@ -0,0 +1,66 @@ +"""
+Utilities for dealing with artifacts in the context of a Run. +""" +import posixpath + +from six.moves import urllib + +from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.store.artifact_repository_registry import get_artifact_repository +from mlflow.tracking.utils import _get_store + + +def get_artifact_uri(run_id, artifact_path=None): + """ + Get the absolute URI of the specified artifact in the specified run. If `path` is not specified, + the artifact root URI of the specified run will be returned; calls to ``log_artifact`` + and ``log_artifacts`` write artifact(s) to subdirectories of the artifact root URI. + + :param run_id: The ID of the run for which to obtain an absolute artifact URI. + :param artifact_path: The run-relative artifact path. For example, + ``path/to/artifact``. If unspecified, the artifact root URI for the + specified run will be returned. + :return: An *absolute* URI referring to the specified artifact or the specified run's artifact + root. For example, if an artifact path is provided and the specified run uses an + S3-backed store, this may be a uri of the form + ``s3:///path/to/artifact/root/path/to/artifact``. If an artifact path + is not provided and the specified run uses an S3-backed store, this may be a URI of + the form ``s3:///path/to/artifact/root``. + """ + if not run_id: + raise MlflowException( + message="A run_id must be specified in order to obtain an artifact uri!", + error_code=INVALID_PARAMETER_VALUE) + + store = _get_store() + run = store.get_run(run_id) + # Maybe move this method to RunsArtifactRepository so the circular dependency is clearer. + assert urllib.parse.urlparse(run.info.artifact_uri).scheme != "runs" # avoid an infinite loop + if artifact_path is None: + return run.info.artifact_uri + else: + return posixpath.join(run.info.artifact_uri, artifact_path) + + +# TODO: This method does not require a Run and its internals should be moved to +# data.download_uri (requires confirming that Projects will not break with this change). +# Also this would be much simpler if artifact_repo.download_artifacts could take the absolute path +# or no path. +def _download_artifact_from_uri(artifact_uri, output_path=None): + """ + :param artifact_uri: The *absolute* URI of the artifact to download. + :param output_path: The local filesystem path to which to download the artifact. If unspecified, + a local output path will be created. + """ + parsed_uri = urllib.parse.urlparse(artifact_uri) + prefix = "" + if parsed_uri.scheme and not parsed_uri.path.startswith("/"): + # relative path is a special case, urllib does not reconstruct it properly + prefix = parsed_uri.scheme + ":" + parsed_uri = parsed_uri._replace(scheme="") + artifact_path = posixpath.basename(parsed_uri.path) + parsed_uri = parsed_uri._replace(path=posixpath.dirname(parsed_uri.path)) + root_uri = prefix + urllib.parse.urlunparse(parsed_uri) + return get_artifact_repository(artifact_uri=root_uri).download_artifacts( + artifact_path=artifact_path, dst_path=output_path) diff --git a/mlflow/tracking/client.py b/mlflow/tracking/client.py index 5ef5bedb91d9f..f62208e1f2be1 100644 --- a/mlflow/tracking/client.py +++ b/mlflow/tracking/client.py @@ -4,21 +4,21 @@ exposed in the :py:mod:`mlflow.tracking` module. 
""" -import os import time from six import iteritems -from mlflow.utils.validation import _validate_metric_name, _validate_param_name, \ - _validate_tag_name, _validate_run_id -from mlflow.entities import Param, Metric, RunStatus, RunTag, ViewType, SourceType -from mlflow.tracking.utils import _get_store -from mlflow.store.artifact_repo import ArtifactRepository - -_DEFAULT_USER_ID = "unknown" +from mlflow.store import SEARCH_MAX_RESULTS_DEFAULT +from mlflow.tracking import utils +from mlflow.utils.validation import _validate_param_name, _validate_tag_name, _validate_run_id, \ + _validate_experiment_artifact_location, _validate_experiment_name, _validate_metric +from mlflow.entities import Param, Metric, RunStatus, RunTag, ViewType +from mlflow.store.artifact_repository_registry import get_artifact_repository +from mlflow.utils.mlflow_tags import MLFLOW_USER class MlflowClient(object): - """Client of an MLflow Tracking Server that creates and manages experiments and runs. + """ + Client of an MLflow Tracking Server that creates and manages experiments and runs. """ def __init__(self, tracking_uri=None): @@ -28,17 +28,38 @@ def __init__(self, tracking_uri=None): `Where Runs Get Recorded <../tracking.html#where-runs-get-recorded>`_ for more info. """ - self.tracking_uri = tracking_uri - self.store = _get_store(tracking_uri) + self.tracking_uri = tracking_uri or utils.get_tracking_uri() + self.store = utils._get_store(self.tracking_uri) def get_run(self, run_id): - """:return: :py:class:`mlflow.entities.Run` associated with the run ID.""" + """ + Fetch the run from backend store. The resulting :py:class:`Run ` + contains a collection of run metadata -- :py:class:`RunInfo `, + as well as a collection of run parameters, tags, and metrics -- + :py:class:`RunData `. In the case where multiple metrics with the + same key are logged for the run, the :py:class:`RunData ` contains + the most recently logged value at the largest step for each metric. + + :param run_id: Unique identifier for the run. + + :return: A single :py:class:`mlflow.entities.Run` object, if the run exists. Otherwise, + raises an exception. + """ _validate_run_id(run_id) return self.store.get_run(run_id) - def create_run(self, experiment_id, user_id=None, run_name=None, source_type=None, - source_name=None, entry_point_name=None, start_time=None, - source_version=None, tags=None, parent_run_id=None): + def get_metric_history(self, run_id, key): + """ + Return a list of metric objects corresponding to all values logged for a given metric. + + :param run_id: Unique identifier for run + :param key: Metric name within the run + + :return: A list of :py:class:`mlflow.entities.Metric` entities if logged, else empty list + """ + return self.store.get_metric_history(run_id=run_id, metric_key=key) + + def create_run(self, experiment_id, start_time=None, tags=None): """ Create a :py:class:`mlflow.entities.Run` object that can be associated with metrics, parameters, artifacts, etc. @@ -46,33 +67,37 @@ def create_run(self, experiment_id, user_id=None, run_name=None, source_type=Non Unlike :py:func:`mlflow.start_run`, does not change the "active run" used by :py:func:`mlflow.log_param`. - :param user_id: If not provided, use the current user as a default. + :param experiment_id: The ID of then experiment to create a run in. :param start_time: If not provided, use the current timestamp. :param tags: A dictionary of key-value pairs that are converted into :py:class:`mlflow.entities.RunTag` objects. 
:return: :py:class:`mlflow.entities.Run` that was created. """ + tags = tags if tags else {} + + # Extract user from tags + # This logic is temporary; the user_id attribute of runs is deprecated and will be removed + # in a later release. + user_id = tags.get(MLFLOW_USER, "unknown") + return self.store.create_run( experiment_id=experiment_id, - user_id=user_id if user_id is not None else _get_user_id(), - run_name=run_name, - source_type=source_type if source_type is not None else SourceType.LOCAL, - source_name=source_name if source_name is not None else "Python Application", - entry_point_name=entry_point_name, + user_id=user_id, start_time=start_time or int(time.time() * 1000), - source_version=source_version, - tags=[RunTag(key, value) for (key, value) in iteritems(tags)], - parent_run_id=parent_run_id, + tags=[RunTag(key, value) for (key, value) in iteritems(tags)] ) def list_run_infos(self, experiment_id, run_view_type=ViewType.ACTIVE_ONLY): """:return: List of :py:class:`mlflow.entities.RunInfo`""" return self.store.list_run_infos(experiment_id, run_view_type) - def list_experiments(self): - """:return: List of :py:class:`mlflow.entities.Experiment`""" - return self.store.list_experiments() + def list_experiments(self, view_type=None): + """ + :return: List of :py:class:`mlflow.entities.Experiment` + """ + final_view_type = ViewType.ACTIVE_ONLY if view_type is None else view_type + return self.store.list_experiments(view_type=final_view_type) def get_experiment(self, experiment_id): """ @@ -96,6 +121,8 @@ def create_experiment(self, name, artifact_location=None): If not provided, the server picks an appropriate default. :return: Integer ID of the created experiment. """ + _validate_experiment_name(name) + _validate_experiment_artifact_location(artifact_location) return self.store.create_experiment( name=name, artifact_location=artifact_location, @@ -125,14 +152,15 @@ def rename_experiment(self, experiment_id, new_name): """ self.store.rename_experiment(experiment_id, new_name) - def log_metric(self, run_id, key, value, timestamp=None): + def log_metric(self, run_id, key, value, timestamp=None, step=None): """ - Log a metric against the run ID. If timestamp is not provided, uses - the current timestamp. + Log a metric against the run ID. The timestamp defaults to the current timestamp. + The step defaults to 0. """ - _validate_metric_name(key) timestamp = timestamp if timestamp is not None else int(time.time()) - metric = Metric(key, value, timestamp) + step = step if step is not None else 0 + _validate_metric(key, value, timestamp, step) + metric = Metric(key, value, timestamp, step) self.store.log_metric(run_id, metric) def log_param(self, run_id, key, value): @@ -151,6 +179,36 @@ def set_tag(self, run_id, key, value): tag = RunTag(key, str(value)) self.store.set_tag(run_id, tag) + def delete_tag(self, run_id, key): + """ + Delete a tag from a run. This is irreversible. + :param run_id: String ID of the run + :param key: Name of the tag + """ + self.store.delete_tag(run_id, key) + + def log_batch(self, run_id, metrics=(), params=(), tags=()): + """ + Log multiple metrics, params, and/or tags. + + :param run_id: String ID of the run + :param metrics: If provided, List of Metric(key, value, timestamp) instances. + :param params: If provided, List of Param(key, value) instances. + :param tags: If provided, List of RunTag(key, value) instances. + + Raises an MlflowException if any errors occur. 
+ :return: None + """ + if len(metrics) == 0 and len(params) == 0 and len(tags) == 0: + return + for metric in metrics: + _validate_metric(metric.key, metric.value, metric.timestamp, metric.step) + for param in params: + _validate_param_name(param.key) + for tag in tags: + _validate_tag_name(tag.key) + self.store.log_batch(run_id=run_id, metrics=metrics, params=params, tags=tags) + def log_artifact(self, run_id, local_path, artifact_path=None): """ Write a local file to the remote ``artifact_uri``. @@ -159,7 +217,7 @@ def log_artifact(self, run_id, local_path, artifact_path=None): :param artifact_path: If provided, the directory in ``artifact_uri`` to write to. """ run = self.get_run(run_id) - artifact_repo = ArtifactRepository.from_artifact_uri(run.info.artifact_uri, self.store) + artifact_repo = get_artifact_repository(run.info.artifact_uri) artifact_repo.log_artifact(local_path, artifact_path) def log_artifacts(self, run_id, local_dir, artifact_path=None): @@ -170,7 +228,7 @@ def log_artifacts(self, run_id, local_dir, artifact_path=None): :param artifact_path: If provided, the directory in ``artifact_uri`` to write to. """ run = self.get_run(run_id) - artifact_repo = ArtifactRepository.from_artifact_uri(run.info.artifact_uri, self.store) + artifact_repo = get_artifact_repository(run.info.artifact_uri) artifact_repo.log_artifacts(local_dir, artifact_path) def list_artifacts(self, run_id, path=None): @@ -184,22 +242,27 @@ def list_artifacts(self, run_id, path=None): """ run = self.get_run(run_id) artifact_root = run.info.artifact_uri - artifact_repo = ArtifactRepository.from_artifact_uri(artifact_root, self.store) + artifact_repo = get_artifact_repository(artifact_root) return artifact_repo.list_artifacts(path) - def download_artifacts(self, run_id, path): + def download_artifacts(self, run_id, path, dst_path=None): """ Download an artifact file or directory from a run to a local directory if applicable, and return a local path for it. :param run_id: The run to download artifacts from. :param path: Relative source path to the desired artifact. + :param dst_path: Absolute path of the local filesystem destination directory to which to + download the specified artifacts. This directory must already exist. + If unspecified, the artifacts will either be downloaded to a new + uniquely-named directory on the local filesystem or will be returned + directly in the case of the LocalArtifactRepository. :return: Local path of desired artifact. """ run = self.get_run(run_id) artifact_root = run.info.artifact_uri - artifact_repo = ArtifactRepository.from_artifact_uri(artifact_root, self.store) - return artifact_repo.download_artifacts(path) + artifact_repo = get_artifact_repository(artifact_root) + return artifact_repo.download_artifacts(path, dst_path) def set_terminated(self, run_id, status=None, end_time=None): """Set a run's status to terminated. @@ -208,7 +271,7 @@ def set_terminated(self, run_id, status=None, end_time=None): Defaults to "FINISHED". 
        :param end_time: If not provided, defaults to the current time."""
         end_time = end_time if end_time else int(time.time() * 1000)
-        status = status if status else "FINISHED"
+        status = status if status else RunStatus.to_string(RunStatus.FINISHED)
         self.store.update_run_info(run_id, run_status=RunStatus.from_string(status),
                                    end_time=end_time)
@@ -224,11 +287,27 @@ def restore_run(self, run_id):
         """
         self.store.restore_run(run_id)
 
-
-def _get_user_id():
-    """Get the ID of the user for the current run."""
-    try:
-        import pwd
-        return pwd.getpwuid(os.getuid())[0]
-    except ImportError:
-        return _DEFAULT_USER_ID
+    def search_runs(self, experiment_ids, filter_string="", run_view_type=ViewType.ACTIVE_ONLY,
+                    max_results=SEARCH_MAX_RESULTS_DEFAULT, order_by=None, page_token=None):
+        """
+        Search for runs that fit the search criteria.
+
+        :param experiment_ids: List of experiment IDs, or a single int or string id.
+        :param filter_string: Filter query string, defaults to searching all runs.
+        :param run_view_type: one of enum values ACTIVE_ONLY, DELETED_ONLY, or ALL runs
+                              defined in :py:class:`mlflow.entities.ViewType`.
+        :param max_results: Maximum number of runs desired.
+        :param order_by: List of columns to order by (e.g., "metrics.rmse"). The default
+                         ordering is to sort by start_time DESC, then run_id.
+        :param page_token: Token specifying the next page of results. It should be obtained from
+                           a ``search_runs`` call.
+
+        :return: A list of :py:class:`mlflow.entities.Run` objects that satisfy the search
+                 expressions. If the underlying tracking store supports pagination, the token for
+                 the next page may be obtained via the ``token`` attribute of the returned object.
+        """
+        if isinstance(experiment_ids, int) or isinstance(experiment_ids, str):
+            experiment_ids = [experiment_ids]
+        return self.store.search_runs(experiment_ids=experiment_ids, filter_string=filter_string,
+                                      run_view_type=run_view_type, max_results=max_results,
+                                      order_by=order_by, page_token=page_token)
diff --git a/mlflow/tracking/context/__init__.py b/mlflow/tracking/context/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/mlflow/tracking/context/abstract_context.py b/mlflow/tracking/context/abstract_context.py
new file mode 100644
index 0000000000000..d8264264ad2aa
--- /dev/null
+++ b/mlflow/tracking/context/abstract_context.py
@@ -0,0 +1,24 @@
+from abc import ABCMeta, abstractmethod
+
+
+class RunContextProvider(object):
+
+    __metaclass__ = ABCMeta
+
+    @abstractmethod
+    def in_context(self):
+        """
+        Determine if MLflow is running in this context.
+
+        :return: bool indicating if in this context
+        """
+        pass
+
+    @abstractmethod
+    def tags(self):
+        """
+        Generate context-specific tags.
+ + :return: dict of tags + """ + pass diff --git a/mlflow/tracking/context/databricks_notebook_context.py b/mlflow/tracking/context/databricks_notebook_context.py new file mode 100644 index 0000000000000..c7bdefe62802f --- /dev/null +++ b/mlflow/tracking/context/databricks_notebook_context.py @@ -0,0 +1,31 @@ +from mlflow.tracking.context.abstract_context import RunContextProvider +from mlflow.utils import databricks_utils +from mlflow.entities import SourceType +from mlflow.utils.mlflow_tags import ( + MLFLOW_SOURCE_TYPE, + MLFLOW_SOURCE_NAME, + MLFLOW_DATABRICKS_WEBAPP_URL, + MLFLOW_DATABRICKS_NOTEBOOK_PATH, + MLFLOW_DATABRICKS_NOTEBOOK_ID +) + + +class DatabricksNotebookRunContext(RunContextProvider): + def in_context(self): + return databricks_utils.is_in_databricks_notebook() + + def tags(self): + notebook_id = databricks_utils.get_notebook_id() + notebook_path = databricks_utils.get_notebook_path() + webapp_url = databricks_utils.get_webapp_url() + tags = { + MLFLOW_SOURCE_NAME: notebook_path, + MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK) + } + if notebook_id is not None: + tags[MLFLOW_DATABRICKS_NOTEBOOK_ID] = notebook_id + if notebook_path is not None: + tags[MLFLOW_DATABRICKS_NOTEBOOK_PATH] = notebook_path + if webapp_url is not None: + tags[MLFLOW_DATABRICKS_WEBAPP_URL] = webapp_url + return tags diff --git a/mlflow/tracking/context/default_context.py b/mlflow/tracking/context/default_context.py new file mode 100644 index 0000000000000..03e2a50e8a79e --- /dev/null +++ b/mlflow/tracking/context/default_context.py @@ -0,0 +1,50 @@ +import sys +import getpass + +from mlflow.tracking.context.abstract_context import RunContextProvider +from mlflow.entities import SourceType +from mlflow.utils.mlflow_tags import ( + MLFLOW_USER, + MLFLOW_SOURCE_TYPE, + MLFLOW_SOURCE_NAME, +) + + +_DEFAULT_USER = "unknown" + + +def _get_user(): + """Get the current computer username.""" + try: + return getpass.getuser() + except ImportError: + return _DEFAULT_USER + + +def _get_main_file(): + if len(sys.argv) > 0: + return sys.argv[0] + return None + + +def _get_source_name(): + main_file = _get_main_file() + if main_file is not None: + return main_file + return "" + + +def _get_source_type(): + return SourceType.LOCAL + + +class DefaultRunContext(RunContextProvider): + def in_context(self): + return True + + def tags(self): + return { + MLFLOW_USER: _get_user(), + MLFLOW_SOURCE_NAME: _get_source_name(), + MLFLOW_SOURCE_TYPE: SourceType.to_string(_get_source_type()) + } diff --git a/mlflow/tracking/context/git_context.py b/mlflow/tracking/context/git_context.py new file mode 100644 index 0000000000000..306a15db8cb73 --- /dev/null +++ b/mlflow/tracking/context/git_context.py @@ -0,0 +1,53 @@ +import os +import logging + +from mlflow.tracking.context.abstract_context import RunContextProvider +from mlflow.tracking.context.default_context import _get_main_file +from mlflow.utils.mlflow_tags import MLFLOW_GIT_COMMIT + +_logger = logging.getLogger(__name__) + + +def _get_git_commit(path): + try: + import git + except ImportError as e: + _logger.warning( + "Failed to import Git (the Git executable is probably not on your PATH)," + " so Git SHA is not available. 
Error: %s", e) + return None + try: + if os.path.isfile(path): + path = os.path.dirname(path) + repo = git.Repo(path, search_parent_directories=True) + commit = repo.head.commit.hexsha + return commit + except (git.InvalidGitRepositoryError, git.GitCommandNotFound, ValueError, git.NoSuchPathError): + return None + + +def _get_source_version(): + main_file = _get_main_file() + if main_file is not None: + return _get_git_commit(main_file) + return None + + +class GitRunContext(RunContextProvider): + + def __init__(self): + self._cache = {} + + @property + def _source_version(self): + if "source_version" not in self._cache: + self._cache["source_version"] = _get_source_version() + return self._cache["source_version"] + + def in_context(self): + return self._source_version is not None + + def tags(self): + return { + MLFLOW_GIT_COMMIT: self._source_version + } diff --git a/mlflow/tracking/context/registry.py b/mlflow/tracking/context/registry.py new file mode 100644 index 0000000000000..5a5c09e294285 --- /dev/null +++ b/mlflow/tracking/context/registry.py @@ -0,0 +1,74 @@ +import entrypoints +import warnings + +from mlflow.tracking.context.default_context import DefaultRunContext +from mlflow.tracking.context.git_context import GitRunContext +from mlflow.tracking.context.databricks_notebook_context import DatabricksNotebookRunContext + + +class RunContextProviderRegistry(object): + """Registry for run context provider implementations + + This class allows the registration of a run context provider which can be used to infer meta + information about the context of an MLflow experiment run. Implementations declared though the + entrypoints `mlflow.run_context_provider` group can be automatically registered through the + `register_entrypoints` method. + + Registered run context providers can return tags that override those implemented in the core + library, however the order in which plugins are resolved is undefined. + """ + + def __init__(self): + self._registry = [] + + def register(self, run_context_provider_cls): + self._registry.append(run_context_provider_cls()) + + def register_entrypoints(self): + """Register tracking stores provided by other packages""" + for entrypoint in entrypoints.get_group_all("mlflow.run_context_provider"): + try: + self.register(entrypoint.load()) + except (AttributeError, ImportError) as exc: + warnings.warn( + 'Failure attempting to register context provider "{}": {}'.format( + entrypoint.name, str(exc) + ), + stacklevel=2 + ) + + def __iter__(self): + return iter(self._registry) + + +_run_context_provider_registry = RunContextProviderRegistry() +_run_context_provider_registry.register(DefaultRunContext) +_run_context_provider_registry.register(GitRunContext) +_run_context_provider_registry.register(DatabricksNotebookRunContext) + +_run_context_provider_registry.register_entrypoints() + + +def resolve_tags(tags=None): + """Generate a set of tags for the current run context. Tags are resolved in the order, + contexts are registered. Argument tags are applied last. + + This function iterates through all run context providers in the registry. Additional context + providers can be registered as described in + :py:class:`mlflow.tracking.context.RunContextProvider`. + + :param tags: A dictionary of tags to override. If specified, tags passed in this argument will + override those inferred from the context. + :return: A dicitonary of resolved tags. 
+ """ + + all_tags = {} + for provider in _run_context_provider_registry: + if provider.in_context(): + # TODO: Error out gracefully if provider's tags are not valid or have wrong types. + all_tags.update(provider.tags()) + + if tags is not None: + all_tags.update(tags) + + return all_tags diff --git a/mlflow/tracking/fluent.py b/mlflow/tracking/fluent.py index 3acff19010720..d5cec20d8372e 100644 --- a/mlflow/tracking/fluent.py +++ b/mlflow/tracking/fluent.py @@ -5,30 +5,36 @@ from __future__ import print_function -import numbers import os import atexit -import sys import time +import logging +import numpy as np +import pandas as pd -from mlflow.entities import Experiment, Run, SourceType, RunInfo +from mlflow.entities import Run, RunStatus, Param, RunTag, Metric, ViewType +from mlflow.entities.lifecycle_stage import LifecycleStage from mlflow.exceptions import MlflowException from mlflow.tracking.client import MlflowClient +from mlflow.tracking import artifact_utils +from mlflow.tracking.context import registry as context_registry from mlflow.utils import env -from mlflow.utils.databricks_utils import is_in_databricks_notebook, get_notebook_id, \ - get_notebook_path, get_webapp_url -from mlflow.utils.logging_utils import eprint -from mlflow.utils.mlflow_tags import MLFLOW_DATABRICKS_WEBAPP_URL, \ - MLFLOW_DATABRICKS_NOTEBOOK_PATH, \ - MLFLOW_DATABRICKS_NOTEBOOK_ID +from mlflow.utils.databricks_utils import is_in_databricks_notebook, get_notebook_id +from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_RUN_NAME from mlflow.utils.validation import _validate_run_id _EXPERIMENT_ID_ENV_VAR = "MLFLOW_EXPERIMENT_ID" +_EXPERIMENT_NAME_ENV_VAR = "MLFLOW_EXPERIMENT_NAME" _RUN_ID_ENV_VAR = "MLFLOW_RUN_ID" _active_run_stack = [] _active_experiment_id = None +SEARCH_MAX_RESULTS_PANDAS = 100000 +NUM_RUNS_PER_PAGE_PANDAS = 10000 + +_logger = logging.getLogger(__name__) + def set_experiment(experiment_name): """ @@ -40,9 +46,14 @@ def set_experiment(experiment_name): client = MlflowClient() experiment = client.get_experiment_by_name(experiment_name) exp_id = experiment.experiment_id if experiment else None - if not exp_id: + if exp_id is None: # id can be 0 print("INFO: '{}' does not exist. Creating a new experiment".format(experiment_name)) exp_id = client.create_experiment(experiment_name) + elif experiment.lifecycle_stage == LifecycleStage.DELETED: + raise MlflowException( + "Cannot set a deleted experiment '%s' as the active experiment." + " You can restore the experiment, or permanently delete the " + " experiment to create a new one." % experiment.name) global _active_experiment_id _active_experiment_id = exp_id @@ -57,100 +68,90 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - status = "FINISHED" if exc_type is None else "FAILED" - end_run(status) + status = RunStatus.FINISHED if exc_type is None else RunStatus.FAILED + end_run(RunStatus.to_string(status)) return exc_type is None -def start_run(run_uuid=None, experiment_id=None, source_name=None, source_version=None, - entry_point_name=None, source_type=None, run_name=None, nested=False): +def start_run(run_id=None, experiment_id=None, run_name=None, nested=False): """ Start a new MLflow run, setting it as the active run under which metrics and parameters will be logged. The return value can be used as a context manager within a ``with`` block; otherwise, you must call ``end_run()`` to terminate the current run. 
-    If you pass a ``run_uuid`` or the ``MLFLOW_RUN_ID`` environment variable is set,
+    If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set,
     ``start_run`` attempts to resume a run with the specified run ID and
-    other parameters are ignored. ``run_uuid`` takes precedence over ``MLFLOW_RUN_ID``.
+    other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``.
+
+    MLflow sets a variety of default tags on the run, as defined in
+    :ref:`MLflow system tags <system_tags>`.
 
-    :param run_uuid: If specified, get the run with the specified UUID and log parameters
+    :param run_id: If specified, get the run with the specified UUID and log parameters
                      and metrics under that run. The run's end time is unset and its status
                      is set to running, but the run's other attributes (``source_version``,
                      ``source_type``, etc.) are not changed.
     :param experiment_id: ID of the experiment under which to create the current run (applicable
-                          only when ``run_uuid`` is not specified). If ``experiment_id`` argument
+                          only when ``run_id`` is not specified). If ``experiment_id`` argument
                           is unspecified, will look for valid experiment in the following order:
-                          activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_ID`` env variable,
-                          or the default experiment.
-    :param source_name: Name of the source file or URI of the project to be associated with the run.
-                        If none provided defaults to the current file.
-    :param source_version: Optional Git commit hash to associate with the run.
-    :param entry_point_name: Optional name of the entry point for the current run.
-    :param source_type: Integer :py:class:`mlflow.entities.SourceType` describing the type
-                        of the run ("local", "project", etc.). Defaults to
-                        :py:class:`mlflow.entities.SourceType.LOCAL` ("local").
-    :param run_name: Name of new run. Used only when ``run_uuid`` is unspecified.
-    :param nested: Parameter which must be set to ``True`` to create nested runs.
+                          activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME``
+                          environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable,
+                          or the default experiment as defined by the tracking server.
+    :param run_name: Name of new run (stored as a ``mlflow.runName`` tag).
+                     Used only when ``run_id`` is unspecified.
+    :param nested: Controls whether the run is nested in a parent run. ``True`` creates a nested run.
     :return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping
              the run's state.
     """
     global _active_run_stack
+    # back compat for int experiment_id
+    experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id
     if len(_active_run_stack) > 0 and not nested:
         raise Exception(("Run with UUID {} is already active. 
To start a nested " + - "run call start_run with nested=True").format( - _active_run_stack[0].info.run_uuid)) - existing_run_uuid = run_uuid or os.environ.get(_RUN_ID_ENV_VAR, None) - if existing_run_uuid: - _validate_run_id(existing_run_uuid) - active_run_obj = MlflowClient().get_run(existing_run_uuid) - if active_run_obj.info.lifecycle_stage == RunInfo.DELETED_LIFECYCLE: + "run, call start_run with nested=True").format( + _active_run_stack[0].info.run_id)) + if run_id: + existing_run_id = run_id + elif _RUN_ID_ENV_VAR in os.environ: + existing_run_id = os.environ[_RUN_ID_ENV_VAR] + del os.environ[_RUN_ID_ENV_VAR] + else: + existing_run_id = None + if existing_run_id: + _validate_run_id(existing_run_id) + active_run_obj = MlflowClient().get_run(existing_run_id) + if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED: raise MlflowException("Cannot start run with ID {} because it is in the " - "deleted state.".format(existing_run_uuid)) + "deleted state.".format(existing_run_id)) else: if len(_active_run_stack) > 0: - parent_run_id = _active_run_stack[-1].info.run_uuid + parent_run_id = _active_run_stack[-1].info.run_id else: parent_run_id = None - exp_id_for_run = experiment_id or _get_experiment_id() - if is_in_databricks_notebook(): - databricks_tags = {} - notebook_id = get_notebook_id() - notebook_path = get_notebook_path() - webapp_url = get_webapp_url() - if notebook_id is not None: - databricks_tags[MLFLOW_DATABRICKS_NOTEBOOK_ID] = notebook_id - if notebook_path is not None: - databricks_tags[MLFLOW_DATABRICKS_NOTEBOOK_PATH] = notebook_path - if webapp_url is not None: - databricks_tags[MLFLOW_DATABRICKS_WEBAPP_URL] = webapp_url - active_run_obj = MlflowClient().create_run( - experiment_id=exp_id_for_run, - run_name=run_name, - source_name=notebook_path, - source_version=source_version or _get_source_version(), - entry_point_name=entry_point_name, - source_type=SourceType.NOTEBOOK, - tags=databricks_tags, - parent_run_id=parent_run_id) - else: - active_run_obj = MlflowClient().create_run( - experiment_id=exp_id_for_run, - run_name=run_name, - source_name=source_name or _get_source_name(), - source_version=source_version or _get_source_version(), - entry_point_name=entry_point_name, - source_type=source_type or _get_source_type(), - parent_run_id=parent_run_id) + exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id() + + user_specified_tags = {} + if parent_run_id is not None: + user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id + if run_name is not None: + user_specified_tags[MLFLOW_RUN_NAME] = run_name + + tags = context_registry.resolve_tags(user_specified_tags) + + active_run_obj = MlflowClient().create_run( + experiment_id=exp_id_for_run, + tags=tags + ) + _active_run_stack.append(ActiveRun(active_run_obj)) return _active_run_stack[-1] -def end_run(status="FINISHED"): +def end_run(status=RunStatus.to_string(RunStatus.FINISHED)): """End an active MLflow run (if there is one).""" global _active_run_stack if len(_active_run_stack) > 0: - MlflowClient().set_terminated(_active_run_stack[-1].info.run_uuid, status) + MlflowClient().set_terminated(_active_run_stack[-1].info.run_id, status) # Clear out the global existing run environment variable as well. 
        env.unset_variable(_RUN_ID_ENV_VAR)
         _active_run_stack.pop()
 
@@ -171,7 +172,7 @@ def log_param(key, value):
     :param key: Parameter name (string)
     :param value: Parameter value (string, but will be string-ified if not)
     """
-    run_id = _get_or_start_run().info.run_uuid
+    run_id = _get_or_start_run().info.run_id
     MlflowClient().log_param(run_id, key, value)
 
 
@@ -182,33 +183,78 @@ def set_tag(key, value):
     :param key: Tag name (string)
     :param value: Tag value (string, but will be string-ified if not)
     """
-    run_id = _get_or_start_run().info.run_uuid
+    run_id = _get_or_start_run().info.run_id
     MlflowClient().set_tag(run_id, key, value)
 
 
-def log_metric(key, value):
+def delete_tag(key):
+    """
+    Delete a tag from a run. This is irreversible.
+    :param key: Name of the tag
+    """
+    run_id = _get_or_start_run().info.run_id
+    MlflowClient().delete_tag(run_id, key)
+
+
+def log_metric(key, value, step=None):
     """
     Log a metric under the current run, creating a run if necessary.
 
     :param key: Metric name (string).
     :param value: Metric value (float).
+    :param step: Metric step (int). Defaults to zero if unspecified.
+    """
+    run_id = _get_or_start_run().info.run_id
+    MlflowClient().log_metric(run_id, key, value, int(time.time() * 1000), step or 0)
+
+
+def log_metrics(metrics, step=None):
+    """
+    Log multiple metrics for the current run, starting a run if no runs are active.
+    :param metrics: Dictionary of metric_name: String -> value: Float
+    :param step: A single integer step at which to log the specified
+                 Metrics. If unspecified, each metric is logged at step zero.
+
+    :returns: None
     """
-    if not isinstance(value, numbers.Number):
-        eprint("WARNING: The metric {}={} was not logged because the value is not a number.".format(
-            key, value))
-        return
-    run_id = _get_or_start_run().info.run_uuid
-    MlflowClient().log_metric(run_id, key, value, int(time.time()))
+    run_id = _get_or_start_run().info.run_id
+    timestamp = int(time.time() * 1000)
+    metrics_arr = [Metric(key, value, timestamp, step or 0) for key, value in metrics.items()]
+    MlflowClient().log_batch(run_id=run_id, metrics=metrics_arr, params=[], tags=[])
+
+
+def log_params(params):
+    """
+    Log a batch of params for the current run, starting a run if no runs are active.
+    :param params: Dictionary of param_name: String -> value: (String, but will be string-ified if
+                   not)
+    :returns: None
+    """
+    run_id = _get_or_start_run().info.run_id
+    params_arr = [Param(key, str(value)) for key, value in params.items()]
+    MlflowClient().log_batch(run_id=run_id, metrics=[], params=params_arr, tags=[])
+
+
+def set_tags(tags):
+    """
+    Log a batch of tags for the current run, starting a run if no runs are active.
+    :param tags: Dictionary of tag_name: String -> value: (String, but will be string-ified if
+                 not)
+    :returns: None
+    """
+    run_id = _get_or_start_run().info.run_id
+    tags_arr = [RunTag(key, str(value)) for key, value in tags.items()]
+    MlflowClient().log_batch(run_id=run_id, metrics=[], params=[], tags=tags_arr)
 
 
 def log_artifact(local_path, artifact_path=None):
     """
-    Log a local file or directory as an artifact of the currently active run.
+    Log a local file as an artifact of the currently active run.
 
     :param local_path: Path to the file to write.
    :param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
""" - run_id = _get_or_start_run().info.run_uuid + run_id = _get_or_start_run().info.run_id MlflowClient().log_artifact(run_id, local_path, artifact_path) @@ -219,7 +266,7 @@ def log_artifacts(local_dir, artifact_path=None): :param local_dir: Path to the directory of files to write. :param artifact_path: If provided, the directory in ``artifact_uri`` to write to. """ - run_id = _get_or_start_run().info.run_uuid + run_id = _get_or_start_run().info.run_id MlflowClient().log_artifacts(run_id, local_dir, artifact_path) @@ -235,12 +282,125 @@ def create_experiment(name, artifact_location=None): return MlflowClient().create_experiment(name, artifact_location) -def get_artifact_uri(): +def get_artifact_uri(artifact_path=None): """ - Get the artifact URI of the currently active run. Calls to ``log_artifact`` and - ``log_artifacts`` write artifact(s) to subdirectories of the returned URI. + Get the absolute URI of the specified artifact in the currently active run. + If `path` is not specified, the artifact root URI of the currently active + run will be returned; calls to ``log_artifact`` and ``log_artifacts`` write + artifact(s) to subdirectories of the artifact root URI. + + :param artifact_path: The run-relative artifact path for which to obtain an absolute URI. + For example, "path/to/artifact". If unspecified, the artifact root URI + for the currently active run will be returned. + :return: An *absolute* URI referring to the specified artifact or the currently adtive run's + artifact root. For example, if an artifact path is provided and the currently active + run uses an S3-backed store, this may be a uri of the form + ``s3:///path/to/artifact/root/path/to/artifact``. If an artifact path + is not provided and the currently active run uses an S3-backed store, this may be a + URI of the form ``s3:///path/to/artifact/root``. """ - return _get_or_start_run().info.artifact_uri + return artifact_utils.get_artifact_uri(run_id=_get_or_start_run().info.run_id, + artifact_path=artifact_path) + + +def search_runs(experiment_ids=None, filter_string="", run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_PANDAS, order_by=None): + """ + Get a pandas DataFrame of runs that fit the search criteria. + + :param experiment_ids: List of experiment IDs. None will default to the active experiment. + :param filter_string: Filter query string, defaults to searching all runs. + :param run_view_type: one of enum values ACTIVE_ONLY, DELETED_ONLY, or ALL runs + defined in :py:class:`mlflow.entities.ViewType`. + :param max_results: The maximum number of runs to put in the dataframe. Default is 100,000 + to avoid causing out-of-memory issues on the user's machine. + :param order_by: List of columns to order by (e.g., "metrics.rmse"). The default + ordering is to sort by start_time DESC, then run_id. + + :return: A pandas.DataFrame of runs, where each metric, parameter, and tag + are expanded into their own columns named metrics.*, params.*, and tags.* + respectively. 
For runs that don't have a particular metric, parameter, or tag, their + value will be (Numpy) Nan, None, or None respectively + """ + if not experiment_ids: + experiment_ids = _get_experiment_id() + runs = _get_paginated_runs(experiment_ids, filter_string, run_view_type, max_results, + order_by) + info = {'run_id': [], 'experiment_id': [], + 'status': [], 'artifact_uri': [], } + params, metrics, tags = ({}, {}, {}) + PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None) + for i, run in enumerate(runs): + info['run_id'].append(run.info.run_id) + info['experiment_id'].append(run.info.experiment_id) + info['status'].append(run.info.status) + info['artifact_uri'].append(run.info.artifact_uri) + + # Params + param_keys = set(params.keys()) + for key in param_keys: + if key in run.data.params: + params[key].append(run.data.params[key]) + else: + params[key].append(PARAM_NULL) + new_params = set(run.data.params.keys()) - param_keys + for p in new_params: + params[p] = [PARAM_NULL]*i # Fill in null values for all previous runs + params[p].append(run.data.params[p]) + + # Metrics + metric_keys = set(metrics.keys()) + for key in metric_keys: + if key in run.data.metrics: + metrics[key].append(run.data.metrics[key]) + else: + metrics[key].append(METRIC_NULL) + new_metrics = set(run.data.metrics.keys()) - metric_keys + for m in new_metrics: + metrics[m] = [METRIC_NULL]*i + metrics[m].append(run.data.metrics[m]) + + # Tags + tag_keys = set(tags.keys()) + for key in tag_keys: + if key in run.data.tags: + tags[key].append(run.data.tags[key]) + else: + tags[key].append(TAG_NULL) + new_tags = set(run.data.tags.keys()) - tag_keys + for t in new_tags: + tags[t] = [TAG_NULL]*i + tags[t].append(run.data.tags[t]) + + data = {} + data.update(info) + for key in metrics: + data['metrics.' + key] = metrics[key] + for key in params: + data['params.' + key] = params[key] + for key in tags: + data['tags.' 
+ key] = tags[key]
+    return pd.DataFrame(data)
+
+
+def _get_paginated_runs(experiment_ids, filter_string, run_view_type, max_results,
+                        order_by):
+    all_runs = []
+    next_page_token = None
+    while len(all_runs) < max_results:
+        runs_to_get = max_results - len(all_runs)
+        if runs_to_get < NUM_RUNS_PER_PAGE_PANDAS:
+            runs = MlflowClient().search_runs(experiment_ids, filter_string, run_view_type,
+                                              runs_to_get, order_by, next_page_token)
+        else:
+            runs = MlflowClient().search_runs(experiment_ids, filter_string, run_view_type,
+                                              NUM_RUNS_PER_PAGE_PANDAS, order_by, next_page_token)
+        all_runs.extend(runs)
+        if hasattr(runs, 'token') and runs.token != '':
+            next_page_token = runs.token
+        else:
+            break
+    return all_runs
 
 
 def _get_or_start_run():
@@ -249,48 +409,18 @@ def _get_or_start_run():
     return start_run()
 
 
-def _get_main_file():
-    if len(sys.argv) > 0:
-        return sys.argv[0]
-    return None
-
-
-def _get_source_name():
-    main_file = _get_main_file()
-    if main_file is not None:
-        return main_file
-    return ""
-
-
-def _get_source_version():
-    main_file = _get_main_file()
-    if main_file is not None:
-        return _get_git_commit(main_file)
-    return None
-
-
-def _get_source_type():
-    return SourceType.LOCAL
+def _get_experiment_id_from_env():
+    experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)
+    if experiment_name is not None:
+        exp = MlflowClient().get_experiment_by_name(experiment_name)
+        return exp.experiment_id if exp else None
+    return env.get_env(_EXPERIMENT_ID_ENV_VAR)
 
 
 def _get_experiment_id():
-    return int(_active_experiment_id or
-               env.get_env(_EXPERIMENT_ID_ENV_VAR) or
-               Experiment.DEFAULT_EXPERIMENT_ID)
-
-
-def _get_git_commit(path):
-    try:
-        from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError
-    except ImportError as e:
-        eprint("Notice: failed to import Git (the Git executable is probably not on your PATH),"
-               " so Git SHA is not available. Error: %s" % e)
-        return None
-    try:
-        if os.path.isfile(path):
-            path = os.path.dirname(path)
-        repo = Repo(path, search_parent_directories=True)
-        commit = repo.head.commit.hexsha
-        return commit
-    except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):
-        return None
+    # TODO: Replace with None for 1.0, leaving for 0.9.1 release backcompat with existing servers
+    deprecated_default_exp_id = "0"
+
+    return (_active_experiment_id or
+            _get_experiment_id_from_env() or
+            (is_in_databricks_notebook() and get_notebook_id())) or deprecated_default_exp_id
diff --git a/mlflow/tracking/registry.py b/mlflow/tracking/registry.py
new file mode 100644
index 0000000000000..bdf0159179644
--- /dev/null
+++ b/mlflow/tracking/registry.py
@@ -0,0 +1,65 @@
+import warnings
+
+import entrypoints
+
+from mlflow.exceptions import MlflowException
+from mlflow.utils import get_uri_scheme
+
+
+class TrackingStoreRegistry:
+    """Scheme-based registry for tracking store implementations
+
+    This class allows the registration of a function or class to provide an
+    implementation for a given scheme of `store_uri` through the `register`
+    methods. Implementations declared through the entrypoints
+    `mlflow.tracking_store` group can be automatically registered through the
+    `register_entrypoints` method.
+
+    When instantiating a store through the `get_store` method, the scheme of
+    the store URI provided (or inferred from environment) will be used to
+    select which implementation to instantiate, which will be called with the
+    same arguments passed to the `get_store` method. 
+ """ + + def __init__(self): + self._registry = {} + + def register(self, scheme, store_builder): + self._registry[scheme] = store_builder + + def register_entrypoints(self): + """Register tracking stores provided by other packages""" + for entrypoint in entrypoints.get_group_all("mlflow.tracking_store"): + try: + self.register(entrypoint.name, entrypoint.load()) + except (AttributeError, ImportError) as exc: + warnings.warn( + 'Failure attempting to register tracking store for scheme "{}": {}'.format( + entrypoint.name, str(exc) + ), + stacklevel=2 + ) + + def get_store(self, store_uri=None, artifact_uri=None): + """Get a store from the registry based on the scheme of store_uri + + :param store_uri: The store URI. If None, it will be inferred from the environment. This URI + is used to select which tracking store implementation to instantiate and + is passed to the constructor of the implementation. + :param artifact_uri: Artifact repository URI. Passed through to the tracking store + implementation. + + :return: An instance of `mlflow.store.AbstractStore` that fulfills the store URI + requirements. + """ + from mlflow.tracking import utils + store_uri = store_uri if store_uri is not None else utils.get_tracking_uri() + scheme = store_uri if store_uri == "databricks" else get_uri_scheme(store_uri) + + try: + store_builder = self._registry[scheme] + except KeyError: + raise MlflowException( + "Unexpected URI scheme '{}' for tracking store. " + "Valid schemes are: {}".format(store_uri, list(self._registry.keys()))) + return store_builder(store_uri=store_uri, artifact_uri=artifact_uri) diff --git a/mlflow/tracking/sinks.py b/mlflow/tracking/sinks.py deleted file mode 100644 index a86193915ea1a..0000000000000 --- a/mlflow/tracking/sinks.py +++ /dev/null @@ -1,78 +0,0 @@ -import distutils.dir_util as dir_util -import os -import shutil - - -_TRACKING_DIR_ENV_VAR = "MLFLOW_TRACKING_DIR" - - -def _get_tracking_dir(): - if _TRACKING_DIR_ENV_VAR in os.environ: - return os.environ[_TRACKING_DIR_ENV_VAR] - else: - return "mlruns" - - -class FileSink(object): - def __init__(self, run_id, root_dir=_get_tracking_dir()): - self.run_id = run_id - self.run_dir = os.path.join(root_dir, run_id) - dir_util.mkpath(self.run_dir) - - def log_param(self, key, value): - # TODO: prevent keys from containing funky values like ".." - fn = os.path.join(self.run_dir, "parameters", key) - dir_util.mkpath(os.path.dirname(fn)) - with open(fn, "w") as f: - f.write("%s\n" % value) - - def log_metric(self, key, value): - # TODO: prevent keys from containing funky values like ".." 
- fn = os.path.join(self.run_dir, "metrics", key) - dir_util.mkpath(os.path.dirname(fn)) - with open(fn, "a") as f: - f.write("%s\n" % value) - - def log_artifact(self, local_path, artifact_path=None): - if artifact_path is None: - artifact_path = os.path.basename(local_path) - if os.path.exists(local_path): - dst_path = os.path.join(self.run_dir, "outputs", artifact_path) - if not os.path.exists(os.path.dirname(dst_path)): - dir_util.mkpath(os.path.dirname(dst_path)) - shutil.copy(local_path, dst_path) - - def log_output_files(self, output_dir, path): - if os.path.exists(output_dir): - if path is not None: - dst_dir = os.path.join(self.run_dir, "outputs", path) - else: - dst_dir = os.path.join(self.run_dir, "outputs") - if not os.path.exists(dst_dir): - dir_util.mkpath(dst_dir) - dir_util.copy_tree(src=output_dir, dst=dst_dir) - - def set_status(self, status): - fn = os.path.join(self.run_dir, "status") - with open(fn, "w") as f: - f.write("%s\n" % status) - - def set_source(self, source): - fn = os.path.join(self.run_dir, "source") - with open(fn, "w") as f: - f.write("%s\n" % source) - - def set_git_commit(self, commit): - fn = os.path.join(self.run_dir, "git_commit") - with open(fn, "w") as f: - f.write("%s\n" % commit) - - def set_start_date(self, utc_date_time): - fn = os.path.join(self.run_dir, "start_date") - with open(fn, "w") as f: - f.write("%s\n" % utc_date_time.isoformat()) - - def set_end_date(self, utc_date_time): - fn = os.path.join(self.run_dir, "end_date") - with open(fn, "w") as f: - f.write("%s\n" % utc_date_time.isoformat()) diff --git a/mlflow/tracking/utils.py b/mlflow/tracking/utils.py index 8649b87a46189..bf5169d9848fa 100644 --- a/mlflow/tracking/utils.py +++ b/mlflow/tracking/utils.py @@ -5,13 +5,15 @@ from six.moves import urllib +from mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH +from mlflow.store.dbmodels.db_types import DATABASE_ENGINES from mlflow.store.file_store import FileStore from mlflow.store.rest_store import RestStore -from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.tracking.registry import TrackingStoreRegistry from mlflow.utils import env, rest_utils +from mlflow.utils.file_utils import path_to_local_file_uri from mlflow.utils.databricks_utils import get_databricks_host_creds - _TRACKING_URI_ENV_VAR = "MLFLOW_TRACKING_URI" _LOCAL_FS_URI_PREFIX = "file:///" _REMOTE_URI_PREFIX = "http://" @@ -23,7 +25,6 @@ _TRACKING_TOKEN_ENV_VAR = "MLFLOW_TRACKING_TOKEN" _TRACKING_INSECURE_TLS_ENV_VAR = "MLFLOW_TRACKING_INSECURE_TLS" - _tracking_uri = None @@ -66,28 +67,11 @@ def get_tracking_uri(): elif env.get_env(_TRACKING_URI_ENV_VAR) is not None: return env.get_env(_TRACKING_URI_ENV_VAR) else: - return os.path.abspath("./mlruns") - - -def _get_store(store_uri=None): - store_uri = store_uri if store_uri else get_tracking_uri() - # Default: if URI hasn't been set, return a FileStore - if store_uri is None: - return FileStore() - # Pattern-match on the URI - if _is_databricks_uri(store_uri): - return _get_databricks_rest_store(store_uri) - if _is_local_uri(store_uri): - return _get_file_store(store_uri) - if _is_http_uri(store_uri): - return _get_rest_store(store_uri) - - raise Exception("Tracking URI must be a local filesystem URI of the form '%s...' or a " - "remote URI of the form '%s...'. 
Update the tracking URI via " - "mlflow.set_tracking_uri" % (_LOCAL_FS_URI_PREFIX, _REMOTE_URI_PREFIX)) + return path_to_local_file_uri(os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)) def _is_local_uri(uri): + """Returns true if this is a local file path (/foo or file:/foo).""" scheme = urllib.parse.urlparse(uri).scheme return uri != 'databricks' and (scheme == '' or scheme == 'file') @@ -103,12 +87,18 @@ def _is_databricks_uri(uri): return scheme == 'databricks' or uri == 'databricks' -def _get_file_store(store_uri): - path = urllib.parse.urlparse(store_uri).path - return FileStore(path) +def _get_file_store(store_uri, **_): + return FileStore(store_uri, store_uri) + + +def _get_sqlalchemy_store(store_uri, artifact_uri): + from mlflow.store.sqlalchemy_store import SqlAlchemyStore + if artifact_uri is None: + artifact_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH + return SqlAlchemyStore(store_uri, artifact_uri) -def _get_rest_store(store_uri): +def _get_rest_store(store_uri, **_): def get_default_host_creds(): return rest_utils.MlflowHostCreds( host=store_uri, @@ -117,6 +107,7 @@ def get_default_host_creds(): token=os.environ.get(_TRACKING_TOKEN_ENV_VAR), ignore_tls_verification=os.environ.get(_TRACKING_INSECURE_TLS_ENV_VAR) == 'true', ) + return RestStore(get_default_host_creds) @@ -131,20 +122,30 @@ def get_db_profile_from_uri(uri): return None -def _get_databricks_rest_store(store_uri): +def _get_databricks_rest_store(store_uri, **_): profile = get_db_profile_from_uri(store_uri) return RestStore(lambda: get_databricks_host_creds(profile)) -def _get_model_log_dir(model_name, run_id): - if not run_id: - raise Exception("Must specify a run_id to get logging directory for a model.") - store = _get_store() - run = store.get_run(run_id) - artifact_repo = ArtifactRepository.from_artifact_uri(run.info.artifact_uri, store) - return artifact_repo.download_artifacts(model_name) +_tracking_store_registry = TrackingStoreRegistry() +_tracking_store_registry.register('', _get_file_store) +_tracking_store_registry.register('file', _get_file_store) +_tracking_store_registry.register('databricks', _get_databricks_rest_store) + +for scheme in ['http', 'https']: + _tracking_store_registry.register(scheme, _get_rest_store) + +for scheme in DATABASE_ENGINES: + _tracking_store_registry.register(scheme, _get_sqlalchemy_store) + +_tracking_store_registry.register_entrypoints() + + +def _get_store(store_uri=None, artifact_uri=None): + return _tracking_store_registry.get_store(store_uri, artifact_uri) +# TODO(sueann): move to a projects utils module def _get_git_url_if_present(uri): """ Return the path git_uri#sub_directory if the URI passed is a local path that's part of diff --git a/mlflow/utils/__init__.py b/mlflow/utils/__init__.py index 77aa71ee997e9..361da9f18d5e4 100644 --- a/mlflow/utils/__init__.py +++ b/mlflow/utils/__init__.py @@ -2,41 +2,80 @@ import numpy as np import pandas as pd +from six.moves import urllib +from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE + +from mlflow.store.dbmodels.db_types import DATABASE_ENGINES +from mlflow.utils.annotations import deprecated, experimental, keyword_only +from mlflow.utils.validation import _validate_db_type_string PYTHON_VERSION = "{major}.{minor}.{micro}".format(major=version_info.major, minor=version_info.minor, micro=version_info.micro) +_INVALID_DB_URI_MSG = "Please refer to https://mlflow.org/docs/latest/tracking.html#storage for " \ + "format specifications." 
-def ndarray2list(ndarray): +def extract_db_type_from_uri(db_uri): """ - Convert n-dimensional numpy array into nested lists and convert the elements types to native - python so that the list is json-able using standard json library. - :param ndarray: numpy array - :return: list representation of the numpy array with element types convereted to native python + Parse the specified DB URI to extract the database type. Confirm the database type is + supported. If a driver is specified, confirm it passes a plausible regex. """ - if len(ndarray.shape) <= 1: - return [x.item() for x in ndarray] - return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])] + scheme = urllib.parse.urlparse(db_uri).scheme + scheme_plus_count = scheme.count('+') + if scheme_plus_count == 0: + db_type = scheme + elif scheme_plus_count == 1: + db_type, _ = scheme.split('+') + else: + error_msg = "Invalid database URI: '%s'. %s" % (db_uri, _INVALID_DB_URI_MSG) + raise MlflowException(error_msg, INVALID_PARAMETER_VALUE) -def get_jsonable_obj(data): - """Attempt to make the data json-able via standard library. - Look for some commonly used types that are not jsonable and convert them into json-able ones. - Unknown data types are returned as is. + _validate_db_type_string(db_type) - :param data: data to be converted, works with pandas and numpy, rest will be returned as is. - """ - if isinstance(data, np.ndarray): - return ndarray2list(data) - if isinstance(data, pd.DataFrame): - return data.to_dict(orient='records') - if isinstance(data, pd.Series): - return pd.DataFrame(data).to_dict(orient='records') - else: # by default just return whatever this is and hope for the best - return data + return db_type def get_major_minor_py_version(py_version): return ".".join(py_version.split(".")[:2]) + + +def get_unique_resource_id(max_length=None): + """ + Obtains a unique id that can be included in a resource name. This unique id is a valid + DNS subname. + + :param max_length: The maximum length of the identifier + :return: A unique identifier that can be appended to a user-readable resource name to avoid + naming collisions. + """ + import uuid + import base64 + if max_length is not None and max_length <= 0: + raise ValueError( + "The specified maximum length for the unique resource id must be positive!") + + uuid_bytes = uuid.uuid4().bytes + # Use base64 encoding to shorten the UUID length. Note that the replacement of the + # unsupported '+' symbol maintains uniqueness because the UUID byte string is of a fixed, + # 16-byte length + uuid_b64 = base64.b64encode(uuid_bytes) + if version_info >= (3, 0): + # In Python3, `uuid_b64` is a `bytes` object. It needs to be + # converted to a string + uuid_b64 = uuid_b64.decode("ascii") + unique_id = uuid_b64.rstrip('=\n').replace("/", "-").replace("+", "AB").lower() + if max_length is not None: + unique_id = unique_id[:int(max_length)] + return unique_id + + +def get_uri_scheme(uri_or_path): + scheme = urllib.parse.urlparse(uri_or_path).scheme + if any([scheme.lower().startswith(db) for db in DATABASE_ENGINES]): + return extract_db_type_from_uri(uri_or_path) + else: + return scheme diff --git a/mlflow/utils/annotations.py b/mlflow/utils/annotations.py new file mode 100644 index 0000000000000..9ccda419ff4df --- /dev/null +++ b/mlflow/utils/annotations.py @@ -0,0 +1,46 @@ +from functools import wraps + + +def experimental(func): + """ + Decorator for marking APIs experimental in the docstring. + + :param func: A function to mark + :returns Decorated function. 
+    """
+    notice = ".. Note:: Experimental: This method may change or " + \
+             "be removed in a future release without warning.\n"
+    func.__doc__ = notice + func.__doc__
+    return func
+
+
+def deprecated(alternative=None, since=None):
+    """
+    Decorator for marking APIs deprecated in the docstring.
+
+    :param alternative: Name of the preferred API to suggest in the deprecation notice.
+    :param since: Version at which the API was deprecated.
+    """
+    def deprecated_func(func):
+        since_str = " since %s" % since if since else ""
+        notice = ".. Warning:: Deprecated%s: This method will be removed in " % since_str + \
+                 "a near future release."
+        if alternative is not None and alternative.strip():
+            notice += " Use ``%s`` instead." % alternative
+        func.__doc__ = notice + "\n" + func.__doc__
+        return func
+    return deprecated_func
+
+
+def keyword_only(func):
+    """
+    A decorator that forces keyword arguments in the wrapped method.
+    """
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        if len(args) > 0:
+            raise TypeError("Method %s only takes keyword arguments." % func.__name__)
+        return func(**kwargs)
+    notice = ".. Note:: This method requires all arguments to be specified by keyword.\n"
+    wrapper.__doc__ = notice + wrapper.__doc__
+    return wrapper
diff --git a/mlflow/utils/cli_args.py b/mlflow/utils/cli_args.py
index 270ad5f2039fd..feff17dcf4c12 100644
--- a/mlflow/utils/cli_args.py
+++ b/mlflow/utils/cli_args.py
@@ -7,6 +7,13 @@
               help="Path to the model. The path is relative to the run with the given "
                    "run-id or local filesystem path without run-id.")
 
+MODEL_URI = click.option("--model-uri", "-m", default=None, metavar="URI", required=True,
+                         help="URI to the model. A local path, a 'runs:/' URI, or a"
+                              " remote storage URI (e.g., an 's3://' URI). For more information"
+                              " about supported remote URIs for model artifacts, see"
+                              " https://mlflow.org/docs/latest/tracking.html"
+                              "#supported-artifact-stores")
+
 MLFLOW_HOME = click.option("--mlflow-home", default=None, metavar="PATH",
                            help="Path to local clone of MLflow project. Use for development only.")
 
@@ -14,7 +21,24 @@
                        help="ID of the MLflow run that generated the referenced content.")
 
 NO_CONDA = click.option("--no-conda", is_flag=True,
-                        help="If specified, will assume that MLModel/MLProject is running within "
-                             "a Conda environmen with the necessary dependencies for "
+                        help="If specified, will assume that MLmodel/MLproject is running within "
+                             "a Conda environment with the necessary dependencies for "
                              "the current project instead of attempting to create a new "
                              "conda environment.")
+
+INSTALL_MLFLOW = click.option("--install-mlflow", is_flag=True, default=False,
+                              help="If specified and there is a conda environment to be activated, "
+                                   "mlflow will be installed into the environment after it has been "
+                                   "activated. The version of installed mlflow will be the same as "
+                                   "the one used to invoke this command.")
+
+HOST = click.option("--host", "-h", metavar="HOST", default="127.0.0.1",
+                    help="The network address to listen on (default: 127.0.0.1). 
" + "Use 0.0.0.0 to bind to all addresses if you want to access the tracking " + "server from other machines.") +PORT = click.option("--port", "-p", default=5000, + help="The port to listen on (default: 5000).") + +# We use None to disambiguate manually selecting "4" +WORKERS = click.option("--workers", "-w", default=None, + help="Number of gunicorn worker processes to handle requests (default: 4).") diff --git a/mlflow/utils/databricks_utils.py b/mlflow/utils/databricks_utils.py index 71302bff69d49..2957ef654b9eb 100644 --- a/mlflow/utils/databricks_utils.py +++ b/mlflow/utils/databricks_utils.py @@ -1,9 +1,15 @@ +import os +import logging +import subprocess + from mlflow.exceptions import MlflowException from mlflow.utils.rest_utils import MlflowHostCreds -from mlflow.utils.logging_utils import eprint from databricks_cli.configure import provider +_logger = logging.getLogger(__name__) + + def _get_dbutils(): try: import IPython @@ -27,15 +33,39 @@ def _get_extra_context(context_key): return java_dbutils.notebook().getContext().extraContext().get(context_key).get() +def _get_property_from_spark_context(key): + try: + from pyspark import TaskContext # pylint: disable=import-error + task_context = TaskContext.get() + if task_context: + return task_context.getLocalProperty(key) + except Exception: # pylint: disable=broad-except + return None + + def is_in_databricks_notebook(): + if _get_property_from_spark_context("spark.databricks.notebook.id") is not None: + return True try: return _get_extra_context("aclPathOfAclRoot").startswith('/workspace') except Exception: # pylint: disable=broad-except return False +def is_dbfs_fuse_available(): + with open(os.devnull, 'w') as devnull_stderr, open(os.devnull, 'w') as devnull_stdout: + try: + return subprocess.call( + ["mountpoint", "/dbfs"], stderr=devnull_stderr, stdout=devnull_stdout) == 0 + except Exception: # pylint: disable=broad-except + return False + + def get_notebook_id(): """Should only be called if is_in_databricks_notebook is true""" + notebook_id = _get_property_from_spark_context("spark.databricks.notebook.id") + if notebook_id is not None: + return notebook_id acl_path = _get_extra_context("aclPathOfAclRoot") if acl_path.startswith('/workspace'): return acl_path.split('/')[-1] @@ -44,11 +74,17 @@ def get_notebook_id(): def get_notebook_path(): """Should only be called if is_in_databricks_notebook is true""" + path = _get_property_from_spark_context("spark.databricks.notebook.path") + if path is not None: + return path return _get_extra_context("notebook_path") def get_webapp_url(): """Should only be called if is_in_databricks_notebook is true""" + url = _get_property_from_spark_context("spark.databricks.api.url") + if url is not None: + return url return _get_extra_context("api_url") @@ -69,14 +105,14 @@ def get_databricks_host_creds(profile=None): authentication information necessary to talk to the Databricks server. 
""" if not hasattr(provider, 'get_config'): - eprint("Warning: support for databricks-cli<0.8.0 is deprecated and will be removed" - " in a future version.") + _logger.warning( + "Support for databricks-cli<0.8.0 is deprecated and will be removed" + " in a future version.") config = provider.get_config_for_profile(profile) elif profile: config = provider.ProfileConfigProvider(profile).get_config() else: config = provider.get_config() - if not config or not config.host: _fail_malformed_databricks_auth(profile) diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py index 45fec3d9fb3db..084a1c5c2fc9f 100644 --- a/mlflow/utils/environment.py +++ b/mlflow/utils/environment.py @@ -1,31 +1,41 @@ +import yaml + from mlflow.utils import PYTHON_VERSION -_conda_header = """name: mlflow-env +_conda_header = """\ +name: mlflow-env channels: - - anaconda - defaults -dependencies:""" +""" -def _mlflow_conda_env(path, additional_conda_deps=None, additional_pip_deps=None): +def _mlflow_conda_env(path=None, additional_conda_deps=None, additional_pip_deps=None, + additional_conda_channels=None, install_mlflow=True): """ - Create conda environment file. Contains default dependency on current python version. - :param path: local filesystem path where the conda env file is to be created. + Creates a Conda environment with the specified package channels and dependencies. + + :param path: Local filesystem path where the conda env file is to be written. If unspecified, + the conda env will not be written to the filesystem; it will still be returned + in dictionary format. :param additional_conda_deps: List of additional conda dependencies passed as strings. :param additional_pip_deps: List of additional pip dependencies passed as strings. - :return: path where the files has been created + :param additional_channels: List of additional conda channels to search when resolving packages. + :return: ``None`` if ``path`` is specified. Otherwise, the a dictionary representation of the + Conda environment. 
""" - conda_deps = ["python={}".format(PYTHON_VERSION)] - if additional_conda_deps: - conda_deps += additional_conda_deps - pip_deps = additional_pip_deps - with open(path, "w") as f: - f.write(_conda_header) - prefix = "\n - " - f.write(prefix + prefix.join(conda_deps)) - if pip_deps: - f.write(prefix + "pip:") - prefix = "\n - " - f.write(prefix + prefix.join(pip_deps)) - f.write("\n") - return path + env = yaml.safe_load(_conda_header) + env["dependencies"] = ["python={}".format(PYTHON_VERSION)] + pip_deps = (["mlflow"] if install_mlflow else []) + ( + additional_pip_deps if additional_pip_deps else []) + if additional_conda_deps is not None: + env["dependencies"] += additional_conda_deps + env["dependencies"].append({"pip": pip_deps}) + if additional_conda_channels is not None: + env["channels"] += additional_conda_channels + + if path is not None: + with open(path, "w") as out: + yaml.safe_dump(env, stream=out, default_flow_style=False) + return None + else: + return env diff --git a/mlflow/utils/file_utils.py b/mlflow/utils/file_utils.py index a4b55716855b5..6b07d3dd8920b 100644 --- a/mlflow/utils/file_utils.py +++ b/mlflow/utils/file_utils.py @@ -1,13 +1,20 @@ import codecs import gzip import os +import posixpath import shutil +import sys import tarfile import tempfile +from six.moves.urllib.request import pathname2url +from six.moves.urllib.parse import unquote +from six.moves import urllib + import yaml from mlflow.entities import FileInfo +from mlflow.exceptions import MissingConfigException ENCODING = "utf-8" @@ -24,11 +31,6 @@ def exists(name): return os.path.exists(name) -def build_path(*path_segments): - """ Returns the path formed by joining the passed-in path segments. """ - return os.path.join(*path_segments) - - def list_all(root, filter_func=lambda x: True, full_path=False): """ List all entities directly under 'dir_name' that satisfy 'filter_func' @@ -124,7 +126,7 @@ def write_yaml(root, file_name, data, overwrite=False): :param overwrite: If True, will overwrite existing files """ if not exists(root): - raise Exception("Parent directory '%s' does not exist." % root) + raise MissingConfigException("Parent directory '%s' does not exist." % root) file_path = os.path.join(root, file_name) yaml_file_name = file_path if file_path.endswith(".yaml") else file_path + ".yaml" @@ -133,7 +135,7 @@ def write_yaml(root, file_name, data, overwrite=False): raise Exception("Yaml file '%s' exists as '%s" % (file_path, yaml_file_name)) try: - with open(yaml_file_name, 'w') as yaml_file: + with codecs.open(yaml_file_name, mode='w', encoding=ENCODING) as yaml_file: yaml.safe_dump(data, yaml_file, default_flow_style=False, allow_unicode=True) except Exception as e: raise e @@ -149,14 +151,14 @@ def read_yaml(root, file_name): :return: Data in yaml file as dictionary """ if not exists(root): - raise Exception("Cannot read '%s'. Parent dir '%s' does not exist." % (file_name, root)) + raise MissingConfigException( + "Cannot read '%s'. Parent dir '%s' does not exist." % (file_name, root)) file_path = os.path.join(root, file_name) if not exists(file_path): - raise Exception("Yaml file '%s' does not exist." % file_path) - + raise MissingConfigException("Yaml file '%s' does not exist." 
% file_path) try: - with open(file_path, 'r') as yaml_file: + with codecs.open(file_path, mode='r', encoding=ENCODING) as yaml_file: return yaml.safe_load(yaml_file) except Exception as e: raise e @@ -274,7 +276,7 @@ def _filter_timestamps(tar_info): tar.add(source_dir, arcname=archive_name, filter=_filter_timestamps) # When gzipping the tar, don't include the tar's filename or modification time in the # zipped archive (see https://docs.python.org/3/library/gzip.html#gzip.GzipFile) - with gzip.GzipFile(filename="", fileobj=open(output_filename, 'wb'), mode='wb', mtime=0)\ + with gzip.GzipFile(filename="", fileobj=open(output_filename, 'wb'), mode='wb', mtime=0) \ as gzipped_tar, open(unzipped_filename, 'rb') as tar: gzipped_tar.write(tar.read()) finally: @@ -318,16 +320,71 @@ def ignore(_, names): return mlflow_dir -def _copy_file_or_tree(src, dst, dst_dir): - name = os.path.join(dst_dir, os.path.basename(os.path.abspath(src))) - if dst_dir: - os.mkdir(os.path.join(dst, dst_dir)) +def _copy_file_or_tree(src, dst, dst_dir=None): + """ + :return: The path to the copied artifacts, relative to `dst` + """ + dst_subpath = os.path.basename(os.path.abspath(src)) + if dst_dir is not None: + dst_subpath = os.path.join(dst_dir, dst_subpath) + dst_path = os.path.join(dst, dst_subpath) if os.path.isfile(src): - shutil.copy(src=src, dst=os.path.join(dst, name)) + dst_dirpath = os.path.dirname(dst_path) + if not os.path.exists(dst_dirpath): + os.makedirs(dst_dirpath) + shutil.copy(src=src, dst=dst_path) else: - shutil.copytree(src=src, dst=os.path.join(dst, name)) - return name + shutil.copytree(src=src, dst=dst_path) + return dst_subpath def get_parent_dir(path): return os.path.abspath(os.path.join(path, os.pardir)) + + +def relative_path_to_artifact_path(path): + if os.path == posixpath: + return path + if os.path.abspath(path) == path: + raise Exception("This method only works with relative paths.") + return unquote(pathname2url(path)) + + +def path_to_local_file_uri(path): + """ + Convert local filesystem path to local file uri. + """ + path = pathname2url(path) + if path == posixpath.abspath(path): + return "file://{path}".format(path=path) + else: + return "file:{path}".format(path=path) + + +def path_to_local_sqlite_uri(path): + """ + Convert local filesystem path to sqlite uri. + """ + path = posixpath.abspath(pathname2url(os.path.abspath(path))) + prefix = "sqlite://" if sys.platform == "win32" else "sqlite:///" + return prefix + path + + +def local_file_uri_to_path(uri): + """ + Convert URI to local filesystem path. + No-op if the uri does not have the expected scheme. + """ + path = urllib.parse.urlparse(uri).path if uri.startswith("file:") else uri + return urllib.request.url2pathname(path) + + +def get_local_path_or_none(path_or_uri): + """Check if the argument is a local path (no scheme or file:///) and return local path if true, + None otherwise. 
+ """ + parsed_uri = urllib.parse.urlparse(path_or_uri) + if len(parsed_uri.scheme) == 0 or parsed_uri.scheme == "file" and len(parsed_uri.netloc) == 0: + return local_file_uri_to_path(path_or_uri) + else: + return None diff --git a/mlflow/utils/logging_utils.py b/mlflow/utils/logging_utils.py index 64ba60d08c3b4..81859d73d2c2f 100644 --- a/mlflow/utils/logging_utils.py +++ b/mlflow/utils/logging_utils.py @@ -1,5 +1,42 @@ from __future__ import print_function + import sys +import logging +import logging.config + + +# Logging format example: +# 2018/11/20 12:36:37 INFO mlflow.sagemaker: Creating new SageMaker endpoint +LOGGING_LINE_FORMAT = "%(asctime)s %(levelname)s %(name)s: %(message)s" +LOGGING_DATETIME_FORMAT = "%Y/%m/%d %H:%M:%S" + + +def _configure_mlflow_loggers(root_module_name): + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'mlflow_formatter': { + 'format': LOGGING_LINE_FORMAT, + 'datefmt': LOGGING_DATETIME_FORMAT, + }, + }, + 'handlers': { + 'mlflow_handler': { + 'level': 'INFO', + 'formatter': 'mlflow_formatter', + 'class': 'logging.StreamHandler', + 'stream': sys.stderr, + }, + }, + 'loggers': { + root_module_name: { + 'handlers': ['mlflow_handler'], + 'level': 'INFO', + 'propagate': False, + }, + }, + }) def eprint(*args, **kwargs): diff --git a/mlflow/utils/mlflow_tags.py b/mlflow/utils/mlflow_tags.py index 222c4ee047459..be11ce1b0eaa3 100644 --- a/mlflow/utils/mlflow_tags.py +++ b/mlflow/utils/mlflow_tags.py @@ -1,12 +1,31 @@ """ File containing all of the run tags in the mlflow. namespace. + +See the REST API documentation for information on the meaning of these tags. """ + +MLFLOW_RUN_NAME = "mlflow.runName" +MLFLOW_PARENT_RUN_ID = "mlflow.parentRunId" +MLFLOW_USER = "mlflow.user" +MLFLOW_SOURCE_TYPE = "mlflow.source.type" +MLFLOW_SOURCE_NAME = "mlflow.source.name" +MLFLOW_GIT_COMMIT = "mlflow.source.git.commit" +MLFLOW_GIT_BRANCH = "mlflow.source.git.branch" +MLFLOW_GIT_REPO_URL = "mlflow.source.git.repoURL" +MLFLOW_PROJECT_ENV = "mlflow.project.env" +MLFLOW_PROJECT_ENTRY_POINT = "mlflow.project.entryPoint" +MLFLOW_DOCKER_IMAGE_URI = "mlflow.docker.image.uri" +MLFLOW_DOCKER_IMAGE_ID = "mlflow.docker.image.id" + MLFLOW_DATABRICKS_NOTEBOOK_ID = "mlflow.databricks.notebookID" MLFLOW_DATABRICKS_NOTEBOOK_PATH = "mlflow.databricks.notebookPath" MLFLOW_DATABRICKS_WEBAPP_URL = "mlflow.databricks.webappURL" MLFLOW_DATABRICKS_RUN_URL = "mlflow.databricks.runURL" MLFLOW_DATABRICKS_SHELL_JOB_ID = "mlflow.databricks.shellJobID" MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID = "mlflow.databricks.shellJobRunID" -MLFLOW_RUN_NAME = "mlflow.runName" -MLFLOW_GIT_BRANCH_NAME = "mlflow.gitBranchName" -MLFLOW_PARENT_RUN_ID = "mlflow.parentRunId" + +MLFLOW_PROJECT_BACKEND = "mlflow.project.backend" + +# The following legacy tags are deprecated and will be removed by MLflow 1.0. 
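+# A minimal migration sketch (hypothetical client code, not part of this module;
+# ``run_id`` and ``branch`` are placeholder variables):
+#
+#     from mlflow.tracking import MlflowClient
+#     from mlflow.utils.mlflow_tags import MLFLOW_GIT_BRANCH
+#
+#     client = MlflowClient()
+#     # Previously: client.set_tag(run_id, "mlflow.gitBranchName", branch)
+#     client.set_tag(run_id, MLFLOW_GIT_BRANCH, branch)
+#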
+LEGACY_MLFLOW_GIT_BRANCH_NAME = "mlflow.gitBranchName" # Replaced with mlflow.source.git.branch +LEGACY_MLFLOW_GIT_REPO_URL = "mlflow.gitRepoURL" # Replaced with mlflow.source.git.repoURL diff --git a/mlflow/utils/model_utils.py b/mlflow/utils/model_utils.py new file mode 100644 index 0000000000000..527972c539054 --- /dev/null +++ b/mlflow/utils/model_utils.py @@ -0,0 +1,32 @@ +import os + +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST + + +def _get_flavor_configuration(model_path, flavor_name): + """ + Obtains the configuration for the specified flavor from the specified + MLflow model path. If the model does not contain the specified flavor, + an exception will be thrown. + + :param model_path: The path to the root directory of the MLflow model for which to load + the specified flavor configuration. + :param flavor_name: The name of the flavor configuration to load. + :return: The flavor configuration as a dictionary. + """ + model_configuration_path = os.path.join(model_path, "MLmodel") + if not os.path.exists(model_configuration_path): + raise MlflowException( + "Could not find an \"MLmodel\" configuration file at \"{model_path}\"".format( + model_path=model_path), + RESOURCE_DOES_NOT_EXIST) + + model_conf = Model.load(model_configuration_path) + if flavor_name not in model_conf.flavors: + raise MlflowException( + "Model does not have the \"{flavor_name}\" flavor".format(flavor_name=flavor_name), + RESOURCE_DOES_NOT_EXIST) + conf = model_conf.flavors[flavor_name] + return conf diff --git a/mlflow/utils/proto_json_utils.py b/mlflow/utils/proto_json_utils.py index e05fff2c9ca30..2b3ed985183b6 100644 --- a/mlflow/utils/proto_json_utils.py +++ b/mlflow/utils/proto_json_utils.py @@ -6,6 +6,35 @@ def message_to_json(message): return MessageToJson(message, preserving_proto_field_name=True) +def _stringify_all_experiment_ids(x): + """Converts experiment_id fields which are defined as ints into strings in the given json. + This is necessary for backwards- and forwards-compatibility with MLflow clients/servers + running MLflow 0.9.0 and below, as experiment_id was changed from an int to a string. + To note, the Python JSON serializer is happy to auto-convert strings into ints (so a + server or client that sees the new format is fine), but is unwilling to convert ints + to strings. Therefore, we need to manually perform this conversion. + + This code can be removed after MLflow 1.0, after users have given reasonable time to + upgrade clients and servers to MLflow 0.9.1+. 
+ """ + if isinstance(x, dict): + items = x.items() + for k, v in items: + if k == "experiment_id": + x[k] = str(v) + elif k == "experiment_ids": + x[k] = [str(w) for w in v] + elif k == "info" and isinstance(v, dict) and "experiment_id" in v and "run_uuid" in v: + # shortcut for run info + v["experiment_id"] = str(v["experiment_id"]) + elif k not in ("params", "tags", "metrics"): # skip run data + _stringify_all_experiment_ids(v) + elif isinstance(x, list): + for y in x: + _stringify_all_experiment_ids(y) + + def parse_dict(js_dict, message): - """Parses a JSON dictionary into a message proto, ignoring unknown fields in the JOSN.""" + """Parses a JSON dictionary into a message proto, ignoring unknown fields in the JSON.""" + _stringify_all_experiment_ids(js_dict) ParseDict(js_dict=js_dict, message=message, ignore_unknown_fields=True) diff --git a/mlflow/utils/rest_utils.py b/mlflow/utils/rest_utils.py index 19cec8e8b83cc..9d5647bfeeccd 100644 --- a/mlflow/utils/rest_utils.py +++ b/mlflow/utils/rest_utils.py @@ -1,18 +1,22 @@ import base64 import time +import logging import json -from json import JSONEncoder -import numpy import requests -from mlflow.utils.logging_utils import eprint +from mlflow import __version__ from mlflow.utils.string_utils import strip_suffix from mlflow.exceptions import MlflowException, RestException - RESOURCE_DOES_NOT_EXIST = 'RESOURCE_DOES_NOT_EXIST' +_logger = logging.getLogger(__name__) + +_DEFAULT_HEADERS = { + 'User-Agent': 'mlflow-python-client/%s' % __version__ +} + def http_request(host_creds, endpoint, retries=3, retry_interval=3, **kwargs): """ @@ -33,7 +37,7 @@ def http_request(host_creds, endpoint, retries=3, retry_interval=3, **kwargs): elif host_creds.token: auth_str = "Bearer %s" % host_creds.token - headers = {} + headers = dict(_DEFAULT_HEADERS) if auth_str: headers['Authorization'] = auth_str @@ -46,9 +50,10 @@ def http_request(host_creds, endpoint, retries=3, retry_interval=3, **kwargs): if response.status_code >= 200 and response.status_code < 500: return response else: - eprint("API request to %s failed with code %s != 200, retrying up to %s more times. " - "API response body: %s" % (url, response.status_code, retries - i - 1, - response.text)) + _logger.error( + "API request to %s failed with code %s != 200, retrying up to %s more times. " + "API response body: %s", + url, response.status_code, retries - i - 1, response.text) time.sleep(retry_interval) raise MlflowException("API request to %s failed to return code 200 after %s tries" % (url, retries)) @@ -67,6 +72,11 @@ def http_request_safe(host_creds, endpoint, **kwargs): Wrapper around ``http_request`` that also verifies that the request succeeds with code 200. """ response = http_request(host_creds=host_creds, endpoint=endpoint, **kwargs) + return verify_rest_response(response, endpoint) + + +def verify_rest_response(response, endpoint): + """Verify the return code and raise exception if the request was not successful.""" if response.status_code != 200: base_msg = "API request to endpoint %s failed with error code " \ "%s != 200" % (endpoint, response.status_code) @@ -76,19 +86,6 @@ def http_request_safe(host_creds, endpoint, **kwargs): return response -class NumpyEncoder(JSONEncoder): - """ Special json encoder for numpy types. - Note that some numpy types doesn't have native python equivalence, - hence json.dumps will raise TypeError. - In this case, you'll need to convert your numpy types into its closest python equivalence. 
- """ - - def default(self, o): # pylint: disable=E0202 - if isinstance(o, numpy.generic): - return numpy.asscalar(o) - return JSONEncoder.default(self, o) - - class MlflowHostCreds(object): """ Provides a hostname and optional authentication for talking to an MLflow tracking server. diff --git a/mlflow/utils/search_utils.py b/mlflow/utils/search_utils.py index 234561a2431ff..783eec1232b4b 100644 --- a/mlflow/utils/search_utils.py +++ b/mlflow/utils/search_utils.py @@ -1,45 +1,381 @@ -def does_run_match_clause(run, search_expression): - key_type = search_expression.WhichOneof('expression') - if key_type == 'metric': - key = search_expression.metric.key - metric_type = search_expression.metric.WhichOneof('clause') - if metric_type == 'float': - comparator = search_expression.metric.float.comparator - value = search_expression.metric.float.value - elif metric_type == 'double': - comparator = search_expression.metric.double.comparator - value = search_expression.metric.double.value +import base64 +import json +import sqlparse +from sqlparse.sql import Identifier, Token, Comparison, Statement +from sqlparse.tokens import Token as TokenType + +from mlflow.entities import RunInfo +from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE + + +class SearchUtils(object): + VALID_METRIC_COMPARATORS = set(['>', '>=', '!=', '=', '<', '<=']) + VALID_PARAM_COMPARATORS = set(['!=', '=']) + VALID_TAG_COMPARATORS = set(['!=', '=']) + VALID_STRING_ATTRIBUTE_COMPARATORS = set(['!=', '=']) + VALID_SEARCH_ATTRIBUTE_KEYS = set(RunInfo.get_searchable_attributes()) + VALID_ORDER_BY_ATTRIBUTE_KEYS = set(RunInfo.get_orderable_attributes()) + _METRIC_IDENTIFIER = "metric" + _ALTERNATE_METRIC_IDENTIFIERS = set(["metrics"]) + _PARAM_IDENTIFIER = "parameter" + _ALTERNATE_PARAM_IDENTIFIERS = set(["parameters", "param", "params"]) + _TAG_IDENTIFIER = "tag" + _ALTERNATE_TAG_IDENTIFIERS = set(["tags"]) + _ATTRIBUTE_IDENTIFIER = "attribute" + _ALTERNATE_ATTRIBUTE_IDENTIFIERS = set(["attr", "attributes", "run"]) + _IDENTIFIERS = [_METRIC_IDENTIFIER, _PARAM_IDENTIFIER, _TAG_IDENTIFIER, _ATTRIBUTE_IDENTIFIER] + _VALID_IDENTIFIERS = set(_IDENTIFIERS + + list(_ALTERNATE_METRIC_IDENTIFIERS) + + list(_ALTERNATE_PARAM_IDENTIFIERS) + + list(_ALTERNATE_TAG_IDENTIFIERS) + + list(_ALTERNATE_ATTRIBUTE_IDENTIFIERS)) + STRING_VALUE_TYPES = set([TokenType.Literal.String.Single]) + NUMERIC_VALUE_TYPES = set([TokenType.Literal.Number.Integer, TokenType.Literal.Number.Float]) + + @classmethod + def _trim_ends(cls, string_value): + return string_value[1:-1] + + @classmethod + def _is_quoted(cls, value, pattern): + return len(value) >= 2 and value.startswith(pattern) and value.endswith(pattern) + + @classmethod + def _trim_backticks(cls, entity_type): + """Remove backticks from identifier like `param`, if they exist.""" + if cls._is_quoted(entity_type, "`"): + return cls._trim_ends(entity_type) + return entity_type + + @classmethod + def _strip_quotes(cls, value, expect_quoted_value=False): + """ + Remove quotes for input string. + Values of type strings are expected to have quotes. + Keys containing special characters are also expected to be enclose in quotes. + """ + if cls._is_quoted(value, "'") or cls._is_quoted(value, '"'): + return cls._trim_ends(value) + elif expect_quoted_value: + raise MlflowException("Parameter value is either not quoted or unidentified quote " + "types used for string value %s. Use either single or double " + "quotes." 
% value, error_code=INVALID_PARAMETER_VALUE)
+        else:
+            return value
+
+    @classmethod
+    def _valid_entity_type(cls, entity_type):
+        entity_type = cls._trim_backticks(entity_type)
+        if entity_type not in cls._VALID_IDENTIFIERS:
+            raise MlflowException("Invalid entity type '%s'. "
+                                  "Valid values are %s" % (entity_type, cls._IDENTIFIERS),
+                                  error_code=INVALID_PARAMETER_VALUE)
+
+        if entity_type in cls._ALTERNATE_PARAM_IDENTIFIERS:
+            return cls._PARAM_IDENTIFIER
+        elif entity_type in cls._ALTERNATE_METRIC_IDENTIFIERS:
+            return cls._METRIC_IDENTIFIER
+        elif entity_type in cls._ALTERNATE_TAG_IDENTIFIERS:
+            return cls._TAG_IDENTIFIER
+        elif entity_type in cls._ALTERNATE_ATTRIBUTE_IDENTIFIERS:
+            return cls._ATTRIBUTE_IDENTIFIER
+        else:
+            # one of ("metric", "parameter", "tag", or "attribute"), since it is a valid type
+            return entity_type
+
+    @classmethod
+    def _get_identifier(cls, identifier, valid_attributes):
+        try:
+            entity_type, key = identifier.split(".", 1)
+        except ValueError:
+            raise MlflowException("Invalid identifier '%s'. Columns should be specified as "
+                                  "'attribute.<key>', 'metric.<key>', 'tag.<key>', or "
+                                  "'param.<key>'." % identifier,
+                                  error_code=INVALID_PARAMETER_VALUE)
+        identifier = cls._valid_entity_type(entity_type)
+        key = cls._trim_backticks(cls._strip_quotes(key))
+        if identifier == cls._ATTRIBUTE_IDENTIFIER and key not in valid_attributes:
+            raise MlflowException("Invalid attribute key '{}' specified. Valid keys "
+                                  "are '{}'".format(key, valid_attributes),
+                                  error_code=INVALID_PARAMETER_VALUE)
+        return {"type": identifier, "key": key}
+
+    @classmethod
+    def _get_value(cls, identifier_type, token):
+        if identifier_type == cls._METRIC_IDENTIFIER:
+            if token.ttype not in cls.NUMERIC_VALUE_TYPES:
+                raise MlflowException("Expected numeric value type for metric. "
+                                      "Found {}".format(token.value),
+                                      error_code=INVALID_PARAMETER_VALUE)
+            return token.value
+        elif identifier_type == cls._PARAM_IDENTIFIER or identifier_type == cls._TAG_IDENTIFIER:
+            if token.ttype in cls.STRING_VALUE_TYPES or isinstance(token, Identifier):
+                return cls._strip_quotes(token.value, expect_quoted_value=True)
+            raise MlflowException("Expected a quoted string value for "
+                                  "{identifier_type} (e.g. 'my-value'). Got value "
+                                  "{value}".format(identifier_type=identifier_type,
+                                                   value=token.value),
+                                  error_code=INVALID_PARAMETER_VALUE)
+        elif identifier_type == cls._ATTRIBUTE_IDENTIFIER:
+            if token.ttype in cls.STRING_VALUE_TYPES or isinstance(token, Identifier):
+                return cls._strip_quotes(token.value, expect_quoted_value=True)
+            else:
+                raise MlflowException("Expected a quoted string value for attributes. "
+                                      "Got value {value}".format(value=token.value),
+                                      error_code=INVALID_PARAMETER_VALUE)
+        else:
+            # Expected to be either "param" or "metric".
+            raise MlflowException("Invalid identifier type. Expected one of "
+                                  "{}.".format([cls._METRIC_IDENTIFIER, cls._PARAM_IDENTIFIER]))
+
+    @classmethod
+    def _validate_comparison(cls, tokens):
+        base_error_string = "Invalid comparison clause"
+        if len(tokens) != 3:
+            raise MlflowException("{}. Expected 3 tokens, found {}".format(base_error_string,
+                                                                           len(tokens)),
+                                  error_code=INVALID_PARAMETER_VALUE)
+        if not isinstance(tokens[0], Identifier):
+            raise MlflowException("{}. 
Expected 'Identifier' found '{}'".format(base_error_string, + str(tokens[0])), + error_code=INVALID_PARAMETER_VALUE) + if not isinstance(tokens[1], Token) and tokens[1].ttype != TokenType.Operator.Comparison: + raise MlflowException("{}. Expected comparison found '{}'".format(base_error_string, + str(tokens[1])), + error_code=INVALID_PARAMETER_VALUE) + if not isinstance(tokens[2], Token) and \ + (tokens[2].ttype not in cls.STRING_VALUE_TYPES.union(cls.NUMERIC_VALUE_TYPES) or + isinstance(tokens[2], Identifier)): + raise MlflowException("{}. Expected value token found '{}'".format(base_error_string, + str(tokens[2])), + error_code=INVALID_PARAMETER_VALUE) + + @classmethod + def _get_comparison(cls, comparison): + stripped_comparison = [token for token in comparison.tokens if not token.is_whitespace] + cls._validate_comparison(stripped_comparison) + comp = cls._get_identifier(stripped_comparison[0].value, cls.VALID_SEARCH_ATTRIBUTE_KEYS) + comp["comparator"] = stripped_comparison[1].value + comp["value"] = cls._get_value(comp.get("type"), stripped_comparison[2]) + return comp + + @classmethod + def _invalid_statement_token(cls, token): + if isinstance(token, Comparison): + return False + elif token.is_whitespace: + return False + elif token.match(ttype=TokenType.Keyword, values=["AND"]): return False - if comparator == '>': - return metric.value > value + else: + return True + + @classmethod + def _process_statement(cls, statement): + # check validity + invalids = list(filter(cls._invalid_statement_token, statement.tokens)) + if len(invalids) > 0: + invalid_clauses = ", ".join("'%s'" % token for token in invalids) + raise MlflowException("Invalid clause(s) in filter string: %s" % invalid_clauses, + error_code=INVALID_PARAMETER_VALUE) + return [cls._get_comparison(si) for si in statement.tokens if isinstance(si, Comparison)] + + @classmethod + def _parse_search_filter(cls, filter_string): + if not filter_string: + return [] + try: + parsed = sqlparse.parse(filter_string) + except Exception: + raise MlflowException("Error on parsing filter '%s'" % filter_string, + error_code=INVALID_PARAMETER_VALUE) + if len(parsed) == 0 or not isinstance(parsed[0], Statement): + raise MlflowException("Invalid filter '%s'. Could not be parsed." % + filter_string, error_code=INVALID_PARAMETER_VALUE) + elif len(parsed) > 1: + raise MlflowException("Search filter contained multiple expression '%s'. " + "Provide AND-ed expression list." 
% filter_string,
+                                  error_code=INVALID_PARAMETER_VALUE)
+        return SearchUtils._process_statement(parsed[0])
+
+    @classmethod
+    def _does_run_match_clause(cls, run, sed):
+        key_type = sed.get('type')
+        key = sed.get('key')
+        value = sed.get('value')
+        comparator = sed.get('comparator')
+        if key_type == cls._METRIC_IDENTIFIER:
+            if comparator not in cls.VALID_METRIC_COMPARATORS:
+                raise MlflowException("Invalid comparator '%s' "
+                                      "not one of '%s'" % (comparator,
+                                                           cls.VALID_METRIC_COMPARATORS),
+                                      error_code=INVALID_PARAMETER_VALUE)
+            lhs = run.data.metrics.get(key, None)
+            value = float(value)
+        elif key_type == cls._PARAM_IDENTIFIER:
+            if comparator not in cls.VALID_PARAM_COMPARATORS:
+                raise MlflowException("Invalid comparator '%s' "
+                                      "not one of '%s'" % (comparator, cls.VALID_PARAM_COMPARATORS),
+                                      error_code=INVALID_PARAMETER_VALUE)
+            lhs = run.data.params.get(key, None)
+        elif key_type == cls._TAG_IDENTIFIER:
+            if comparator not in cls.VALID_TAG_COMPARATORS:
+                raise MlflowException("Invalid comparator '%s' "
+                                      "not one of '%s'" % (comparator, cls.VALID_TAG_COMPARATORS),
+                                      error_code=INVALID_PARAMETER_VALUE)
+            lhs = run.data.tags.get(key, None)
+        elif key_type == cls._ATTRIBUTE_IDENTIFIER:
+            if comparator not in cls.VALID_STRING_ATTRIBUTE_COMPARATORS:
+                raise MlflowException("Invalid comparator '{}' not one of "
+                                      "'{}'".format(comparator,
+                                                    cls.VALID_STRING_ATTRIBUTE_COMPARATORS),
+                                      error_code=INVALID_PARAMETER_VALUE)
+            lhs = getattr(run.info, key)
+        else:
+            raise MlflowException("Invalid search expression type '%s'" % key_type,
+                                  error_code=INVALID_PARAMETER_VALUE)
+        if lhs is None:
+            return False
+        elif comparator == '>':
+            return lhs > value
+        elif comparator == '>=':
+            return lhs >= value
+        elif comparator == '=':
+            return lhs == value
+        elif comparator == '!=':
+            return lhs != value
+        elif comparator == '<=':
+            return lhs <= value
+        elif comparator == '<':
+            return lhs < value
+        else:
+            return False
+
+    @classmethod
+    def filter(cls, runs, filter_string):
+        """Filters a set of runs based on a search filter string."""
+        if not filter_string:
+            return runs
+        parsed = cls._parse_search_filter(filter_string)
+
+        def run_matches(run):
+            return all([cls._does_run_match_clause(run, s) for s in parsed])
+        return [run for run in runs if run_matches(run)]
+
+    @classmethod
+    def _parse_order_by(cls, order_by):
+        try:
+            parsed = sqlparse.parse(order_by)
+        except Exception:
+            raise MlflowException("Error on parsing order_by clause '%s'" % order_by,
+                                  error_code=INVALID_PARAMETER_VALUE)
+        if len(parsed) != 1 or not isinstance(parsed[0], Statement):
+            raise MlflowException("Invalid order_by clause '%s'. Could not be parsed." %
+                                  order_by, error_code=INVALID_PARAMETER_VALUE)
+
+        statement = parsed[0]
+        if len(statement.tokens) != 1 or not isinstance(statement[0], Identifier):
+            raise MlflowException("Invalid order_by clause '%s'. Could not be parsed." 
% + order_by, error_code=INVALID_PARAMETER_VALUE) + + token_value = statement.tokens[0].value + is_ascending = True + if token_value.lower().endswith(" desc"): + is_ascending = False + token_value = token_value[0:-len(" desc")] + elif token_value.lower().endswith(" asc"): + token_value = token_value[0:-len(" asc")] + identifier = cls._get_identifier(token_value.strip(), cls.VALID_ORDER_BY_ATTRIBUTE_KEYS) + return (identifier["type"], identifier["key"], is_ascending) + + @classmethod + def _get_value_for_sort(cls, run, key_type, key, ascending): + """Returns a tuple suitable to be used as a sort key for runs.""" + sort_value = None + if key_type == cls._METRIC_IDENTIFIER: + sort_value = run.data.metrics.get(key) + elif key_type == cls._PARAM_IDENTIFIER: + sort_value = run.data.params.get(key) + elif key_type == cls._TAG_IDENTIFIER: + sort_value = run.data.tags.get(key) + elif key_type == cls._ATTRIBUTE_IDENTIFIER: + sort_value = getattr(run.info, key) else: - raise Exception("Invalid comparator '%s' not one of '=, !=" % comparator) - return False + raise MlflowException("Invalid order_by entity type '%s'" % key_type, + error_code=INVALID_PARAMETER_VALUE) + + # Return a key such that None values are always at the end. + if ascending: + return (sort_value is None, sort_value) + return (sort_value is not None, sort_value) + + @classmethod + def sort(cls, runs, order_by_list): + """Sorts a set of runs based on their natural ordering and an overriding set of order_bys. + Runs are naturally ordered first by start time descending, then by run id for tie-breaking. + """ + runs = sorted(runs, key=lambda run: (-run.info.start_time, run.info.run_uuid)) + if not order_by_list: + return runs + # NB: We rely on the stability of Python's sort function, so that we can apply + # the ordering conditions in reverse order. + for order_by_clause in reversed(order_by_list): + (key_type, key, ascending) = cls._parse_order_by(order_by_clause) + # pylint: disable=cell-var-from-loop + runs = sorted(runs, + key=lambda run: cls._get_value_for_sort(run, key_type, key, ascending), + reverse=not ascending) + return runs + + @classmethod + def _parse_start_offset_from_page_token(cls, page_token): + # Note: the page_token is expected to be a base64-encoded JSON that looks like + # { "offset": xxx }. However, this format is not stable, so it should not be + # relied upon outside of this method. 
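+        # For illustration, a token produced by ``_create_page_token`` below
+        # round-trips as follows (a doctest-style sketch, not part of the
+        # implementation):
+        #
+        #     >>> import base64, json
+        #     >>> token = base64.b64encode(json.dumps({"offset": 10}).encode("utf-8"))
+        #     >>> json.loads(base64.b64decode(token))["offset"]
+        #     10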
+ if not page_token: + return 0 + + try: + decoded_token = base64.b64decode(page_token) + except TypeError: + raise MlflowException("Invalid page token, could not base64-decode", + error_code=INVALID_PARAMETER_VALUE) + except base64.binascii.Error: + raise MlflowException("Invalid page token, could not base64-decode", + error_code=INVALID_PARAMETER_VALUE) + + try: + parsed_token = json.loads(decoded_token) + except ValueError: + raise MlflowException("Invalid page token, decoded value=%s" % decoded_token, + error_code=INVALID_PARAMETER_VALUE) + + offset_str = parsed_token.get("offset") + if not offset_str: + raise MlflowException("Invalid page token, parsed value=%s" % parsed_token, + error_code=INVALID_PARAMETER_VALUE) + + try: + offset = int(offset_str) + except ValueError: + raise MlflowException("Invalid page token, not stringable %s" % offset_str, + error_code=INVALID_PARAMETER_VALUE) + + return offset + + @classmethod + def _create_page_token(cls, offset): + return base64.b64encode(json.dumps({"offset": offset}).encode("utf-8")) + + @classmethod + def paginate(cls, runs, page_token, max_results): + """Paginates a set of runs based on an offset encoded into the page_token and a max + results limit. Returns a pair containing the set of paginated runs, followed by + an optional next_page_token if there are further results that need to be returned. + """ + start_offset = cls._parse_start_offset_from_page_token(page_token) + final_offset = start_offset + max_results + + paginated_runs = runs[start_offset:final_offset] + next_page_token = None + if final_offset < len(runs): + next_page_token = cls._create_page_token(final_offset) + return (paginated_runs, next_page_token) diff --git a/mlflow/utils/time_utils.py b/mlflow/utils/time_utils.py new file mode 100644 index 0000000000000..95f82aa303455 --- /dev/null +++ b/mlflow/utils/time_utils.py @@ -0,0 +1,11 @@ +import datetime +from pytz import reference + + +def conv_longdate_to_str(longdate, local_tz=True): + date_time = datetime.datetime.fromtimestamp(longdate / 1000.0) + str_long_date = date_time.strftime('%Y-%m-%d %H:%M:%S') + if local_tz: + str_long_date += " " + reference.LocalTimezone().tzname(date_time) + + return str_long_date diff --git a/mlflow/utils/validation.py b/mlflow/utils/validation.py index 0869a9f967848..0aa03afa5f14d 100644 --- a/mlflow/utils/validation.py +++ b/mlflow/utils/validation.py @@ -1,61 +1,207 @@ """ Utilities for validating user inputs such as metric names and parameter names. """ -import os.path +import numbers +import posixpath import re +import numpy as np + from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.store.dbmodels.db_types import DATABASE_ENGINES _VALID_PARAM_AND_METRIC_NAMES = re.compile(r"^[/\w.\- ]*$") -# Regex for valid run IDs: must be a 32-character hex string. -_RUN_ID_REGEX = re.compile(r"^[0-9a-f]{32}$") +# Regex for valid run IDs: must be an alphanumeric string of length 1 to 256. +_RUN_ID_REGEX = re.compile(r"^[a-zA-Z0-9][\w\-]{0,255}$") + +_EXPERIMENT_ID_REGEX = re.compile(r"^[a-zA-Z0-9][\w\-]{0,63}$") _BAD_CHARACTERS_MESSAGE = ( "Names may only contain alphanumerics, underscores (_), dashes (-), periods (.)," " spaces ( ), and slashes (/)." 
)

+MAX_PARAMS_TAGS_PER_BATCH = 100
+MAX_METRICS_PER_BATCH = 1000
+MAX_ENTITIES_PER_BATCH = 1000
+MAX_BATCH_LOG_REQUEST_SIZE = int(1e6)
+MAX_PARAM_VAL_LENGTH = 250
+MAX_TAG_VAL_LENGTH = 250
+MAX_ENTITY_KEY_LENGTH = 250
+
+_UNSUPPORTED_DB_TYPE_MSG = "Supported database engines are {%s}" % ', '.join(DATABASE_ENGINES)
+
 
 def bad_path_message(name):
     return (
         "Names may be treated as files in certain cases, and must not resolve to other names"
         " when treated as such. This name would resolve to '%s'"
-    ) % os.path.normpath(name)
+    ) % posixpath.normpath(name)
 
 
 def path_not_unique(name):
-    norm = os.path.normpath(name)
+    norm = posixpath.normpath(name)
     return norm != name or norm == '.' or norm.startswith('..') or norm.startswith('/')
 
 
 def _validate_metric_name(name):
     """Check that `name` is a valid metric name and raise an exception if it isn't."""
     if not _VALID_PARAM_AND_METRIC_NAMES.match(name):
-        raise Exception("Invalid metric name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE))
+        raise MlflowException("Invalid metric name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE),
+                              INVALID_PARAMETER_VALUE)
     if path_not_unique(name):
-        raise Exception("Invalid metric name: '%s'. %s" % (name, bad_path_message(name)))
+        raise MlflowException("Invalid metric name: '%s'. %s" % (name, bad_path_message(name)),
+                              INVALID_PARAMETER_VALUE)
+
+
+def _validate_metric(key, value, timestamp, step):
+    """
+    Check that a metric with the specified key, value, and timestamp is valid and raise an
+    exception if it isn't.
+    """
+    _validate_metric_name(key)
+    if not isinstance(value, numbers.Number) or value > np.finfo(np.float64).max \
+            or value < np.finfo(np.float64).min:
+        raise MlflowException(
+            "Got invalid value %s for metric '%s' (timestamp=%s). Please specify value as a valid "
+            "double (64-bit floating point)" % (value, key, timestamp),
+            INVALID_PARAMETER_VALUE)
+
+    if not isinstance(timestamp, numbers.Number) or timestamp < 0:
+        raise MlflowException(
+            "Got invalid timestamp %s for metric '%s' (value=%s). Timestamp must be a nonnegative "
+            "long (64-bit integer)" % (timestamp, key, value),
+            INVALID_PARAMETER_VALUE)
+
+    if not isinstance(step, numbers.Number):
+        raise MlflowException(
+            "Got invalid step %s for metric '%s' (value=%s). Step must be a valid long "
+            "(64-bit integer)." % (step, key, value),
+            INVALID_PARAMETER_VALUE)
+
+
+def _validate_param(key, value):
+    """
+    Check that a param with the specified key & value is valid and raise an exception if it
+    isn't.
+    """
+    _validate_param_name(key)
+    _validate_length_limit("Param key", MAX_ENTITY_KEY_LENGTH, key)
+    _validate_length_limit("Param value", MAX_PARAM_VAL_LENGTH, value)
+
+
+def _validate_tag(key, value):
+    """
+    Check that a tag with the specified key & value is valid and raise an exception if it isn't.
+    """
+    _validate_tag_name(key)
+    _validate_length_limit("Tag key", MAX_ENTITY_KEY_LENGTH, key)
+    _validate_length_limit("Tag value", MAX_TAG_VAL_LENGTH, value)
 
 
 def _validate_param_name(name):
     """Check that `name` is a valid parameter name and raise an exception if it isn't."""
     if not _VALID_PARAM_AND_METRIC_NAMES.match(name):
-        raise Exception("Invalid parameter name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE))
+        raise MlflowException("Invalid parameter name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE),
+                              INVALID_PARAMETER_VALUE)
     if path_not_unique(name):
-        raise Exception("Invalid parameter name: '%s'. %s" % (name, bad_path_message(name)))
+        raise MlflowException("Invalid parameter name: '%s'. 
%s" % (name, bad_path_message(name)),
+                              INVALID_PARAMETER_VALUE)
 
 
 def _validate_tag_name(name):
     """Check that `name` is a valid tag name and raise an exception if it isn't."""
     # Reuse param & metric check.
     if not _VALID_PARAM_AND_METRIC_NAMES.match(name):
-        raise Exception("Invalid tag name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE))
+        raise MlflowException("Invalid tag name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE),
+                              INVALID_PARAMETER_VALUE)
     if path_not_unique(name):
-        raise Exception("Invalid tag name: '%s'. %s" % (name, bad_path_message(name)))
+        raise MlflowException("Invalid tag name: '%s'. %s" % (name, bad_path_message(name)),
+                              INVALID_PARAMETER_VALUE)
+
+
+def _validate_length_limit(entity_name, limit, value):
+    if len(value) > limit:
+        raise MlflowException(
+            "%s '%s' had length %s, which exceeded length limit of %s" %
+            (entity_name, value, len(value), limit),
+            error_code=INVALID_PARAMETER_VALUE)
 
 
 def _validate_run_id(run_id):
     """Check that `run_id` is a valid run ID and raise an exception if it isn't."""
     if _RUN_ID_REGEX.match(run_id) is None:
         raise MlflowException("Invalid run ID: '%s'" % run_id, error_code=INVALID_PARAMETER_VALUE)
+
+
+def _validate_experiment_id(exp_id):
+    """Check that `experiment_id` is a valid string or None, raise an exception if it isn't."""
+    if exp_id is not None and _EXPERIMENT_ID_REGEX.match(exp_id) is None:
+        raise MlflowException("Invalid experiment ID: '%s'" % exp_id,
+                              error_code=INVALID_PARAMETER_VALUE)
+
+
+def _validate_batch_limit(entity_name, limit, length):
+    if length > limit:
+        error_msg = ("A batch logging request can contain at most {limit} {name}. "
+                     "Got {count} {name}. Please split up {name} across multiple requests and try "
+                     "again.").format(name=entity_name, count=length, limit=limit)
+        raise MlflowException(error_msg, error_code=INVALID_PARAMETER_VALUE)
+
+
+def _validate_batch_log_limits(metrics, params, tags):
+    """Validate that the provided batched logging arguments are within expected limits."""
+    _validate_batch_limit(entity_name="metrics", limit=MAX_METRICS_PER_BATCH, length=len(metrics))
+    _validate_batch_limit(entity_name="params", limit=MAX_PARAMS_TAGS_PER_BATCH, length=len(params))
+    _validate_batch_limit(entity_name="tags", limit=MAX_PARAMS_TAGS_PER_BATCH, length=len(tags))
+    total_length = len(metrics) + len(params) + len(tags)
+    _validate_batch_limit(entity_name="metrics, params, and tags",
+                          limit=MAX_ENTITIES_PER_BATCH, length=total_length)
+
+
+def _validate_batch_log_data(metrics, params, tags):
+    for metric in metrics:
+        _validate_metric(metric.key, metric.value, metric.timestamp, metric.step)
+        # TODO: move _validate_length_limit calls into _validate_metric etc. This would be a
+        # breaking change as _validate_metric is also used in the single-entry log_metric API. Thus
+        # we defer it for now to allow for a release of the batched logging APIs without breaking
+        # changes to other APIs. 
See related discussion in + # https://github.com/mlflow/mlflow/issues/985 + _validate_length_limit("Metric name", MAX_ENTITY_KEY_LENGTH, metric.key) + for param in params: + _validate_param(param.key, param.value) + for tag in tags: + _validate_tag(tag.key, tag.value) + + +def _validate_batch_log_api_req(json_req): + if len(json_req) > MAX_BATCH_LOG_REQUEST_SIZE: + error_msg = ("Batched logging API requests must be at most {limit} bytes, got a " + "request of size {size}.").format( + limit=MAX_BATCH_LOG_REQUEST_SIZE, size=len(json_req)) + raise MlflowException(error_msg, error_code=INVALID_PARAMETER_VALUE) + + +def _validate_experiment_name(experiment_name): + """Check that `experiment_name` is a valid string and raise an exception if it isn't.""" + if experiment_name == "" or experiment_name is None: + raise MlflowException("Invalid experiment name: '%s'" % experiment_name, + error_code=INVALID_PARAMETER_VALUE) + if not isinstance(experiment_name, str): + raise MlflowException("Invalid experiment name: %s. Expects a string." % experiment_name, + error_code=INVALID_PARAMETER_VALUE) + + +def _validate_experiment_artifact_location(artifact_location): + if artifact_location is not None and artifact_location.startswith("runs:"): + raise MlflowException("Artifact location cannot be a runs:/ URI. Given: '%s'" + % artifact_location, + error_code=INVALID_PARAMETER_VALUE) + + +def _validate_db_type_string(db_type): + """validates db_type parsed from DB URI is supported""" + if db_type not in DATABASE_ENGINES: + error_msg = "Invalid database engine: '%s'. '%s'" % (db_type, _UNSUPPORTED_DB_TYPE_MSG) + raise MlflowException(error_msg, INVALID_PARAMETER_VALUE) diff --git a/mlflow/version.py b/mlflow/version.py index 1febf6fc0c6b3..3a31524cd6a90 100644 --- a/mlflow/version.py +++ b/mlflow/version.py @@ -1,4 +1,4 @@ # Copyright 2018 Databricks, Inc. -VERSION = '0.7.1.dev' +VERSION = '1.0.0' diff --git a/pylintrc b/pylintrc index d4ca2dfb7d806..2bd0f4d8797be 100644 --- a/pylintrc +++ b/pylintrc @@ -7,7 +7,7 @@ extension-pkg-whitelist= # Add files or directories to the blacklist. They should be base names, not # paths. -ignore=build,protos,sdk +ignore=build,protos,sdk,db_migrations,temporary_db_migrations_for_pre_1_users # Add files or directories matching the regex patterns to the blacklist. The @@ -419,7 +419,7 @@ ignored-classes=optparse.Values,thread._local,_thread._local # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. -ignored-modules=distutils +ignored-modules=distutils,tensorflow.keras # Show a hint with possible names when a member name was not found. The aspect # of finding the hint is based on edit distance. 
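To make the batch-logging limits introduced in mlflow/utils/validation.py above concrete, here is a minimal sketch of how a store implementation might invoke the validators before persisting a batch (assuming the ``Metric``, ``Param``, and ``RunTag`` constructors exported from ``mlflow.entities``; the values shown are illustrative):

    from mlflow.entities import Metric, Param, RunTag
    from mlflow.utils.validation import _validate_batch_log_limits, _validate_batch_log_data

    metrics = [Metric(key="loss", value=0.12, timestamp=1556053000000, step=0)]
    params = [Param(key="lr", value="0.01")]
    tags = [RunTag(key="team", value="platform")]

    # Raises MlflowException(INVALID_PARAMETER_VALUE) when a batch exceeds
    # 1000 metrics, 100 params, 100 tags, or 1000 entities in total, or when
    # a key or string value exceeds the 250-character limits defined above.
    _validate_batch_log_limits(metrics, params, tags)
    _validate_batch_log_data(metrics, params, tags)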
diff --git a/setup.py b/setup.py
index 916dc9b2c6976..ef7ff055fe408 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
 import imp
 import os
+import sys
 from setuptools import setup, find_packages
 
 version = imp.load_source(
@@ -9,7 +10,7 @@
 # Get a list of all files in the JS directory to include in our module
 def package_files(directory):
     paths = []
-    for (path, directories, filenames) in os.walk(directory):
+    for (path, _, filenames) in os.walk(directory):
         for filename in filenames:
             paths.append(os.path.join('..', path, filename))
     return paths
@@ -18,33 +19,49 @@
 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build
 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html"
 js_files = package_files('mlflow/server/js/build')
-sagmaker_server_files = package_files("mlflow/sagemaker/container")
+models_container_server_files = package_files("mlflow/models/container")
+alembic_files = ["../mlflow/store/db_migrations/alembic.ini",
+                 "../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini"]
 
 setup(
     name='mlflow',
     version=version,
     packages=find_packages(exclude=['tests', 'tests.*']),
-    package_data={"mlflow": js_files + sagmaker_server_files},
+    package_data={"mlflow": js_files + models_container_server_files + alembic_files},
     install_requires=[
-        'click>=6.7',
-        'databricks-cli>=0.8.0',
+        'alembic',
+        'click>=7.0',
+        'cloudpickle',
+        'databricks-cli>=0.8.7',
         'requests>=2.17.3',
         'six>=1.10.0',
-        'gunicorn',
+        'waitress' if sys.platform == 'win32' else 'gunicorn',
         'Flask',
         'numpy',
         'pandas',
-        'scipy',
-        'scikit-learn',
         'python-dateutil',
         'protobuf>=3.6.0',
         'gitpython>=2.1.0',
         'pyyaml',
-        'boto3>=1.7.12',
         'querystring_parser',
         'simplejson',
-        'mleap>=0.8.1',
+        'docker>=3.6.0',
+        'entrypoints',
+        'sqlparse',
+        'sqlalchemy',
+        'gorilla',
     ],
+    extras_require={
+        'extras': [
+            "scikit-learn; python_version >= '3.5'",
+            # scikit-learn 0.20 is the last version to support Python 2.x & Python 3.4. 
+ "scikit-learn==0.20; python_version < '3.5'", + 'boto3>=1.7.12', + 'mleap>=0.8.1', + 'azure-storage', + 'google-cloud-storage', + ], + }, entry_points=''' [console_scripts] mlflow=mlflow.cli:cli diff --git a/test-requirements.txt b/test-requirements.txt index d0f49139c2031..aac70e9436a43 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,23 +1,3 @@ -# Test reqs -azure-storage -google-cloud-storage -h2o -# TODO: don't pin boto version once https://github.com/spulec/moto/issues/1793 is addressed -boto3==1.7.84 -mock==2.0.0 -moto==1.3.4 -prospector[with_pyroma]==0.12.7 -pep8==1.7.1 -pyarrow -pylint==1.8.2 -pyspark -pytest==3.2.1 -pytest-cov -rstcheck==3.2 -scipy -tensorflow -torch -torchvision -pysftp -# TODO: don't pin Keras version once https://github.com/keras-team/keras/issues/11276 is addressed -keras==2.2.2 +-r travis/small-requirements.txt +-r travis/large-requirements.txt +-r travis/lint-requirements.txt diff --git a/tests/__init__.py b/tests/__init__.py index e69de29bb2d1d..0474b96f1f9e1 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,3 @@ +from mlflow.utils.logging_utils import _configure_mlflow_loggers + +_configure_mlflow_loggers(root_module_name=__name__) diff --git a/tests/autologging/test_tensorflow_autolog.py b/tests/autologging/test_tensorflow_autolog.py new file mode 100644 index 0000000000000..0f38a82deebf4 --- /dev/null +++ b/tests/autologging/test_tensorflow_autolog.py @@ -0,0 +1,207 @@ +# pep8: disable=E501 + +from __future__ import print_function + +import collections +import shutil +import pytest +import tempfile + +import numpy as np +import pandas as pd +import tensorflow as tf +from tensorflow.python.keras import layers + +import mlflow +import mlflow.tensorflow +import mlflow.keras + +SavedModelInfo = collections.namedtuple( + "SavedModelInfo", + ["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"]) + +client = mlflow.tracking.MlflowClient() + + +@pytest.fixture +def random_train_data(): + return np.random.random((1000, 32)) + + +@pytest.fixture +def tf_keras_random_data_run(random_train_data): + mlflow.tensorflow.autolog(metrics_every_n_steps=5) + + def random_one_hot_labels(shape): + n, n_class = shape + classes = np.random.randint(0, n_class, n) + labels = np.zeros((n, n_class)) + labels[np.arange(n), classes] = 1 + return labels + + with mlflow.start_run() as run: + data = random_train_data + labels = random_one_hot_labels((1000, 10)) + + model = tf.keras.Sequential() + + model.add(layers.Dense(64, activation='relu', input_shape=(32,))) + model.add(layers.Dense(64, activation='relu')) + model.add(layers.Dense(10, activation='softmax')) + + model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) + + model.fit(data, labels, epochs=10) + + return client.get_run(run.info.run_id) + + +@pytest.mark.large +def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run): + data = tf_keras_random_data_run.data + + assert 'epoch_acc' in data.metrics + assert 'epoch_loss' in data.metrics + assert 'optimizer_name' in data.params + assert data.params['optimizer_name'] == 'AdamOptimizer' + assert 'summary' in tf_keras_random_data_run.data.tags + assert 'Total params: 6,922' in tf_keras_random_data_run.data.tags['summary'] + all_epoch_acc = client.get_metric_history(tf_keras_random_data_run.info.run_id, 'epoch_acc') + assert all((x.step - 1) % 5 == 0 for x in all_epoch_acc) + + +@pytest.mark.large +def 
test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data): + artifacts = client.list_artifacts(tf_keras_random_data_run.info.run_id) + artifacts = map(lambda x: x.path, artifacts) + assert 'model' in artifacts + assert 'tensorboard_logs' in artifacts + model = mlflow.keras.load_model("runs:/" + tf_keras_random_data_run.info.run_id + + "/model") + model.predict(random_train_data) + + +@pytest.fixture +def tf_core_random_tensors(): + mlflow.tensorflow.autolog(metrics_every_n_steps=4) + with mlflow.start_run() as run: + sess = tf.Session() + a = tf.constant(3.0, dtype=tf.float32) + b = tf.constant(4.0) + total = a + b + tf.summary.scalar('a', a) + tf.summary.scalar('b', b) + merged = tf.summary.merge_all() + dir = tempfile.mkdtemp() + writer = tf.summary.FileWriter(dir, sess.graph) + with sess.as_default(): + for i in range(40): + summary, _ = sess.run([merged, total]) + writer.add_summary(summary, global_step=i) + shutil.rmtree(dir) + writer.close() + sess.close() + + return client.get_run(run.info.run_id) + + +@pytest.mark.large +def test_tf_core_autolog_logs_scalars(tf_core_random_tensors): + assert 'a' in tf_core_random_tensors.data.metrics + assert tf_core_random_tensors.data.metrics['a'] == 3.0 + assert 'b' in tf_core_random_tensors.data.metrics + assert tf_core_random_tensors.data.metrics['b'] == 4.0 + all_a = client.get_metric_history(tf_core_random_tensors.info.run_id, 'a') + assert all((x.step - 1) % 4 == 0 for x in all_a) + assert mlflow.active_run() is None + + +@pytest.fixture +def tf_estimator_random_data_run(): + mlflow.tensorflow.autolog() + with mlflow.start_run() as run: + dir = tempfile.mkdtemp() + CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species'] + SPECIES = ['Setosa', 'Versicolor', 'Virginica'] + + train_path = tf.keras.utils.get_file( + "iris_training.csv", "https://storage.googleapis.com/download" + ".tensorflow.org/data/iris_training.csv") + test_path = tf.keras.utils.get_file( + "iris_test.csv", "https://storage.googleapis.com/download" + ".tensorflow.org/data/iris_test.csv") + + train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0) + test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0) + + train_y = train.pop('Species') + test_y = test.pop('Species') + + def input_fn(features, labels, training=True, batch_size=256): + """An input function for training or evaluating""" + # Convert the inputs to a Dataset. + dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels)) + + # Shuffle and repeat if you are in training mode. + if training: + dataset = dataset.shuffle(1000).repeat() + + return dataset.batch(batch_size) + + my_feature_columns = [] + for key in train.keys(): + my_feature_columns.append(tf.feature_column.numeric_column(key=key)) + + feature_spec = {} + for feature in CSV_COLUMN_NAMES: + feature_spec[feature] = tf.placeholder(dtype="float", name=feature, shape=[150]) + + receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec) + + classifier = tf.estimator.DNNClassifier( + feature_columns=my_feature_columns, + # Two hidden layers of 10 nodes each. + hidden_units=[30, 10], + # The model must choose between 3 classes. 
+ n_classes=3, + model_dir=dir) + + classifier.train( + input_fn=lambda: input_fn(train, train_y, training=True), + steps=500) + classifier.export_saved_model(dir, receiver_fn) + + shutil.rmtree(dir) + return client.get_run(run.info.run_id) + + +@pytest.mark.large +def test_tf_estimator_autolog_logs_metrics(tf_estimator_random_data_run): + assert 'loss' in tf_estimator_random_data_run.data.metrics + metrics = client.get_metric_history(tf_estimator_random_data_run.info.run_id, 'loss') + assert all((x.step-1) % 100 == 0 for x in metrics) + + +@pytest.mark.large +def test_tf_keras_autolog_model_can_load_from_artifact(tf_estimator_random_data_run): + artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id) + artifacts = map(lambda x: x.path, artifacts) + assert 'model' in artifacts + session = tf.Session() + model = mlflow.tensorflow.load_model("runs:/" + tf_estimator_random_data_run.info.run_id + + "/model", session) + + +@pytest.fixture +def duplicate_autolog_tf_estimator_run(): + mlflow.tensorflow.autolog(metrics_every_n_steps=23) # 23 is prime; no false positives in test + run = tf_estimator_random_data_run() + return run # should be autologged every 4 steps + + +@pytest.mark.large +def test_duplicate_autolog_second_overrides(duplicate_autolog_tf_estimator_run): + metrics = client.get_metric_history(duplicate_autolog_tf_estimator_run.info.run_id, 'loss') + assert all((x.step - 1) % 4 == 0 for x in metrics) diff --git a/tests/azureml/test_image_creation.py b/tests/azureml/test_image_creation.py new file mode 100644 index 0000000000000..6bec1284ffb98 --- /dev/null +++ b/tests/azureml/test_image_creation.py @@ -0,0 +1,711 @@ +from __future__ import print_function + +import sys +import os +import json +import pytest +import yaml +import mock +import numpy as np +from mock import Mock + +import pandas as pd +import pandas.testing +import sklearn.datasets as datasets +import sklearn.linear_model as glm +from keras.models import Sequential +from keras.layers import Dense +from click.testing import CliRunner + +import mlflow +import mlflow.azureml +import mlflow.azureml.cli +import mlflow.keras +import mlflow.sklearn +from mlflow import pyfunc +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.file_utils import TempDir + +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import + +pytestmark = pytest.mark.skipif( + (sys.version_info < (3, 0)), + reason="Tests require Python 3 to run!") + + +class AzureMLMocks: + + def __init__(self): + self.mocks = { + "register_model": mock.patch("azureml.core.model.Model.register"), + "get_model_path": mock.patch("azureml.core.model.Model.get_model_path"), + "create_image": mock.patch("azureml.core.Image.create"), + "load_workspace": mock.patch("azureml.core.Workspace.get"), + } + + def __getitem__(self, key): + return self.mocks[key] + + def __enter__(self): + for key, mock in self.mocks.items(): + self.mocks[key] = mock.__enter__() + return self + + def __exit__(self, *args): + for mock in self.mocks.values(): + mock.__exit__(*args) + + +def get_azure_workspace(): + # pylint: disable=import-error + from azureml.core import Workspace + return Workspace.get("test_workspace") + + 
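+# Usage sketch for the mock harness above (the tests below follow this pattern):
+# entering ``AzureMLMocks`` patches the ``azureml`` SDK entry points, so image
+# builds can be exercised and asserted on without a real Azure workspace, e.g.:
+#
+#     with AzureMLMocks() as aml_mocks:
+#         workspace = get_azure_workspace()
+#         mlflow.azureml.build_image(model_uri="model", workspace=workspace)
+#         assert aml_mocks["register_model"].call_count == 1
+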
+@pytest.fixture(scope="module") +def sklearn_data(): + iris = datasets.load_iris() + x = iris.data[:, :2] # we only take the first two features. + y = iris.target + return x, y + + +@pytest.fixture(scope="module") +def sklearn_model(sklearn_data): + x, y = sklearn_data + linear_lr = glm.LogisticRegression() + linear_lr.fit(x, y) + return linear_lr + + +@pytest.fixture(scope="module") +def keras_data(): + iris = datasets.load_iris() + data = pd.DataFrame(data=np.c_[iris['data'], iris['target']], + columns=iris['feature_names'] + ['target']) + y = data['target'] + x = data.drop('target', axis=1) + return x, y + + +@pytest.fixture(scope="module") +def keras_model(keras_data): + x, y = keras_data + model = Sequential() + model.add(Dense(3, input_dim=4)) + model.add(Dense(1)) + model.compile(loss='mean_squared_error', optimizer='SGD') + model.fit(x, y) + return model + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_with_absolute_model_path_calls_expected_azure_routines( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace) + + assert aml_mocks["register_model"].call_count == 1 + assert aml_mocks["create_image"].call_count == 1 + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_with_relative_model_path_calls_expected_azure_routines( + sklearn_model): + with TempDir(chdr=True): + model_path = "model" + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace) + + assert aml_mocks["register_model"].call_count == 1 + assert aml_mocks["create_image"].call_count == 1 + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_with_runs_uri_calls_expected_azure_routines(sklearn_model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=sklearn_model, artifact_path=artifact_path) + run_id = mlflow.active_run().info.run_id + + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + model_uri = "runs:///{run_id}/{artifact_path}".format( + run_id=run_id, artifact_path=artifact_path) + mlflow.azureml.build_image(model_uri=model_uri, workspace=workspace) + + assert aml_mocks["register_model"].call_count == 1 + assert aml_mocks["create_image"].call_count == 1 + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_with_remote_uri_calls_expected_azure_routines( + sklearn_model, model_path, mock_s3_bucket): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + artifact_path = "model" + artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + s3_artifact_repo = S3ArtifactRepository(artifact_root) + s3_artifact_repo.log_artifacts(model_path, artifact_path=artifact_path) + model_uri = artifact_root + "/" + artifact_path + + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_uri, workspace=workspace) + + assert aml_mocks["register_model"].call_count == 1 + assert aml_mocks["create_image"].call_count == 1 + + +@pytest.mark.large 
+@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_synchronous_build_image_awaits_azure_image_creation(sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks(): + workspace = get_azure_workspace() + image, _ = mlflow.azureml.build_image( + model_uri=model_path, workspace=workspace, synchronous=True) + image.wait_for_creation.assert_called_once() + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_asynchronous_build_image_does_not_await_azure_image_creation(sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks(): + workspace = get_azure_workspace() + image, _ = mlflow.azureml.build_image( + model_uri=model_path, workspace=workspace, synchronous=False) + image.wait_for_creation.assert_not_called() + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_registers_model_and_creates_image_with_specified_names( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + model_name = "MODEL_NAME_1" + image_name = "IMAGE_NAME_1" + mlflow.azureml.build_image( + model_uri=model_path, workspace=workspace, model_name=model_name, + image_name=image_name) + + register_model_call_args = aml_mocks["register_model"].call_args_list + assert len(register_model_call_args) == 1 + _, register_model_call_kwargs = register_model_call_args[0] + assert register_model_call_kwargs["model_name"] == model_name + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + assert create_image_call_kwargs["name"] == image_name + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_generates_model_and_image_names_meeting_azureml_resource_naming_requirements( + sklearn_model, model_path): + aml_resource_name_max_length = 32 + + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace) + + register_model_call_args = aml_mocks["register_model"].call_args_list + assert len(register_model_call_args) == 1 + _, register_model_call_kwargs = register_model_call_args[0] + called_model_name = register_model_call_kwargs["model_name"] + assert len(called_model_name) <= aml_resource_name_max_length + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + called_image_name = create_image_call_kwargs["name"] + assert len(called_image_name) <= aml_resource_name_max_length + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_passes_model_conda_environment_to_azure_image_creation_routine( + sklearn_model, model_path): + sklearn_conda_env_text = """\ + name: sklearn-env + dependencies: + - scikit-learn + """ + with TempDir(chdr=True) as tmp: + sklearn_conda_env_path = tmp.path("conda.yaml") + with open(sklearn_conda_env_path, "w") as f: + f.write(sklearn_conda_env_text) + + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path, + conda_env=sklearn_conda_env_path) + + # Mock the TempDir.__exit__ function to ensure that the enclosing 
temporary + # directory is not deleted + with AzureMLMocks() as aml_mocks,\ + mock.patch("mlflow.utils.file_utils.TempDir.path") as tmpdir_path_mock,\ + mock.patch("mlflow.utils.file_utils.TempDir.__exit__"): + def get_mock_path(subpath): + # Our current working directory is a temporary directory. Therefore, it is safe to + # directly return the specified subpath. + return subpath + tmpdir_path_mock.side_effect = get_mock_path + + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace) + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + image_config = create_image_call_kwargs["image_config"] + assert image_config.conda_file is not None + with open(image_config.conda_file, "r") as f: + assert yaml.safe_load(f.read()) == yaml.safe_load(sklearn_conda_env_text) + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_includes_default_metadata_in_azure_image_and_model_tags(sklearn_model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=sklearn_model, artifact_path=artifact_path) + run_id = mlflow.active_run().info.run_id + model_uri = "runs:///{run_id}/{artifact_path}".format( + run_id=run_id, artifact_path=artifact_path) + model_config = Model.load( + os.path.join(_download_artifact_from_uri(artifact_uri=model_uri), "MLmodel")) + + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_uri, workspace=workspace) + + register_model_call_args = aml_mocks["register_model"].call_args_list + assert len(register_model_call_args) == 1 + _, register_model_call_kwargs = register_model_call_args[0] + called_tags = register_model_call_kwargs["tags"] + assert called_tags["model_uri"] == model_uri + assert called_tags["python_version"] ==\ + model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.PY_VERSION] + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + image_config = create_image_call_kwargs["image_config"] + assert image_config.tags["model_uri"] == model_uri + assert image_config.tags["python_version"] ==\ + model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.PY_VERSION] + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_includes_user_specified_tags_in_azure_image_and_model_tags( + sklearn_model, model_path): + custom_tags = { + "User": "Corey", + "Date": "Today", + "Other": "Entry", + } + + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace, tags=custom_tags) + + register_model_call_args = aml_mocks["register_model"].call_args_list + assert len(register_model_call_args) == 1 + _, register_model_call_kwargs = register_model_call_args[0] + called_tags = register_model_call_kwargs["tags"] + assert custom_tags.items() <= called_tags.items() + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + image_config = create_image_call_kwargs["image_config"] + assert custom_tags.items() <= image_config.tags.items() + + +@pytest.mark.large 
+@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_includes_user_specified_description_in_azure_image_and_model_tags( + sklearn_model, model_path): + custom_description = "a custom description" + + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks: + workspace = get_azure_workspace() + mlflow.azureml.build_image( + model_uri=model_path, workspace=workspace, description=custom_description) + + register_model_call_args = aml_mocks["register_model"].call_args_list + assert len(register_model_call_args) == 1 + _, register_model_call_kwargs = register_model_call_args[0] + assert register_model_call_kwargs["description"] == custom_description + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + image_config = create_image_call_kwargs["image_config"] + assert image_config.description == custom_description + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_throws_exception_if_model_does_not_contain_pyfunc_flavor( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + model_config_path = os.path.join(model_path, "MLmodel") + model_config = Model.load(model_config_path) + del model_config.flavors[pyfunc.FLAVOR_NAME] + model_config.save(model_config_path) + + with AzureMLMocks(), pytest.raises(MlflowException) as exc: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace) + assert exc.error_code == INVALID_PARAMETER_VALUE + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_throws_exception_if_model_python_version_is_less_than_three( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + model_config_path = os.path.join(model_path, "MLmodel") + model_config = Model.load(model_config_path) + model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.PY_VERSION] = "2.7.6" + model_config.save(model_config_path) + + with AzureMLMocks(), pytest.raises(MlflowException) as exc: + workspace = get_azure_workspace() + mlflow.azureml.build_image(model_uri=model_path, workspace=workspace) + assert exc.error_code == INVALID_PARAMETER_VALUE + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_build_image_includes_mlflow_home_as_file_dependency_if_specified( + sklearn_model, model_path): + def mock_create_dockerfile(output_path, *args, **kwargs): + # pylint: disable=unused-argument + with open(output_path, "w") as f: + f.write("Dockerfile contents") + + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + with AzureMLMocks() as aml_mocks, TempDir() as tmp,\ + mock.patch("mlflow.azureml._create_dockerfile") as create_dockerfile_mock: + create_dockerfile_mock.side_effect = mock_create_dockerfile + + # Write a mock `setup.py` file to the mlflow home path so that it will be recognized + # as a viable MLflow source directory during the image build process + mlflow_home = tmp.path() + with open(os.path.join(mlflow_home, "setup.py"), "w") as f: + f.write("setup instructions") + + workspace = get_azure_workspace() + mlflow.azureml.build_image( + model_uri=model_path, workspace=workspace, mlflow_home=mlflow_home) + + assert len(create_dockerfile_mock.call_args_list) == 1 + _, create_dockerfile_kwargs = create_dockerfile_mock.call_args_list[0] + # The 
path to MLflow that is referenced by the Docker container may differ from the
+        # user-specified `mlflow_home` path if the directory is copied before image building
+        # for safety
+        dockerfile_mlflow_path = create_dockerfile_kwargs["mlflow_path"]
+
+        create_image_call_args = aml_mocks["create_image"].call_args_list
+        assert len(create_image_call_args) == 1
+        _, create_image_call_kwargs = create_image_call_args[0]
+        image_config = create_image_call_kwargs["image_config"]
+        assert dockerfile_mlflow_path in image_config.dependencies
+
+
+@pytest.mark.large
+def test_execution_script_init_method_attempts_to_load_correct_azure_ml_model(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path)
+
+    model_name = "test_model_name"
+    model_version = 1
+
+    model_mock = Mock()
+    model_mock.name = model_name
+    model_mock.version = model_version
+
+    with TempDir() as tmp:
+        execution_script_path = tmp.path("dest")
+        mlflow.azureml._create_execution_script(
+            output_path=execution_script_path, azure_model=model_mock)
+
+        with open(execution_script_path, "r") as f:
+            execution_script = f.read()
+
+        # Define the `init` and `run` methods contained in the execution script
+        # pylint: disable=exec-used
+        # Define an empty globals dictionary to ensure that the initialization of the
+        # execution script does not depend on the current state of the test environment
+        globs = {}
+        exec(execution_script, globs)
+        # Update the set of global variables available to the test environment to include
+        # functions defined during the evaluation of the execution script
+        globals().update(globs)
+        with AzureMLMocks() as aml_mocks:
+            aml_mocks["get_model_path"].side_effect = lambda *args, **kwargs: model_path
+            # Execute the `init` method of the execution script.
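+            # (`init` was injected into this module's globals by the exec() call above.)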
+            # pylint: disable=undefined-variable
+            init()
+
+            assert aml_mocks["get_model_path"].call_count == 1
+            get_model_path_call_args = aml_mocks["get_model_path"].call_args_list
+            assert len(get_model_path_call_args) == 1
+            _, get_model_path_call_kwargs = get_model_path_call_args[0]
+            assert get_model_path_call_kwargs["model_name"] == model_name
+            assert get_model_path_call_kwargs["version"] == model_version
+
+
+@pytest.mark.large
+def test_execution_script_run_method_scores_pandas_dfs_successfully_when_model_outputs_numpy_arrays(
+        sklearn_model, sklearn_data, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path)
+
+    pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=model_path)
+    pyfunc_outputs = pyfunc_model.predict(sklearn_data[0])
+    assert isinstance(pyfunc_outputs, np.ndarray)
+
+    model_mock = Mock()
+    model_mock.name = "model_name"
+    model_mock.version = 1
+
+    with TempDir() as tmp:
+        execution_script_path = tmp.path("dest")
+        mlflow.azureml._create_execution_script(
+            output_path=execution_script_path, azure_model=model_mock)
+
+        with open(execution_script_path, "r") as f:
+            execution_script = f.read()
+
+        # Define the `init` and `run` methods contained in the execution script
+        # pylint: disable=exec-used
+        # Define an empty globals dictionary to ensure that the initialization of the
+        # execution script does not depend on the current state of the test environment
+        globs = {}
+        exec(execution_script, globs)
+        # Update the set of global variables available to the test environment to include
+        # functions defined during the evaluation of the execution script
+        globals().update(globs)
+        with AzureMLMocks() as aml_mocks:
+            aml_mocks["get_model_path"].side_effect = lambda *args, **kwargs: model_path
+            # Execute the `init` method of the execution script and load the sklearn model
+            # from the mocked path
+            # pylint: disable=undefined-variable
+            init()
+
+            # Invoke the `run` method of the execution script with sample input data and
+            # verify that reasonable output data is produced
+            # pylint: disable=undefined-variable
+            output_data = run(pd.DataFrame(data=sklearn_data[0]).to_json(orient="split"))
+            np.testing.assert_array_equal(output_data, pyfunc_outputs)
+
+
+@pytest.mark.large
+def test_execution_script_run_method_scores_pandas_dfs_successfully_when_model_outputs_pandas_dfs(
+        keras_model, keras_data, model_path):
+    mlflow.keras.save_model(keras_model=keras_model, path=model_path)
+    pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=model_path)
+    pyfunc_outputs = pyfunc_model.predict(keras_data[0])
+    assert isinstance(pyfunc_outputs, pd.DataFrame)
+
+    model_mock = Mock()
+    model_mock.name = "model_name"
+    model_mock.version = 1
+
+    with TempDir() as tmp:
+        execution_script_path = tmp.path("dest")
+        mlflow.azureml._create_execution_script(
+            output_path=execution_script_path, azure_model=model_mock)
+
+        with open(execution_script_path, "r") as f:
+            execution_script = f.read()
+
+        # Define the `init` and `run` methods contained in the execution script
+        # pylint: disable=exec-used
+        # Define an empty globals dictionary to ensure that the initialization of the
+        # execution script does not depend on the current state of the test environment
+        globs = {}
+        exec(execution_script, globs)
+        # Update the set of global variables available to the test environment to include
+        # functions defined during the evaluation of the execution script
+        globals().update(globs)
+        with AzureMLMocks() as aml_mocks:
+            aml_mocks["get_model_path"].side_effect = lambda *args, **kwargs: model_path
+            # Execute the `init` method of the execution script and load the keras model
+            # from the mocked path
+            # pylint: disable=undefined-variable
+            init()
+
+            # Invoke the `run` method of the execution script with sample input data and
+            # verify that reasonable output data is produced
+            # pylint: disable=undefined-variable
+            output_raw = run(pd.DataFrame(data=keras_data[0]).to_json(orient="split"))
+            output_df = pd.DataFrame(output_raw)
+            pandas.testing.assert_frame_equal(
+                output_df,
+                pyfunc_outputs,
+                check_dtype=False,
+                check_less_precise=False)
+
+
+@pytest.mark.large
+@mock.patch("mlflow.azureml.mlflow_version", "0.7.0")
+def test_cli_build_image_with_absolute_model_path_calls_expected_azure_routines(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path)
+    with AzureMLMocks() as aml_mocks:
+        result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
+            mlflow.azureml.cli.commands,
+            [
+                'build-image',
+                '-m', model_path,
+                '-w', "test_workspace",
+                '-i', "image_name",
+                '-n', "model_name",
+            ])
+        assert result.exit_code == 0
+
+        assert aml_mocks["register_model"].call_count == 1
+        assert aml_mocks["create_image"].call_count == 1
+        assert aml_mocks["load_workspace"].call_count == 1
+
+
+@pytest.mark.large
+@mock.patch("mlflow.azureml.mlflow_version", "0.7.0")
+def test_cli_build_image_with_relative_model_path_calls_expected_azure_routines(sklearn_model):
+    with TempDir(chdr=True):
+        model_path = "model"
+        mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path)
+
+        with AzureMLMocks() as aml_mocks:
+            result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
+                mlflow.azureml.cli.commands,
+                [
+                    'build-image',
+                    '-m', model_path,
+                    '-w', 'test_workspace',
+                    '-i', 'image_name',
+                    '-n', 'model_name',
+                ])
+            assert result.exit_code == 0
+
+            assert aml_mocks["register_model"].call_count == 1
+            assert aml_mocks["create_image"].call_count == 1
+            assert aml_mocks["load_workspace"].call_count == 1
+
+
+@pytest.mark.large
+@mock.patch("mlflow.azureml.mlflow_version", "0.7.0")
+def test_cli_build_image_with_runs_uri_calls_expected_azure_routines(sklearn_model):
+    artifact_path = "model"
+    with mlflow.start_run():
+        mlflow.sklearn.log_model(sk_model=sklearn_model, artifact_path=artifact_path)
+        run_id = mlflow.active_run().info.run_id
+    model_uri = "runs:/{run_id}/{artifact_path}".format(
+        run_id=run_id, artifact_path=artifact_path)
+
+    with AzureMLMocks() as aml_mocks:
+        result =
CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke( + mlflow.azureml.cli.commands, + [ + 'build-image', + '-m', model_uri, + '-w', 'test_workspace', + '-i', 'image_name', + '-n', 'model_name', + ]) + assert result.exit_code == 0 + + assert aml_mocks["register_model"].call_count == 1 + assert aml_mocks["create_image"].call_count == 1 + assert aml_mocks["load_workspace"].call_count == 1 + + +@pytest.mark.large +@mock.patch("mlflow.azureml.mlflow_version", "0.7.0") +def test_cli_build_image_parses_and_includes_user_specified_tags_in_azureml_image_and_model_tags( + sklearn_model, model_path): + custom_tags = { + "User": "Corey", + "Date": "Today", + "Other": "Entry", + } + + mlflow.sklearn.save_model(sk_model=sklearn_model, path=model_path) + + with AzureMLMocks() as aml_mocks: + result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke( + mlflow.azureml.cli.commands, + [ + 'build-image', + '-m', model_path, + '-w', 'test_workspace', + '-t', json.dumps(custom_tags), + ]) + assert result.exit_code == 0 + + register_model_call_args = aml_mocks["register_model"].call_args_list + assert len(register_model_call_args) == 1 + _, register_model_call_kwargs = register_model_call_args[0] + called_tags = register_model_call_kwargs["tags"] + assert custom_tags.items() <= called_tags.items() + + create_image_call_args = aml_mocks["create_image"].call_args_list + assert len(create_image_call_args) == 1 + _, create_image_call_kwargs = create_image_call_args[0] + image_config = create_image_call_kwargs["image_config"] + assert custom_tags.items() <= image_config.tags.items() diff --git a/tests/azureml/test_model_export.py b/tests/azureml/test_model_export.py deleted file mode 100644 index 6ad1090d3af40..0000000000000 --- a/tests/azureml/test_model_export.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import print_function - -import importlib -import os -import pickle -import tempfile -import unittest - -import sklearn.datasets as datasets -import sklearn.linear_model as glm -from click.testing import CliRunner - -from mlflow.utils.file_utils import TempDir -from mlflow import pyfunc -from mlflow.azureml import cli - - -def _load_pyfunc(path): - with open(path, "rb") as f: - return pickle.load(f) - - -class TestModelExport(unittest.TestCase): - def setUp(self): - self._tmp = tempfile.mkdtemp() - iris = datasets.load_iris() - self._X = iris.data[:, :2] # we only take the first two features. 
- self._y = iris.target - self._linear_lr = glm.LogisticRegression() - self._linear_lr.fit(self._X, self._y) - self._linear_lr_predict = self._linear_lr.predict(self._X) - - def test_model_export(self): - with TempDir(chdr=True, remove_on_exit=True) as tmp: - model_pkl = tmp.path("model.pkl") - with open(model_pkl, "wb") as f: - pickle.dump(self._linear_lr, f) - input_path = tmp.path("input_model") - pyfunc.save_model(input_path, loader_module="test_model_export", code_path=[__file__], - data_path=model_pkl) - output_path = tmp.path("output_model") - result = CliRunner( - env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(cli.commands, - ['export', '-m', - input_path, '-o', - output_path]) - if result.exit_code: - print('non-zero return code, output:', result.output, result.exception, - result.exc_info) - self.assertEqual(0, result.exit_code) - os.chdir(output_path) - import sys - sys.path.insert(0, '') - print(sys.path) - score = importlib.import_module("score") - score.init() - for i in range(0, len(self._linear_lr_predict)): - json = '[{"col1":%f, "col2":%f}]' % tuple(self._X[i, :]) - x = score.run(json) - self.assertEqual(self._linear_lr_predict[i], x[0]) - print("current dir", os.getcwd()) - assert os.path.exists(os.getcwd()) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/conftest.py b/tests/conftest.py index e69de29bb2d1d..effc22a7007f2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -0,0 +1,16 @@ +import pytest + + +@pytest.fixture +def reset_mock(): + cache = [] + + def set_mock(obj, attr, mock): + cache.append((obj, attr, getattr(obj, attr))) + setattr(obj, attr, mock) + + yield set_mock + + for obj, attr, value in cache: + setattr(obj, attr, value) + cache[:] = [] diff --git a/tests/data/test_data.py b/tests/data/test_data.py index 3e5c6ed84273b..a4b833c2f9cef 100644 --- a/tests/data/test_data.py +++ b/tests/data/test_data.py @@ -28,6 +28,7 @@ def load_project(): def test_is_uri(): assert is_uri("s3://some/s3/path") + assert is_uri("gs://some/gs/path") assert is_uri("dbfs:/some/dbfs/path") assert is_uri("file://some/local/path") assert not is_uri("/tmp/some/local/path") @@ -35,7 +36,9 @@ def test_is_uri(): def test_download_uri(): # Verify downloading from DBFS & S3 urls calls the corresponding helper functions - prefix_to_mock = {"dbfs:/": "mlflow.data._fetch_dbfs", "s3://": "mlflow.data._fetch_s3"} + prefix_to_mock = {"dbfs:/": "mlflow.data._fetch_dbfs", + "s3://": "mlflow.data._fetch_s3", + "gs://": "mlflow.data._fetch_gs"} for prefix, fn_name in prefix_to_mock.items(): with mock.patch(fn_name) as mocked_fn, temp_directory() as dst_dir: download_uri(uri=os.path.join(prefix, "some/path"), diff --git a/tests/entities/test_experiment.py b/tests/entities/test_experiment.py index 5b476e6f79f11..6b8383ec65b00 100644 --- a/tests/entities/test_experiment.py +++ b/tests/entities/test_experiment.py @@ -1,6 +1,6 @@ import unittest -from mlflow.entities import Experiment +from mlflow.entities import Experiment, LifecycleStage from tests.helper_functions import random_int, random_file @@ -13,9 +13,9 @@ def _check(self, exp, exp_id, name, location, lifecyle_stage): self.assertEqual(exp.lifecycle_stage, lifecyle_stage) def test_creation_and_hydration(self): - exp_id = random_int() + exp_id = str(random_int()) name = "exp_%d_%d" % (random_int(), random_int()) - lifecycle_stage = Experiment.ACTIVE_LIFECYCLE + lifecycle_stage = LifecycleStage.ACTIVE location = random_file(".json") exp = Experiment(exp_id, name, location, lifecycle_stage) @@ -34,6 +34,6 
@@ def test_creation_and_hydration(self):
     def test_string_repr(self):
         exp = Experiment(experiment_id=0, name="myname", artifact_location="hi",
-                         lifecycle_stage=Experiment.ACTIVE_LIFECYCLE)
-        assert str(exp) == "<Experiment: artifact_location='hi', experiment_id=0, " \
-                           "lifecycle_stage='active', name='myname'>"
+                         lifecycle_stage=LifecycleStage.ACTIVE)
+        assert str(exp) == "<Experiment: artifact_location='hi', experiment_id=0, " \
+                           "lifecycle_stage='active', name='myname'>"
diff --git a/tests/entities/test_metric.py b/tests/entities/test_metric.py
index f23c4c7a69996..83ca339bbd0c4 100644
--- a/tests/entities/test_metric.py
+++ b/tests/entities/test_metric.py
@@ -1,31 +1,32 @@
 import time
-import unittest
 
 from mlflow.entities import Metric
-from tests.helper_functions import random_str
+from tests.helper_functions import random_str, random_int
 
 
-class TestMetric(unittest.TestCase):
-    def _check(self, metric, key, value, timestamp):
-        self.assertIsInstance(metric, Metric)
-        self.assertEqual(metric.key, key)
-        self.assertEqual(metric.value, value)
-        self.assertEqual(metric.timestamp, timestamp)
+def _check(metric, key, value, timestamp, step):
+    assert type(metric) == Metric
+    assert metric.key == key
+    assert metric.value == value
+    assert metric.timestamp == timestamp
+    assert metric.step == step
 
-    def test_creation_and_hydration(self):
-        key = random_str()
-        value = 10000
-        ts = int(time.time())
-        metric = Metric(key, value, ts)
-        self._check(metric, key, value, ts)
 
+def test_creation_and_hydration():
+    key = random_str()
+    value = 10000
+    ts = int(time.time())
+    step = random_int()
 
-        as_dict = {"key": key, "value": value, "timestamp": ts}
-        self.assertEqual(dict(metric), as_dict)
+    metric = Metric(key, value, ts, step)
+    _check(metric, key, value, ts, step)
 
-        proto = metric.to_proto()
-        metric2 = metric.from_proto(proto)
-        self._check(metric2, key, value, ts)
+    as_dict = {"key": key, "value": value, "timestamp": ts, "step": step}
+    assert dict(metric) == as_dict
 
-        metric3 = Metric.from_dictionary(as_dict)
-        self._check(metric3, key, value, ts)
+    proto = metric.to_proto()
+    metric2 = metric.from_proto(proto)
+    _check(metric2, key, value, ts, step)
+
+    metric3 = Metric.from_dictionary(as_dict)
+    _check(metric3, key, value, ts, step)
diff --git a/tests/entities/test_run.py b/tests/entities/test_run.py
index 468f13f669d91..3908dd2fa06b6 100644
--- a/tests/entities/test_run.py
+++ b/tests/entities/test_run.py
@@ -1,66 +1,79 @@
-from mlflow.entities import Run, Metric, RunData, SourceType, RunStatus, RunInfo
+import pytest
+
+from mlflow.entities import Run, Metric, RunData, RunStatus, RunInfo, LifecycleStage
+from mlflow.exceptions import MlflowException
 
 from tests.entities.test_run_data import TestRunData
 from tests.entities.test_run_info import TestRunInfo
 
 
 class TestRun(TestRunInfo, TestRunData):
-    def _check_run(self, run, ri, rd):
-        TestRunInfo._check(self, run.info, ri.run_uuid, ri.experiment_id, ri.name,
-                           ri.source_type, ri.source_name, ri.entry_point_name,
-                           ri.user_id, ri.status, ri.start_time, ri.end_time, ri.source_version,
+    def _check_run(self, run, ri, rd_metrics, rd_params, rd_tags):
+        TestRunInfo._check(self, run.info, ri.run_id, ri.experiment_id,
+                           ri.user_id, ri.status, ri.start_time, ri.end_time,
                            ri.lifecycle_stage, ri.artifact_uri)
-        TestRunData._check(self, run.data, rd.metrics, rd.params, rd.tags)
+        TestRunData._check(self, run.data, rd_metrics, rd_params, rd_tags)
 
     def test_creation_and_hydration(self):
         run_data, metrics, params, tags = TestRunData._create()
-        (run_info, run_uuid, experiment_id, name, source_type, source_name, entry_point_name,
-         user_id, status, start_time, end_time, source_version, lifecycle_stage,
+        (run_info, run_id, experiment_id, user_id,
status, start_time, end_time, lifecycle_stage,
+         artifact_uri) = TestRunInfo._create()
 
         run1 = Run(run_info, run_data)
-        self._check_run(run1, run_info, run_data)
+        self._check_run(run1, run_info, metrics, params, tags)
 
-        as_dict = {"info": {"run_uuid": run_uuid,
-                            "experiment_id": experiment_id,
-                            "name": name,
-                            "source_type": source_type,
-                            "source_name": source_name,
-                            "entry_point_name": entry_point_name,
-                            "user_id": user_id,
-                            "status": status,
-                            "start_time": start_time,
-                            "end_time": end_time,
-                            "source_version": source_version,
-                            "lifecycle_stage": lifecycle_stage,
-                            "artifact_uri": artifact_uri,
-                            },
-                   "data": {"metrics": metrics,
-                            "params": params,
-                            "tags": tags}}
-        self.assertEqual(run1.to_dictionary(), as_dict)
+        expected_info_dict = {
+            "run_uuid": run_id,
+            "run_id": run_id,
+            "experiment_id": experiment_id,
+            "user_id": user_id,
+            "status": status,
+            "start_time": start_time,
+            "end_time": end_time,
+            "lifecycle_stage": lifecycle_stage,
+            "artifact_uri": artifact_uri,
+        }
+        self.assertEqual(
+            run1.to_dictionary(),
+            {
+                "info": expected_info_dict,
+                "data": {
+                    "metrics": {m.key: m.value for m in metrics},
+                    "params": {p.key: p.value for p in params},
+                    "tags": {t.key: t.value for t in tags},
+                }
+            }
+        )
 
         proto = run1.to_proto()
         run2 = Run.from_proto(proto)
-        self._check_run(run2, run_info, run_data)
+        self._check_run(run2, run_info, metrics, params, tags)
 
-        run3 = Run.from_dictionary(as_dict)
-        self._check_run(run3, run_info, run_data)
+        run3 = Run(run_info, None)
+        self.assertEqual(
+            run3.to_dictionary(),
+            {
+                "info": expected_info_dict,
+            }
+        )
 
     def test_string_repr(self):
         run_info = RunInfo(
-            run_uuid="hi", experiment_id=0, name="name", source_type=SourceType.PROJECT,
-            source_name="source-name", entry_point_name="entry-point-name",
-            user_id="user-id", status=RunStatus.FAILED, start_time=0, end_time=1,
-            source_version="version", lifecycle_stage=RunInfo.ACTIVE_LIFECYCLE)
-        metrics = [Metric("key", i, 0) for i in range(5)]
+            run_uuid="hi", run_id="hi", experiment_id=0,
+            user_id="user-id", status=RunStatus.FAILED,
+            start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE)
+        metrics = [Metric(key="key-%s" % i, value=i, timestamp=0, step=i) for i in range(3)]
         run_data = RunData(metrics=metrics, params=[], tags=[])
         run1 = Run(run_info, run_data)
-        expected = "<Run: info=<RunInfo: run_uuid='hi', experiment_id=0, name='name', ...>, " \
-                   "data=<RunData: metrics=[<Metric: key='key', value=0, timestamp=0>, " \
-                   "<Metric: key='key', value=1, timestamp=0>, ...], params=[], tags=[]>>"
+        expected = ("<Run: data=<RunData: metrics={'key-0': 0, 'key-1': 1, 'key-2': 2}, "
+                    "params={}, tags={}>, info=<RunInfo: artifact_uri=None, end_time=1, "
+                    "experiment_id=0, lifecycle_stage='active', run_id='hi', run_uuid='hi', "
+                    "start_time=0, status=4, user_id='user-id'>>")
         assert str(run1) == expected
+
+    def test_creating_run_with_absent_info_throws_exception(self):
+        run_data = TestRunData._create()[0]
+        with pytest.raises(MlflowException) as no_info_exc:
+            Run(None, run_data)
+        assert "run_info cannot be None" in str(no_info_exc)
diff --git a/tests/entities/test_run_data.py b/tests/entities/test_run_data.py
index 3a2aa3038ac65..454531ad595c4 100644
--- a/tests/entities/test_run_data.py
+++ b/tests/entities/test_run_data.py
@@ -1,63 +1,54 @@
 import time
 import unittest
 
-from mlflow.entities import Metric, Param, RunData, RunTag
+from mlflow.entities import Metric, RunData, Param, RunTag
 from tests.helper_functions import random_str, random_int
 
 
 class TestRunData(unittest.TestCase):
-    def _check_metrics(self, metrics_1, metrics_2):
-        for metric in metrics_1:
-            self.assertIsInstance(metric, Metric)
-        self.assertEqual(set([m.key for m in metrics_1]), set([m.key for m in metrics_2]))
-        self.assertEqual(set([m.value for m in metrics_1]), set([m.value for m in metrics_2]))
-        self.assertEqual(set([m.timestamp for m in metrics_1]),
-                         set([m.timestamp for m in metrics_2]))
-
-    def _check_params(self,
params_1, params_2): - for param in params_1: - self.assertIsInstance(param, Param) - self.assertEqual(set([p.key for p in params_1]), set([p.key for p in params_2])) - self.assertEqual(set([p.value for p in params_1]), set([p.value for p in params_2])) - - def _check_tags(self, tags_1, tags_2): - for tag in tags_1: - self.assertIsInstance(tag, RunTag) - self.assertEqual(set([t.key for t in tags_1]), set([t.key for t in tags_2])) - self.assertEqual(set([t.value for t in tags_2]), set([t.value for t in tags_2])) + def _check_metrics(self, metric_objs, metrics_dict, expected_metrics): + self.assertEqual(set([m.key for m in metric_objs]), + set([m.key for m in expected_metrics])) + self.assertEqual(set([m.value for m in metric_objs]), + set([m.value for m in expected_metrics])) + self.assertEqual(set([m.timestamp for m in metric_objs]), + set([m.timestamp for m in expected_metrics])) + self.assertEqual(set([m.step for m in metric_objs]), + set([m.step for m in expected_metrics])) + assert len(metrics_dict) == len(expected_metrics) + assert metrics_dict == {m.key: m.value for m in expected_metrics} + + def _check_params(self, params_dict, expected_params): + self.assertEqual(params_dict, {p.key: p.value for p in expected_params}) + + def _check_tags(self, tags_dict, expected_tags): + self.assertEqual(tags_dict, {t.key: t.value for t in expected_tags}) def _check(self, rd, metrics, params, tags): self.assertIsInstance(rd, RunData) - self._check_metrics(rd.metrics, metrics) + self._check_metrics(rd._metric_objs, rd.metrics, metrics) self._check_params(rd.params, params) self._check_tags(rd.tags, tags) @staticmethod def _create(): - metrics = [Metric(random_str(10), random_int(0, 1000), - int(time.time() + random_int(-1e4, 1e4))) - for _ in range(100)] + metrics = [Metric(key=random_str(10), + value=random_int(0, 1000), + timestamp=int(time.time()) + random_int(-1e4, 1e4), + step=random_int())] params = [Param(random_str(10), random_str(random_int(10, 35))) for _ in range(10)] # noqa tags = [RunTag(random_str(10), random_str(random_int(10, 35))) for _ in range(10)] # noqa - rd = RunData() - for p in params: - rd._add_param(p) - for m in metrics: - rd._add_metric(m) - for t in tags: - rd._add_tag(t) + rd = RunData(metrics=metrics, params=params, tags=tags) return rd, metrics, params, tags def test_creation_and_hydration(self): rd1, metrics, params, tags = self._create() self._check(rd1, metrics, params, tags) - as_dict = {"metrics": metrics, "params": params, "tags": tags} + as_dict = {"metrics": {m.key: m.value for m in metrics}, + "params": {p.key: p.value for p in params}, + "tags": {t.key: t.value for t in tags}} self.assertEqual(dict(rd1), as_dict) - proto = rd1.to_proto() rd2 = RunData.from_proto(proto) self._check(rd2, metrics, params, tags) - - rd3 = RunData.from_dictionary(as_dict) - self._check(rd3, metrics, params, tags) diff --git a/tests/entities/test_run_info.py b/tests/entities/test_run_info.py index f6625fe96aff7..5e9db29234dbc 100644 --- a/tests/entities/test_run_info.py +++ b/tests/entities/test_run_info.py @@ -1,73 +1,54 @@ +import random import unittest import uuid -from mlflow.entities import RunInfo +from mlflow.entities import RunInfo, LifecycleStage, RunStatus from tests.helper_functions import random_str, random_int class TestRunInfo(unittest.TestCase): - def _check(self, ri, run_uuid, experiment_id, name, source_type, source_name, - entry_point_name, user_id, status, start_time, end_time, source_version, + def _check(self, ri, run_id, experiment_id, user_id, status, 
start_time, end_time, lifecycle_stage, artifact_uri): self.assertIsInstance(ri, RunInfo) - self.assertEqual(ri.run_uuid, run_uuid) + self.assertEqual(ri.run_uuid, run_id) + self.assertEqual(ri.run_id, run_id) self.assertEqual(ri.experiment_id, experiment_id) - self.assertEqual(ri.name, name) - self.assertEqual(ri.source_type, source_type) - self.assertEqual(ri.source_name, source_name) - self.assertEqual(ri.entry_point_name, entry_point_name) self.assertEqual(ri.user_id, user_id) self.assertEqual(ri.status, status) self.assertEqual(ri.start_time, start_time) self.assertEqual(ri.end_time, end_time) - self.assertEqual(ri.source_version, source_version) self.assertEqual(ri.lifecycle_stage, lifecycle_stage) self.assertEqual(ri.artifact_uri, artifact_uri) @staticmethod def _create(): - run_uuid = str(uuid.uuid4()) - experiment_id = random_int(10, 2000) - name = random_str(random_int(10, 40)) - source_type = random_int(1, 4) - source_name = random_str(random_int(100, 300)) - entry_point_name = random_str(random_int(100, 300)) + run_id = str(uuid.uuid4()) + experiment_id = str(random_int(10, 2000)) user_id = random_str(random_int(10, 25)) - status = random_int(1, 5) + status = RunStatus.to_string(random.choice(RunStatus.all_status())) start_time = random_int(1, 10) end_time = start_time + random_int(1, 10) - source_version = random_str(random_int(10, 40)) - lifecycle_stage = RunInfo.ACTIVE_LIFECYCLE + lifecycle_stage = LifecycleStage.ACTIVE artifact_uri = random_str(random_int(10, 40)) - ri = RunInfo(run_uuid=run_uuid, experiment_id=experiment_id, name=name, - source_type=source_type, source_name=source_name, - entry_point_name=entry_point_name, user_id=user_id, + ri = RunInfo(run_uuid=run_id, run_id=run_id, experiment_id=experiment_id, user_id=user_id, status=status, start_time=start_time, end_time=end_time, - source_version=source_version, lifecycle_stage=lifecycle_stage, - artifact_uri=artifact_uri) - return (ri, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, lifecycle_stage, + lifecycle_stage=lifecycle_stage, artifact_uri=artifact_uri) + return (ri, run_id, experiment_id, user_id, status, start_time, end_time, lifecycle_stage, artifact_uri) def test_creation_and_hydration(self): - (ri1, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, lifecycle_stage, + (ri1, run_id, experiment_id, user_id, status, start_time, end_time, lifecycle_stage, artifact_uri) = self._create() - self._check(ri1, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, lifecycle_stage, - artifact_uri) + self._check(ri1, run_id, experiment_id, user_id, status, start_time, end_time, + lifecycle_stage, artifact_uri) as_dict = { - "run_uuid": run_uuid, + "run_uuid": run_id, + "run_id": run_id, "experiment_id": experiment_id, - "name": name, - "source_type": source_type, - "source_name": source_name, - "entry_point_name": entry_point_name, "user_id": user_id, "status": status, "start_time": start_time, "end_time": end_time, - "source_version": source_version, "lifecycle_stage": lifecycle_stage, "artifact_uri": artifact_uri } @@ -75,17 +56,18 @@ def test_creation_and_hydration(self): proto = ri1.to_proto() ri2 = RunInfo.from_proto(proto) - self._check(ri2, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, 
lifecycle_stage, - artifact_uri) + self._check(ri2, run_id, experiment_id, user_id, status, start_time, end_time, + lifecycle_stage, artifact_uri) ri3 = RunInfo.from_dictionary(as_dict) - self._check(ri3, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, lifecycle_stage, - artifact_uri) + self._check(ri3, run_id, experiment_id, user_id, status, start_time, end_time, + lifecycle_stage, artifact_uri) # Test that we can add a field to RunInfo and still deserialize it from a dictionary dict_copy_0 = as_dict.copy() dict_copy_0["my_new_field"] = "new field value" ri4 = RunInfo.from_dictionary(dict_copy_0) - self._check(ri4, run_uuid, experiment_id, name, source_type, source_name, entry_point_name, - user_id, status, start_time, end_time, source_version, lifecycle_stage, - artifact_uri) + self._check(ri4, run_id, experiment_id, user_id, status, start_time, end_time, + lifecycle_stage, artifact_uri) + + def test_searchable_attributes(self): + self.assertSequenceEqual(set(["status", "artifact_uri"]), + set(RunInfo.get_searchable_attributes())) diff --git a/tests/entities/test_run_status.py b/tests/entities/test_run_status.py new file mode 100644 index 0000000000000..b1f22f1bf8e10 --- /dev/null +++ b/tests/entities/test_run_status.py @@ -0,0 +1,47 @@ +import unittest + +from mlflow.entities import RunStatus + + +class TestRunStatus(unittest.TestCase): + def test_all_status_covered(self): + # ensure that all known status are returned. Test will fail if new status are added to PB + all_statuses = set([RunStatus.RUNNING, + RunStatus.SCHEDULED, + RunStatus.FINISHED, + RunStatus.FAILED, + RunStatus.KILLED, + ]) + self.assertSequenceEqual(all_statuses, set(RunStatus.all_status())) + + def test_status_mappings(self): + # test enum to string mappings + self.assertEqual("RUNNING", RunStatus.to_string(RunStatus.RUNNING)) + self.assertEqual(RunStatus.RUNNING, RunStatus.from_string("RUNNING")) + + self.assertEqual("SCHEDULED", RunStatus.to_string(RunStatus.SCHEDULED)) + self.assertEqual(RunStatus.SCHEDULED, RunStatus.from_string("SCHEDULED")) + + self.assertEqual("FINISHED", RunStatus.to_string(RunStatus.FINISHED)) + self.assertEqual(RunStatus.FINISHED, RunStatus.from_string("FINISHED")) + + self.assertEqual("FAILED", RunStatus.to_string(RunStatus.FAILED)) + self.assertEqual(RunStatus.FAILED, RunStatus.from_string("FAILED")) + + self.assertEqual("KILLED", RunStatus.to_string(RunStatus.KILLED)) + self.assertEqual(RunStatus.KILLED, RunStatus.from_string("KILLED")) + + with self.assertRaises(Exception) as e: + RunStatus.to_string(-120) + self.assertIn("Could not get string corresponding to run status -120", str(e.exception)) + + with self.assertRaises(Exception) as e: + RunStatus.from_string("the IMPOSSIBLE status string") + self.assertIn("Could not get run status corresponding to string the IMPO", str(e.exception)) + + def test_is_terminated(self): + self.assertTrue(RunStatus.is_terminated(RunStatus.FAILED)) + self.assertTrue(RunStatus.is_terminated(RunStatus.FINISHED)) + self.assertTrue(RunStatus.is_terminated(RunStatus.KILLED)) + self.assertFalse(RunStatus.is_terminated(RunStatus.SCHEDULED)) + self.assertFalse(RunStatus.is_terminated(RunStatus.RUNNING)) diff --git a/tests/generate_ui_test_data.py b/tests/generate_ui_test_data.py index b9db19bec2c03..4eef54b4b0edf 100644 --- a/tests/generate_ui_test_data.py +++ b/tests/generate_ui_test_data.py @@ -1,18 +1,16 @@ """ Small script used to generate mock data to test the UI. 
""" + +import argparse import mlflow import itertools -from random import random +import random +import string +from random import random as rand from mlflow.tracking import MlflowClient -SOURCE_VERSIONS = [ - 'f7581541a524f4879794e724a9653eaca2bef1d7', - '53de5661eb457efa3cb996aa592656c41a888c1d', - 'ccc76efe9ceb633710bbd7acf408bebe0095eb10' -] - def log_metrics(metrics): for k, values in metrics.items(): @@ -25,25 +23,34 @@ def log_params(parameters): mlflow.log_param(k, v) +def rand_str(max_len=40): + return "".join(random.sample(string.ascii_letters, random.randint(1, max_len))) + + if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "--large", help="If true, will also generate larger datasets for testing UI performance.", + action="store_true") + args = parser.parse_args() client = MlflowClient() # Simple run for l1, alpha in itertools.product([0, 0.25, 0.5, 0.75, 1], [0, 0.5, 1]): - with mlflow.start_run(source_name='ipython', source_version=SOURCE_VERSIONS[0]): + with mlflow.start_run(run_name='ipython'): parameters = { 'l1': str(l1), 'alpha': str(alpha), } metrics = { - 'MAE': [random()], - 'R2': [random()], - 'RMSE': [random()], + 'MAE': [rand()], + 'R2': [rand()], + 'RMSE': [rand()], } log_params(parameters) log_metrics(metrics) # Big parameter values - with mlflow.start_run(source_name='ipython', source_version=SOURCE_VERSIONS[1]): + with mlflow.start_run(run_name='ipython'): parameters = { 'this is a pretty long parameter name': 'NA10921-test_file_2018-08-10.txt', } @@ -54,7 +61,7 @@ def log_params(parameters): log_metrics(metrics) # Nested runs. - with mlflow.start_run(source_name='multirun.py'): + with mlflow.start_run(run_name='multirun.py'): l1 = 0.5 alpha = 0.5 parameters = { @@ -62,79 +69,110 @@ def log_params(parameters): 'alpha': str(alpha), } metrics = { - 'MAE': [random()], - 'R2': [random()], - 'RMSE': [random()], + 'MAE': [rand()], + 'R2': [rand()], + 'RMSE': [rand()], } log_params(parameters) log_metrics(metrics) - with mlflow.start_run(source_name='child_params.py', nested=True): + with mlflow.start_run(run_name='child_params.py', nested=True): parameters = { - 'lot': str(random()), - 'of': str(random()), - 'parameters': str(random()), - 'in': str(random()), - 'this': str(random()), - 'experiement': str(random()), - 'run': str(random()), - 'because': str(random()), - 'we': str(random()), - 'need': str(random()), - 'to': str(random()), - 'check': str(random()), - 'how': str(random()), - 'it': str(random()), - 'handles': str(random()), + 'lot': str(rand()), + 'of': str(rand()), + 'parameters': str(rand()), + 'in': str(rand()), + 'this': str(rand()), + 'experiement': str(rand()), + 'run': str(rand()), + 'because': str(rand()), + 'we': str(rand()), + 'need': str(rand()), + 'to': str(rand()), + 'check': str(rand()), + 'how': str(rand()), + 'it': str(rand()), + 'handles': str(rand()), } log_params(parameters) mlflow.log_metric('test_metric', 1) - with mlflow.start_run(source_name='child_metrics.py', nested=True): + with mlflow.start_run(run_name='child_metrics.py', nested=True): metrics = { - 'lot': [random()], - 'of': [random()], - 'parameters': [random()], - 'in': [random()], - 'this': [random()], - 'experiement': [random()], - 'run': [random()], - 'because': [random()], - 'we': [random()], - 'need': [random()], - 'to': [random()], - 'check': [random()], - 'how': [random()], - 'it': [random()], - 'handles': [random()], + 'lot': [rand()], + 'of': [rand()], + 'parameters': [rand()], + 'in': [rand()], + 'this': [rand()], + 
'experiement': [rand()],
+            'run': [rand()],
+            'because': [rand()],
+            'we': [rand()],
+            'need': [rand()],
+            'to': [rand()],
+            'check': [rand()],
+            'how': [rand()],
+            'it': [rand()],
+            'handles': [rand()],
         }
         log_metrics(metrics)
-    with mlflow.start_run(source_name='sort_child.py', nested=True):
+    with mlflow.start_run(run_name='sort_child.py', nested=True):
         mlflow.log_metric('test_metric', 1)
         mlflow.log_param('test_param', 1)
-    with mlflow.start_run(source_name='sort_child.py', nested=True):
+    with mlflow.start_run(run_name='sort_child.py', nested=True):
         mlflow.log_metric('test_metric', 2)
         mlflow.log_param('test_param', 2)
 
     # Grandchildren
-    with mlflow.start_run(source_name='parent'):
-        with mlflow.start_run(source_name='child', nested=True):
-            with mlflow.start_run(source_name='grandchild', nested=True):
+    with mlflow.start_run(run_name='parent'):
+        with mlflow.start_run(run_name='child', nested=True):
+            with mlflow.start_run(run_name='grandchild', nested=True):
                 pass
 
     # Loop
     loop_1_run_id = None
     loop_2_run_id = None
-    with mlflow.start_run(source_name='loop-1') as run_1:
-        with mlflow.start_run(source_name='loop-2', nested=True) as run_2:
-            loop_1_run_id = run_1.info.run_uuid
-            loop_2_run_id = run_2.info.run_uuid
+    with mlflow.start_run(run_name='loop-1') as run_1:
+        with mlflow.start_run(run_name='loop-2', nested=True) as run_2:
+            loop_1_run_id = run_1.info.run_id
+            loop_2_run_id = run_2.info.run_id
     client.set_tag(loop_1_run_id, 'mlflow.parentRunId', loop_2_run_id)
 
     # Lots of children
-    with mlflow.start_run(source_name='parent-with-lots-of-children'):
+    with mlflow.start_run(run_name='parent-with-lots-of-children'):
         for i in range(100):
-            with mlflow.start_run(source_name='child-{}'.format(i), nested=True):
+            with mlflow.start_run(run_name='child-{}'.format(i), nested=True):
                 pass
+    mlflow.set_experiment("my-empty-experiment")
+    mlflow.set_experiment("runs-but-no-metrics-params")
+    for i in range(100):
+        with mlflow.start_run(run_name="empty-run-{}".format(i)):
+            pass
+    if args.large:
+        mlflow.set_experiment("med-size-experiment")
+        # Experiment with a mix of nested runs & non-nested runs
+        for i in range(3):
+            with mlflow.start_run(run_name='parent-with-children-{}'.format(i)):
+                params = {rand_str(): rand_str() for _ in range(5)}
+                metrics = {rand_str(): [rand()] for _ in range(5)}
+                log_params(params)
+                log_metrics(metrics)
+                for j in range(10):
+                    with mlflow.start_run(run_name='child-{}'.format(j), nested=True):
+                        params = {rand_str(): rand_str() for _ in range(30)}
+                        metrics = {rand_str(): [rand()] for _ in range(30)}
+                        log_params(params)
+                        log_metrics(metrics)
+            for j in range(10):
+                with mlflow.start_run(run_name='unnested-{}-{}'.format(i, j)):
+                    params = {rand_str(): rand_str() for _ in range(5)}
+                    metrics = {rand_str(): [rand()] for _ in range(5)}
+                    # Log the generated data; without these calls the "unnested"
+                    # runs would be created empty.
+                    log_params(params)
+                    log_metrics(metrics)
+        mlflow.set_experiment("hitting-metric-param-limits")
+        for i in range(50):
+            with mlflow.start_run(run_name="big-run-{}".format(i)):
+                params = {str(j) + "a" * 250: "b" * 1000 for j in range(100)}
+                metrics = {str(j) + "a" * 250: [rand()] for j in range(100)}
+                log_metrics(metrics)
+                log_params(params)
diff --git a/tests/h2o/test_h2o_model_export.py b/tests/h2o/test_h2o_model_export.py
index 05b121740c32c..8670a06d08c0a 100644
--- a/tests/h2o/test_h2o_model_export.py
+++ b/tests/h2o/test_h2o_model_export.py
@@ -2,66 +2,234 @@
 
 from __future__ import print_function
 
-import collections
 import os
-import pandas
-import shutil
-import unittest
+import pytest
+import yaml
+import json
+import pandas as pd
+import pandas.testing
+from
collections import namedtuple import sklearn.datasets as datasets import h2o from h2o.estimators.gbm import H2OGradientBoostingEstimator -import tempfile import mlflow.h2o import mlflow +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server from mlflow import pyfunc +from mlflow.models import Model +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env from mlflow.utils.file_utils import TempDir +from mlflow.utils.model_utils import _get_flavor_configuration +from tests.helper_functions import score_model_in_sagemaker_docker_container -class TestModelExport(unittest.TestCase): - def setUp(self): - h2o.init() - iris = datasets.load_iris() - data = h2o.H2OFrame({ - 'feature1': list(iris.data[:, 0]), - 'feature2': list(iris.data[:, 1]), - 'target': list(map(lambda i: "Flower %d" % i, iris.target)) - }) - train, self.test = data.split_frame(ratios=[.7]) - - self.gbm = H2OGradientBoostingEstimator(ntrees=10, max_depth=6) - self.gbm.train(['feature1', 'feature2'], 'target', training_frame=train) - self.predicted = self.gbm.predict(self.test).as_data_frame() - - def test_model_save_load(self): - with TempDir(chdr=True, remove_on_exit=True) as tmp: - path = tmp.path("model") - mlflow.h2o.save_model(self.gbm, path) - - # Loading h2o model - gbm_loaded = mlflow.h2o.load_model(path) - assert all(gbm_loaded.predict(self.test).as_data_frame() == self.predicted) - - # Loading pyfunc model - pyfunc_loaded = mlflow.pyfunc.load_pyfunc(path) - assert all(pyfunc_loaded.predict(self.test.as_data_frame()) == self.predicted) - - def test_model_log(self): - old_uri = mlflow.get_tracking_uri() - # should_start_run tests whether or not calling log_model() automatically starts a run. 
- for should_start_run in [False, True]: - with TempDir(chdr=True, remove_on_exit=True) as tmp: - try: - mlflow.set_tracking_uri("test") - if should_start_run: - mlflow.start_run() - mlflow.h2o.log_model(self.gbm, artifact_path="gbm") - - # Load model - gbm_loaded = mlflow.h2o.load_model("gbm", - run_id=mlflow.active_run().info.run_uuid) - assert all(gbm_loaded.predict(self.test).as_data_frame() == self.predicted) - finally: - mlflow.end_run() - mlflow.set_tracking_uri(old_uri) + +ModelWithData = namedtuple("ModelWithData", ["model", "inference_data"]) + + +@pytest.fixture +def h2o_iris_model(): + h2o.init() + iris = datasets.load_iris() + data = h2o.H2OFrame({ + 'feature1': list(iris.data[:, 0]), + 'feature2': list(iris.data[:, 1]), + 'target': list(map(lambda i: "Flower %d" % i, iris.target)) + }) + train, test = data.split_frame(ratios=[.7]) + + h2o_gbm = H2OGradientBoostingEstimator(ntrees=10, max_depth=6) + h2o_gbm.train(['feature1', 'feature2'], 'target', training_frame=train) + return ModelWithData(model=h2o_gbm, inference_data=test) + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.fixture +def h2o_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["pytest"], + additional_pip_deps=["h2o"]) + return conda_env + + +@pytest.mark.large +def test_model_save_load(h2o_iris_model, model_path): + h2o_model = h2o_iris_model.model + mlflow.h2o.save_model(h2o_model=h2o_model, path=model_path) + + # Loading h2o model + h2o_model_loaded = mlflow.h2o.load_model(model_path) + assert all( + h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() == + h2o_model.predict(h2o_iris_model.inference_data).as_data_frame()) + + # Loading pyfunc model + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) + assert all( + pyfunc_loaded.predict(h2o_iris_model.inference_data.as_data_frame()) == + h2o_model.predict(h2o_iris_model.inference_data).as_data_frame()) + + +@pytest.mark.large +def test_model_log(h2o_iris_model): + h2o_model = h2o_iris_model.model + old_uri = mlflow.get_tracking_uri() + # should_start_run tests whether or not calling log_model() automatically starts a run. + for should_start_run in [False, True]: + with TempDir(chdr=True, remove_on_exit=True): + try: + artifact_path = "gbm_model" + mlflow.set_tracking_uri("test") + if should_start_run: + mlflow.start_run() + mlflow.h2o.log_model(h2o_model=h2o_model, artifact_path=artifact_path) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + # Load model + h2o_model_loaded = mlflow.h2o.load_model(model_uri=model_uri) + assert all( + h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() == + h2o_model.predict(h2o_iris_model.inference_data).as_data_frame()) + finally: + mlflow.end_run() + mlflow.set_tracking_uri(old_uri) + + +@pytest.mark.large +def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path( + h2o_iris_model, model_path): + """ + This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.7.0 + can be loaded successfully. These models are missing the `data` flavor configuration key. 
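+    Loading should still succeed by falling back to the flavor's default data location.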
+ """ + h2o_model = h2o_iris_model.model + mlflow.h2o.save_model(h2o_model=h2o_model, path=model_path) + + model_conf_path = os.path.join(model_path, "MLmodel") + model_conf = Model.load(model_conf_path) + flavor_conf = model_conf.flavors.get(mlflow.h2o.FLAVOR_NAME, None) + assert flavor_conf is not None + del flavor_conf['data'] + model_conf.save(model_conf_path) + + h2o_model_loaded = mlflow.h2o.load_model(model_path) + assert all( + h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() == + h2o_model.predict(h2o_iris_model.inference_data).as_data_frame()) + + +@pytest.mark.large +def test_model_save_persists_specified_conda_env_in_mlflow_model_directory( + h2o_iris_model, model_path, h2o_custom_env): + mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=h2o_custom_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != h2o_custom_env + + with open(h2o_custom_env, "r") as f: + h2o_custom_env_text = f.read() + with open(saved_conda_env_path, "r") as f: + saved_conda_env_text = f.read() + assert saved_conda_env_text == h2o_custom_env_text + + +@pytest.mark.large +def test_model_save_accepts_conda_env_as_dict(h2o_iris_model, model_path): + conda_env = dict(mlflow.h2o.get_default_conda_env()) + conda_env["dependencies"].append("pytest") + mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=conda_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == conda_env + + +@pytest.mark.large +def test_model_log_persists_specified_conda_env_in_mlflow_model_directory( + h2o_iris_model, h2o_custom_env): + artifact_path = "model" + with mlflow.start_run(): + mlflow.h2o.log_model(h2o_model=h2o_iris_model.model, + artifact_path=artifact_path, + conda_env=h2o_custom_env) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != h2o_custom_env + + with open(h2o_custom_env, "r") as f: + h2o_custom_env_text = f.read() + with open(saved_conda_env_path, "r") as f: + saved_conda_env_text = f.read() + assert saved_conda_env_text == h2o_custom_env_text + + +@pytest.mark.large +def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( + h2o_iris_model, model_path): + mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=None) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.h2o.get_default_conda_env() + + +@pytest.mark.large +def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( + 
h2o_iris_model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.h2o.log_model(h2o_model=h2o_iris_model.model, artifact_path=artifact_path) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.h2o.get_default_conda_env() + + +@pytest.mark.release +def test_sagemaker_docker_model_scoring_with_default_conda_env(h2o_iris_model, model_path): + mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=None) + reloaded_h2o_pyfunc = mlflow.pyfunc.load_pyfunc(model_path) + + scoring_response = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=h2o_iris_model.inference_data.as_data_frame(), + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON, + flavor=mlflow.pyfunc.FLAVOR_NAME) + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + + pandas.testing.assert_frame_equal( + deployed_model_preds["predict"].to_frame(), + reloaded_h2o_pyfunc.predict( + h2o_iris_model.inference_data.as_data_frame())["predict"].to_frame(), + check_dtype=False, + check_less_precise=6) diff --git a/tests/helper_functions.py b/tests/helper_functions.py index f69dca2d415f4..e7a40307da2c9 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -1,15 +1,33 @@ +from __future__ import print_function + import os import random - -import re import requests import string -from subprocess import Popen, PIPE, STDOUT import time +import signal +import socket +from subprocess import Popen +import uuid +import sys import pandas as pd +import pytest +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server import mlflow.pyfunc +from mlflow.utils.file_utils import read_yaml, write_yaml + +LOCALHOST = '127.0.0.1' + + +def get_safe_port(): + """Returns an ephemeral port that is very likely to be free to bind to.""" + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind((LOCALHOST, 0)) + port = sock.getsockname()[1] + sock.close() + return port def random_int(lo=1, hi=1e10): @@ -24,87 +42,203 @@ def random_file(ext): return "temp_test_%d.%s" % (random_int(), ext) -def score_model_in_sagemaker_docker_container(model_path, data, flavor=mlflow.pyfunc.FLAVOR_NAME): +def score_model_in_sagemaker_docker_container( + model_uri, data, content_type, flavor=mlflow.pyfunc.FLAVOR_NAME, + activity_polling_timeout_seconds=500): """ - :param model_path: Path to the model to be served. + :param model_uri: URI to the model to be served. :param data: The data to send to the docker container for testing. This is either a - Pandas dataframe or a JSON-formatted string. + Pandas dataframe or string of the format specified by `content_type`. + :param content_type: The type of the data to send to the docker container for testing. This is + one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`. :param flavor: Model flavor to be deployed. + :param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before + declaring the scoring process to have failed. 
""" env = dict(os.environ) env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8") - proc = Popen(['mlflow', 'sagemaker', 'run-local', '-m', model_path, '-p', "5000", "-f", flavor], - stdout=PIPE, - stderr=STDOUT, - universal_newlines=True, env=env) - r = _score_proc(proc, 5000, data, "json").content - import json - return json.loads(r) # TODO: we should return pd.Dataframe the same as pyfunc serve + proc = _start_scoring_proc( + cmd=['mlflow', 'sagemaker', 'run-local', '-m', model_uri, '-p', "5000", "-f", flavor], + env=env) + return _evaluate_scoring_proc(proc, 5000, data, content_type, activity_polling_timeout_seconds) -def pyfunc_serve_and_score_model(model_path, data): +def pyfunc_build_image(model_uri, extra_args=None): """ - :param model_path: Path to the model to be served. - :param data: Data in pandas.DataFrame format to send to the docker container for testing. + Builds a docker image containing the specified model, returning the name of the image. + :param model_uri: URI of model, e.g. runs:/some-run-id/run-relative/path/to/model + :param extra_args: List of extra args to pass to `mlflow models build-docker` command + """ + name = uuid.uuid4().hex + cmd = ["mlflow", "models", "build-docker", "-m", model_uri, "-n", name] + if extra_args: + cmd += extra_args + p = Popen(cmd, ) + assert p.wait() == 0, "Failed to build docker image to serve model from %s" % model_uri + return name + + +def pyfunc_serve_from_docker_image(image_name, host_port, extra_args=None): + """ + Serves a model from a docker container, exposing it as an endpoint at the specified port + on the host machine. Returns a handle (Popen object) to the server process. """ env = dict(os.environ) env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8") - cmd = ['mlflow', 'pyfunc', 'serve', '-m', model_path, "-p", "0"] - proc = Popen(cmd, - stdout=PIPE, - stderr=STDOUT, - universal_newlines=True, - env=env) - for x in iter(proc.stdout.readline, ""): - print(x) - m = re.match(pattern=".*Running on http://127.0.0.1:(\\d+).*", string=x) - if m: - return pd.read_json(_score_proc(proc, int(m.group(1)), data, data_type="csv").content, - orient="records") + scoring_cmd = ['docker', 'run', "-p", "%s:8080" % host_port, image_name] + if extra_args is not None: + scoring_cmd += extra_args + return _start_scoring_proc(cmd=scoring_cmd, env=env) - raise Exception("Failed to start server") +def pyfunc_serve_from_docker_image_with_env_override(image_name, + host_port, + gunicorn_opts, + extra_args=None): + """ + Serves a model from a docker container, exposing it as an endpoint at the specified port + on the host machine. Returns a handle (Popen object) to the server process. + """ + env = dict(os.environ) + env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8") + scoring_cmd = ['docker', 'run', "-e", "GUNICORN_CMD_ARGS=%s" % gunicorn_opts, + "-p", "%s:8080" % host_port, image_name] + if extra_args is not None: + scoring_cmd += extra_args + return _start_scoring_proc(cmd=scoring_cmd, env=env) + + +def pyfunc_serve_and_score_model( + model_uri, data, content_type, activity_polling_timeout_seconds=500, extra_args=None, + stdout=sys.stdout): + """ + :param model_uri: URI to the model to be served. + :param data: The data to send to the pyfunc server for testing. This is either a + Pandas dataframe or string of the format specified by `content_type`. + :param content_type: The type of the data to send to the pyfunc server for testing. This is + one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`. 
+ :param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before + declaring the scoring process to have failed. + :param extra_args: A list of extra arguments to pass to the pyfunc scoring server command. For + example, passing ``extra_args=["--no-conda"]`` will pass the ``--no-conda`` + flag to the scoring server to ensure that conda environment activation + is skipped. + """ + env = dict(os.environ) + env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8") + port = get_safe_port() + scoring_cmd = ['mlflow', 'models', 'serve', '-m', model_uri, "-p", str(port)] + if extra_args is not None: + scoring_cmd += extra_args + proc = _start_scoring_proc(cmd=scoring_cmd, env=env, stdout=stdout, stderr=stdout) + return _evaluate_scoring_proc( + proc, port, data, content_type, activity_polling_timeout_seconds) -def _score_proc(proc, port, data, data_type): - try: - for i in range(0, 50): - assert proc.poll() is None, "scoring process died" + +def _start_scoring_proc(cmd, env, stdout=sys.stdout, stderr=sys.stderr): + proc = Popen(cmd, + stdout=stdout, + stderr=stderr, + universal_newlines=True, + env=env, + # Assign the scoring process to a process group. All child processes of the + # scoring process will be assigned to this group as well. This allows child + # processes of the scoring process to be terminated successfully + preexec_fn=os.setsid) + return proc + + +class RestEndpoint: + def __init__(self, proc, port, activity_polling_timeout_seconds=250): + self._proc = proc + self._port = port + self._activity_polling_timeout_seconds = activity_polling_timeout_seconds + + def __enter__(self): + for i in range(0, int(self._activity_polling_timeout_seconds / 5)): + assert self._proc.poll() is None, "scoring process died" time.sleep(5) # noinspection PyBroadException try: - ping_status = requests.get(url='http://localhost:%d/ping' % port) + ping_status = requests.get(url='http://localhost:%d/ping' % self._port) print('connection attempt', i, "server is up! ping status", ping_status) if ping_status.status_code == 200: break except Exception: # pylint: disable=broad-except print('connection attempt', i, "failed, server is not up yet") - - assert proc.poll() is None, "scoring process died" - ping_status = requests.get(url='http://localhost:%d/ping' % port) - print("server up, ping status", ping_status) if ping_status.status_code != 200: raise Exception("ping failed, server is not happy") - if data_type == "json": - if type(data) == pd.DataFrame: - data = data.to_dict(orient="records") - r = requests.post(url='http://localhost:%d/invocations' % port, - json=data) - elif data_type == "csv": - data = data.to_csv(index=False, header=True) - r = requests.post(url='http://localhost:%d/invocations' % port, - data=data, - headers={"Content-Type": "text/csv"}) - else: - raise Exception("Unexpected data_type %s" % data_type) - if r.status_code != 200: - raise Exception("scoring failed, status code = {}. Response = '{}' ".format( - r.status_code, - r)) - return r - finally: - if proc.poll() is None: - proc.terminate() - print("captured output of the scoring process") - print("-------------------------STDOUT------------------------------") - print(proc.stdout.read()) - print("==============================================================") + print("server up, ping status", ping_status) + return self + + def __exit__(self, tp, val, traceback): + if self._proc.poll() is None: + # Terminate the process group containing the scoring process. 
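+ # (The process group was created by _start_scoring_proc via preexec_fn=os.setsid.)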
+ # This will terminate all child processes of the scoring process + pgrp = os.getpgid(self._proc.pid) + os.killpg(pgrp, signal.SIGTERM) + + def invoke(self, data, content_type): + if type(data) == pd.DataFrame: + if content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED: + data = data.to_json(orient="records") + elif content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON \ + or content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED: + data = data.to_json(orient="split") + elif content_type == pyfunc_scoring_server.CONTENT_TYPE_CSV: + data = data.to_csv(index=False) + else: + raise Exception( + "Unexpected content type for Pandas dataframe input %s" % content_type) + response = requests.post(url='http://localhost:%d/invocations' % self._port, + data=data, + headers={"Content-Type": content_type}) + return response + + +def _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds=250): + """ + :param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before + declaring the scoring process to have failed. + """ + with RestEndpoint(proc, port, activity_polling_timeout_seconds) as endpoint: + return endpoint.invoke(data, content_type) + + +@pytest.fixture(scope='module', autouse=True) +def set_boto_credentials(): + os.environ["AWS_ACCESS_KEY_ID"] = "NotARealAccessKey" + os.environ["AWS_SECRET_ACCESS_KEY"] = "NotARealSecretAccessKey" + os.environ["AWS_SESSION_TOKEN"] = "NotARealSessionToken" + + +@pytest.fixture +def mock_s3_bucket(): + """ + Creates a mock S3 bucket using moto + + :return: The name of the mock bucket + """ + import boto3 + import moto + + with moto.mock_s3(): + bucket_name = "mock-bucket" + s3_client = boto3.client("s3") + s3_client.create_bucket(Bucket=bucket_name) + yield bucket_name + + +class safe_edit_yaml(object): + def __init__(self, root, file_name, edit_func): + self._root = root + self._file_name = file_name + self._edit_func = edit_func + self._original = read_yaml(root, file_name) + + def __enter__(self): + new_dict = self._edit_func(self._original.copy()) + write_yaml(self._root, self._file_name, new_dict, overwrite=True) + + def __exit__(self, *args): + write_yaml(self._root, self._file_name, self._original, overwrite=True) diff --git a/tests/keras/test_keras_model_export.py b/tests/keras/test_keras_model_export.py index 034c7dee486fe..e0d00f9bad3d3 100644 --- a/tests/keras/test_keras_model_export.py +++ b/tests/keras/test_keras_model_export.py @@ -2,18 +2,36 @@ from __future__ import print_function +import h5py import os +import json import pytest +import shutil +import importlib from keras.models import Sequential -from keras.layers import Dense +from keras.layers import Layer, Dense +from keras import backend as K import sklearn.datasets as datasets import pandas as pd import numpy as np +import yaml +import mock -import mlflow.keras import mlflow +import mlflow.keras +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server from mlflow import pyfunc +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration from tests.helper_functions import pyfunc_serve_and_score_model +from tests.helper_functions import score_model_in_sagemaker_docker_container +from tests.helper_functions import 
set_boto_credentials # pylint: disable=unused-import +from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import +from tests.pyfunc.test_spark import score_model_as_udf from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import @@ -38,48 +56,367 @@ def model(data): return model +@pytest.fixture(scope='module') +def tf_keras_model(data): + x, y = data + from tensorflow.keras.models import Sequential as TfSequential + from tensorflow.keras.layers import Dense as TfDense + model = TfSequential() + model.add(TfDense(3, input_dim=4)) + model.add(TfDense(1)) + model.compile(loss='mean_squared_error', optimizer='SGD') + model.fit(x, y) + return model + + @pytest.fixture(scope='module') def predicted(model, data): return model.predict(data[0]) -def test_model_save_load(tmpdir, model, data, predicted): +@pytest.fixture(scope='module') +def custom_layer(): + class MyDense(Layer): + def __init__(self, output_dim, **kwargs): + self.output_dim = output_dim + super(MyDense, self).__init__(**kwargs) + + def build(self, input_shape): + self.kernel = self.add_weight(name='kernel', + shape=(input_shape[1], self.output_dim), + initializer='uniform', + trainable=True) + super(MyDense, self).build(input_shape) + + def call(self, x): + return K.dot(x, self.kernel) + + def compute_output_shape(self, input_shape): + return (input_shape[0], self.output_dim) + + def get_config(self): + return {'output_dim': self.output_dim} + + return MyDense + + +@pytest.fixture(scope='module') +def custom_model(data, custom_layer): x, y = data - path = os.path.join(tmpdir.strpath, "model") - mlflow.keras.save_model(model, path) + x, y = x.values, y.values + model = Sequential() + model.add(custom_layer(6)) + model.add(Dense(1)) + model.compile(loss='mean_squared_error', optimizer='SGD') + model.fit(x, y, epochs=1) + return model + + +@pytest.fixture(scope='module') +def custom_predicted(custom_model, data): + return custom_model.predict(data[0]) + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(tmpdir.strpath, "model") + + +@pytest.fixture +def keras_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["keras", "tensorflow", "pytest"]) + return conda_env - # Loading Keras model - model_loaded = mlflow.keras.load_model(path) - assert all(model_loaded.predict(x) == predicted) +def test_that_keras_module_arg_works(model_path): + class MyModel(object): + def __init__(self, x): + self._x = x + + def __eq__(self, other): + return self._x == other._x + + def save(self, path, **kwargs): + with h5py.File(path, "w") as f: + f.create_dataset(name="x", data=self._x) + + class FakeKerasModule(object): + __name__ = "some.test.keras.module" + __version__ = "42.42.42" + + @staticmethod + def load_model(file, **kwargs): + return MyModel(file.get("x").value) + + def _import_module(name, **kwargs): + if name.startswith(FakeKerasModule.__name__): + return FakeKerasModule + else: + return importlib.import_module(name, **kwargs) + + with mock.patch("importlib.import_module") as import_module_mock: + import_module_mock.side_effect = _import_module + x = MyModel("x123") + path0 = os.path.join(model_path, "0") + with pytest.raises(MlflowException): + mlflow.keras.save_model(x, path0) + mlflow.keras.save_model(x, path0, keras_module=FakeKerasModule) + y = mlflow.keras.load_model(path0) + assert x == y + path1 = os.path.join(model_path, "1") + mlflow.keras.save_model(x, path1, 
keras_module=FakeKerasModule.__name__) + z = mlflow.keras.load_model(path1) + assert x == z + # Test model log + with mlflow.start_run() as active_run: + with pytest.raises(MlflowException): + mlflow.keras.log_model(x, "model0") + mlflow.keras.log_model(x, "model0", keras_module=FakeKerasModule) + a = mlflow.keras.load_model("runs:/{}/model0".format(active_run.info.run_id)) + assert x == a + mlflow.keras.log_model(x, "model1", keras_module=FakeKerasModule.__name__) + b = mlflow.keras.load_model("runs:/{}/model1".format(active_run.info.run_id)) + assert x == b + + +@pytest.mark.parametrize("build_model", [model, tf_keras_model]) +@pytest.mark.large +def test_model_save_load(build_model, model_path, data): + x, _ = data + keras_model = build_model(data) + if build_model == tf_keras_model: + model_path = os.path.join(model_path, "tf") + else: + model_path = os.path.join(model_path, "plain") + expected = keras_model.predict(x) + mlflow.keras.save_model(keras_model, model_path) + # Loading Keras model + model_loaded = mlflow.keras.load_model(model_path) + assert type(keras_model) == type(model_loaded) + assert all(expected == model_loaded.predict(x)) # Loading pyfunc model - pyfunc_loaded = mlflow.pyfunc.load_pyfunc(path) - assert all(pyfunc_loaded.predict(x).values == predicted) + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) + assert all(pyfunc_loaded.predict(x).values == expected) # pyfunc serve - preds = pyfunc_serve_and_score_model(model_path=os.path.abspath(path), data=pd.DataFrame(x)) - assert all(preds.values.astype(np.float32) == predicted) + scoring_response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=pd.DataFrame(x), + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + assert all(pd.read_json(scoring_response.content, orient="records").values.astype(np.float32) + == expected) + # test spark udf + spark_udf_preds = score_model_as_udf(model_uri=os.path.abspath(model_path), + pandas_df=pd.DataFrame(x), + result_type="float") + np.testing.assert_array_almost_equal( + np.array(spark_udf_preds), expected.reshape(len(spark_udf_preds)), decimal=4) + +@pytest.mark.large +def test_custom_model_save_load(custom_model, custom_layer, data, custom_predicted, model_path): + x, _ = data + custom_objects = {'MyDense': custom_layer} + mlflow.keras.save_model(custom_model, model_path, custom_objects=custom_objects) + # Loading Keras model + model_loaded = mlflow.keras.load_model(model_path) + assert all(model_loaded.predict(x) == custom_predicted) + # pyfunc serve + scoring_response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=pd.DataFrame(x), + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + assert np.allclose( + pd.read_json(scoring_response.content, orient="records").values.astype(np.float32), + custom_predicted, + rtol=1e-5, + atol=1e-9) + # Loading pyfunc model + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) + assert all(pyfunc_loaded.predict(x).values == custom_predicted) + # test spark udf + spark_udf_preds = score_model_as_udf(model_uri=os.path.abspath(model_path), + pandas_df=pd.DataFrame(x), + result_type="float") + np.testing.assert_array_almost_equal( + np.array(spark_udf_preds), custom_predicted.reshape(len(spark_udf_preds)), decimal=4) + + +def test_custom_model_save_respects_user_custom_objects(custom_model, custom_layer, model_path): + class DifferentCustomLayer(): + def __init__(self): + pass + + incorrect_custom_objects = {'MyDense': DifferentCustomLayer()} 
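+ # Loading with the correct custom_objects should succeed even though the model was saved with a mismatched mapping, while loading with no custom_objects should fail.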
+ correct_custom_objects = {'MyDense': custom_layer} + mlflow.keras.save_model(custom_model, model_path, custom_objects=incorrect_custom_objects) + model_loaded = mlflow.keras.load_model(model_path, custom_objects=correct_custom_objects) + assert model_loaded is not None + with pytest.raises(TypeError): + model_loaded = mlflow.keras.load_model(model_path) + + +@pytest.mark.large +def test_model_load_from_remote_uri_succeeds(model, model_path, mock_s3_bucket, data, predicted): + x, _ = data + mlflow.keras.save_model(model, model_path) + + artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + artifact_path = "model" + artifact_repo = S3ArtifactRepository(artifact_root) + artifact_repo.log_artifacts(model_path, artifact_path=artifact_path) + + model_uri = artifact_root + "/" + artifact_path + model_loaded = mlflow.keras.load_model(model_uri=model_uri) + assert all(model_loaded.predict(x) == predicted) + + +@pytest.mark.large def test_model_log(tracking_uri_mock, model, data, predicted): # pylint: disable=unused-argument - x, y = data + x, _ = data # should_start_run tests whether or not calling log_model() automatically starts a run. for should_start_run in [False, True]: try: if should_start_run: mlflow.start_run() - mlflow.keras.log_model(model, artifact_path="keras_model") + artifact_path = "keras_model" + mlflow.keras.log_model(model, artifact_path=artifact_path) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) # Load model - model_loaded = mlflow.keras.load_model( - "keras_model", - run_id=mlflow.active_run().info.run_uuid) + model_loaded = mlflow.keras.load_model(model_uri=model_uri) assert all(model_loaded.predict(x) == predicted) # Loading pyfunc model - pyfunc_loaded = mlflow.pyfunc.load_pyfunc( - "keras_model", - run_id=mlflow.active_run().info.run_uuid) + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_uri=model_uri) assert all(pyfunc_loaded.predict(x).values == predicted) finally: mlflow.end_run() + + +@pytest.mark.large +def test_model_save_persists_specified_conda_env_in_mlflow_model_directory( + model, model_path, keras_custom_env): + mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=keras_custom_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != keras_custom_env + + with open(keras_custom_env, "r") as f: + keras_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == keras_custom_env_parsed + + +@pytest.mark.large +def test_model_save_accepts_conda_env_as_dict(model, model_path): + conda_env = dict(mlflow.keras.get_default_conda_env()) + conda_env["dependencies"].append("pytest") + mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=conda_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == conda_env + + +@pytest.mark.large +def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(model, keras_custom_env): + artifact_path = 
"model" + with mlflow.start_run(): + mlflow.keras.log_model( + keras_model=model, artifact_path=artifact_path, conda_env=keras_custom_env) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != keras_custom_env + + with open(keras_custom_env, "r") as f: + keras_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == keras_custom_env_parsed + + +@pytest.mark.large +def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( + model, model_path): + mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=None) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.keras.get_default_conda_env() + + +@pytest.mark.large +def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( + model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.keras.log_model(keras_model=model, artifact_path=artifact_path, conda_env=None) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.keras.get_default_conda_env() + + +@pytest.mark.large +def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path( + model, model_path, data, predicted): + """ + This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.8.0 + can be loaded successfully. These models are missing the `data` flavor configuration key. 
+ """ + mlflow.keras.save_model(keras_model=model, path=model_path) + shutil.move( + os.path.join(model_path, 'data', 'model.h5'), + os.path.join(model_path, 'model.h5')) + model_conf_path = os.path.join(model_path, "MLmodel") + model_conf = Model.load(model_conf_path) + flavor_conf = model_conf.flavors.get(mlflow.keras.FLAVOR_NAME, None) + assert flavor_conf is not None + del flavor_conf['data'] + model_conf.save(model_conf_path) + + model_loaded = mlflow.keras.load_model(model_path) + assert all(model_loaded.predict(data[0]) == predicted) + + +@pytest.mark.release +def test_sagemaker_docker_model_scoring_with_default_conda_env(model, model_path, data, predicted): + mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=None) + + scoring_response = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=data[0], + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + flavor=mlflow.pyfunc.FLAVOR_NAME, + activity_polling_timeout_seconds=500) + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + + np.testing.assert_array_almost_equal( + deployed_model_preds.values, + predicted, + decimal=4) diff --git a/tests/models/__init__.py b/tests/models/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/models/test_cli.py b/tests/models/test_cli.py new file mode 100644 index 0000000000000..d42301d88cbf9 --- /dev/null +++ b/tests/models/test_cli.py @@ -0,0 +1,281 @@ +import json +import os +import subprocess +import sys + +import numpy as np +import pandas as pd +import pytest +import re +import sklearn +import sklearn.datasets +import sklearn.neighbors + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import mlflow +from mlflow import pyfunc +import mlflow.sklearn +from mlflow.utils.file_utils import TempDir, path_to_local_file_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils import PYTHON_VERSION +from tests.models import test_pyfunc +from tests.helper_functions import pyfunc_build_image, pyfunc_serve_from_docker_image, \ + pyfunc_serve_from_docker_image_with_env_override, \ + RestEndpoint, get_safe_port, pyfunc_serve_and_score_model +from mlflow.protos.databricks_pb2 import ErrorCode, MALFORMED_REQUEST +from mlflow.pyfunc.scoring_server import CONTENT_TYPE_JSON_SPLIT_ORIENTED, \ + CONTENT_TYPE_JSON, CONTENT_TYPE_CSV + +in_travis = 'TRAVIS' in os.environ +# NB: for now, windows tests on Travis do not have conda available. +no_conda = ["--no-conda"] if in_travis and sys.platform == "win32" else [] + +# NB: need to install mlflow since the pip version does not have mlflow models cli. +install_mlflow = ["--install-mlflow"] if not no_conda else [] + +extra_options = no_conda + install_mlflow +gunicorn_options = "--timeout 60 -w 5" + + +@pytest.fixture(scope="module") +def iris_data(): + iris = sklearn.datasets.load_iris() + x = iris.data[:, :2] + y = iris.target + return x, y + + +@pytest.fixture(scope="module") +def sk_model(iris_data): + x, y = iris_data + knn_model = sklearn.neighbors.KNeighborsClassifier() + knn_model.fit(x, y) + return knn_model + + +def test_predict_with_old_mlflow_in_conda_and_with_orient_records(iris_data): + if no_conda: + pytest.skip("This test needs conda.") + # TODO: Enable this test after 1.0 is out to ensure we do not break the serve / predict + # TODO: Also add a test for serve, not just predict. 
+ pytest.skip("TODO: enable this after 1.0 release is out.") + x, _ = iris_data + with TempDir() as tmp: + input_records_path = tmp.path("input_records.json") + pd.DataFrame(x).to_json(input_records_path, orient="records") + output_json_path = tmp.path("output.json") + test_model_path = tmp.path("test_model") + test_model_conda_path = tmp.path("conda.yml") + # create env with old mlflow! + _mlflow_conda_env(path=test_model_conda_path, + additional_pip_deps=["mlflow=={}".format(test_pyfunc.MLFLOW_VERSION)]) + pyfunc.save_model(path=test_model_path, + loader_module=test_pyfunc.__name__.split(".")[-1], + code_path=[test_pyfunc.__file__], + conda_env=test_model_conda_path) + # explicit json format with orient records + p = subprocess.Popen(["mlflow", "models", "predict", "-m", + path_to_local_file_uri(test_model_path), "-i", input_records_path, + "-o", output_json_path, "-t", "json", "--json-format", "records"] + + no_conda) + assert 0 == p.wait() + actual = pd.read_json(output_json_path, orient="records") + actual = actual[actual.columns[0]].values + expected = test_pyfunc.PyFuncTestModel(check_version=False).predict(df=pd.DataFrame(x)) + assert all(expected == actual) + + +def test_mlflow_is_not_installed_unless_specified(): + if no_conda: + pytest.skip("This test requires conda.") + with TempDir(chdr=True) as tmp: + fake_model_path = tmp.path("fake_model") + fake_env_path = tmp.path("fake_env.yaml") + _mlflow_conda_env(path=fake_env_path, install_mlflow=False) + mlflow.pyfunc.save_model(fake_model_path, loader_module=__name__, conda_env=fake_env_path) + # The following should fail because there should be no mlflow in the env: + p = subprocess.Popen(["mlflow", "models", "predict", "-m", fake_model_path], + stderr=subprocess.PIPE, cwd=tmp.path("")) + _, stderr = p.communicate() + stderr = stderr.decode("utf-8") + print(stderr) + assert p.wait() != 0 + if PYTHON_VERSION.startswith("3"): + assert "ModuleNotFoundError: No module named 'mlflow'" in stderr + else: + assert "ImportError: No module named mlflow.pyfunc.scoring_server" in stderr + + +def test_model_with_no_deployable_flavors_fails_politely(): + from mlflow.models import Model + with TempDir(chdr=True) as tmp: + m = Model(artifact_path=None, run_id=None, utc_time_created="now", + flavors={"some": {}, "useless": {}, "flavors": {}}) + os.mkdir(tmp.path("model")) + m.save(tmp.path("model", "MLmodel")) + # The following should fail because there should be no suitable flavor + p = subprocess.Popen(["mlflow", "models", "predict", "-m", tmp.path("model")], + stderr=subprocess.PIPE, cwd=tmp.path("")) + _, stderr = p.communicate() + stderr = stderr.decode("utf-8") + print(stderr) + assert p.wait() != 0 + assert "No suitable flavor backend was found for the model." 
in stderr + + +def test_serve_gunicorn_opts(iris_data, sk_model): + if sys.platform == "win32": + pytest.skip("This test requires gunicorn which is not available on windows.") + with mlflow.start_run() as active_run: + mlflow.sklearn.log_model(sk_model, "model") + model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id) + + with TempDir() as tmp: + output_file_path = tmp.path("stdout") + with open(output_file_path, "w") as output_file: + x, _ = iris_data + scoring_response = pyfunc_serve_and_score_model( + model_uri, pd.DataFrame(x), + content_type=CONTENT_TYPE_JSON_SPLIT_ORIENTED, + stdout=output_file, + extra_args=["-w", "3"]) + with open(output_file_path, "r") as output_file: + stdout = output_file.read() + actual = pd.read_json(scoring_response.content, orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + expected_command_pattern = re.compile(( + "gunicorn.*-w 3.*mlflow.pyfunc.scoring_server.wsgi:app")) + assert expected_command_pattern.search(stdout) is not None + + +def test_predict(iris_data, sk_model): + with TempDir(chdr=True) as tmp: + with mlflow.start_run() as active_run: + mlflow.sklearn.log_model(sk_model, "model") + model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id) + input_json_path = tmp.path("input.json") + input_csv_path = tmp.path("input.csv") + output_json_path = tmp.path("output.json") + x, _ = iris_data + pd.DataFrame(x).to_json(input_json_path, orient="split") + pd.DataFrame(x).to_csv(input_csv_path, index=False) + + # Test with no conda + p = subprocess.Popen(["mlflow", "models", "predict", "-m", model_uri, "-i", input_json_path, + "-o", output_json_path, "--no-conda"], stderr=subprocess.PIPE) + assert p.wait() == 0 + actual = pd.read_json(output_json_path, orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + + # With conda + --install-mlflow + p = subprocess.Popen(["mlflow", "models", "predict", "-m", model_uri, "-i", input_json_path, + "-o", output_json_path] + extra_options) + assert 0 == p.wait() + actual = pd.read_json(output_json_path, orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + + # explicit json format with default orient (should be split) + p = subprocess.Popen(["mlflow", "models", "predict", "-m", model_uri, "-i", input_json_path, + "-o", output_json_path, "-t", "json"] + extra_options) + assert 0 == p.wait() + actual = pd.read_json(output_json_path, orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + + # explicit json format with orient==split + p = subprocess.Popen(["mlflow", "models", "predict", "-m", model_uri, "-i", input_json_path, + "-o", output_json_path, "-t", "json", "--json-format", "split"] + + extra_options) + assert 0 == p.wait() + actual = pd.read_json(output_json_path, orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + + # read from stdin, write to stdout.
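+ # With no -i/-o arguments, the input is piped to the subprocess's stdin and the predictions are read back from its stdout below.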
+ p = subprocess.Popen(["mlflow", "models", "predict", "-m", model_uri, "-t", "json", + "--json-format", "split"] + extra_options, + universal_newlines=True, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=sys.stderr) + with open(input_json_path, "r") as f: + stdout, _ = p.communicate(f.read()) + assert 0 == p.wait() + actual = pd.read_json(StringIO(stdout), orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + + # NB: We do not test orient=records here because records may lose column ordering. + # orient == records is tested in another test with a simpler model. + + # csv + p = subprocess.Popen(["mlflow", "models", "predict", "-m", model_uri, "-i", input_csv_path, + "-o", output_json_path, "-t", "csv"] + extra_options) + assert 0 == p.wait() + actual = pd.read_json(output_json_path, orient="records") + actual = actual[actual.columns[0]].values + expected = sk_model.predict(x) + assert all(expected == actual) + + +@pytest.mark.large +def test_build_docker(iris_data, sk_model): + with mlflow.start_run() as active_run: + mlflow.sklearn.log_model(sk_model, "model") + model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id) + x, _ = iris_data + df = pd.DataFrame(x) + image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"]) + host_port = get_safe_port() + scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port) + _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model) + + +@pytest.mark.large +def test_build_docker_with_env_override(iris_data, sk_model): + with mlflow.start_run() as active_run: + mlflow.sklearn.log_model(sk_model, "model") + model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id) + x, _ = iris_data + df = pd.DataFrame(x) + image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"]) + host_port = get_safe_port() + scoring_proc = pyfunc_serve_from_docker_image_with_env_override(image_name, + host_port, + gunicorn_options) + _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model) + + +def _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model): + with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint: + for content_type in [CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]: + scoring_response = endpoint.invoke(df, content_type) + assert scoring_response.status_code == 200, "Failed to serve prediction, got " \ + "response %s" % scoring_response.text + np.testing.assert_array_equal( + np.array(json.loads(scoring_response.text)), + sk_model.predict(x)) + # Try examples of bad input, verify we get a non-200 status code + for content_type in [CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]: + scoring_response = endpoint.invoke(data="", content_type=content_type) + assert scoring_response.status_code == 500, \ + "Expected server failure with error code 500, got response with status code %s " \ + "and body %s" % (scoring_response.status_code, scoring_response.text) + scoring_response_dict = json.loads(scoring_response.content) + assert "error_code" in scoring_response_dict + assert scoring_response_dict["error_code"] == ErrorCode.Name(MALFORMED_REQUEST) + assert "message" in scoring_response_dict + assert "stack_trace" in scoring_response_dict diff --git a/tests/models/test_pyfunc.py b/tests/models/test_pyfunc.py new file mode 100644 index 0000000000000..1f79b15d34faf --- /dev/null +++ b/tests/models/test_pyfunc.py @@ -0,0 +1,17
@@ +MLFLOW_VERSION = "1.0.0" # we expect this model to be bound to this mlflow version. + + +class PyFuncTestModel: + def __init__(self, check_version=True): + self._check_version = check_version + + def predict(self, df): + from mlflow.version import VERSION + if self._check_version: + assert VERSION == MLFLOW_VERSION + mu = df.mean().mean() + return [mu for _ in range(len(df))] + + +def _load_pyfunc(_): + return PyFuncTestModel() diff --git a/tests/onnx/test_onnx_model_export.py b/tests/onnx/test_onnx_model_export.py new file mode 100644 index 0000000000000..533883802d476 --- /dev/null +++ b/tests/onnx/test_onnx_model_export.py @@ -0,0 +1,405 @@ +from __future__ import print_function + +import sys +import os +import pytest +import mock + +from keras.models import Sequential +from keras.layers import Dense +import sklearn.datasets as datasets +import pandas as pd +import numpy as np +import yaml + +import tensorflow as tf +import mlflow +import mlflow.keras +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server +from mlflow import pyfunc +from tests.helper_functions import pyfunc_serve_and_score_model +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration +from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import + +pytestmark = pytest.mark.skipif( + (sys.version_info < (3, 6)), + reason="Tests require Python 3 to run!") + + +@pytest.fixture(scope='module') +def data(): + iris = datasets.load_iris() + data = pd.DataFrame(data=np.c_[iris['data'], iris['target']], + columns=iris['feature_names'] + ['target']) + y = data['target'] + x = data.drop('target', axis=1) + return x, y + + +@pytest.fixture(scope='module') +def model(data): + x, y = data + model = Sequential() + model.add(Dense(3, input_dim=4)) + model.add(Dense(1)) + model.compile(loss='mean_squared_error', optimizer='SGD') + model.fit(x, y) + return model + + +@pytest.fixture(scope='module') +def onnx_model(model): + import onnxmltools + return onnxmltools.convert_keras(model) + + +@pytest.fixture(scope='module') +def predicted(model, data): + return model.predict(data[0]) + + +@pytest.fixture(scope='module') +def tf_model_multiple_inputs_float64(): + graph = tf.Graph() + with graph.as_default(): + t_in1 = tf.placeholder(tf.float64, 10, name="first_input") + t_in2 = tf.placeholder(tf.float64, 10, name="second_input") + t_out = tf.multiply(t_in1, t_in2) + t_out_named = tf.identity(t_out, name="output") + return graph + + +@pytest.fixture(scope='module') +def tf_model_multiple_inputs_float32(): + graph = tf.Graph() + with graph.as_default(): + t_in1 = tf.placeholder(tf.float32, 10, name="first_input") + t_in2 = tf.placeholder(tf.float32, 10, name="second_input") + t_out = tf.multiply(t_in1, t_in2) + t_out_named = tf.identity(t_out, name="output") + return graph + + +@pytest.fixture(scope='module') +def onnx_model_multiple_inputs_float64(tf_model_multiple_inputs_float64): + import tf2onnx + sess = tf.Session(graph=tf_model_multiple_inputs_float64) + + onnx_graph = tf2onnx.tfonnx.process_tf_graph( + sess.graph, + input_names=[ + "first_input:0", + "second_input:0", + ], + output_names=["output:0"]) + model_proto = onnx_graph.make_model("test") + return model_proto + + +@pytest.fixture(scope='module') +def onnx_model_multiple_inputs_float32(tf_model_multiple_inputs_float32): + import tf2onnx + sess = tf.Session(graph=tf_model_multiple_inputs_float32) + + onnx_graph = 
tf2onnx.tfonnx.process_tf_graph( + sess.graph, + input_names=[ + "first_input:0", + "second_input:0", + ], + output_names=["output:0"]) + model_proto = onnx_graph.make_model("test") + return model_proto + + +@pytest.fixture(scope='module') +def data_multiple_inputs(): + return pd.DataFrame({ + "first_input:0": np.random.random(10), + "second_input:0": np.random.random(10), + }) + + +@pytest.fixture(scope='module') +def predicted_multiple_inputs(data_multiple_inputs): + return pd.DataFrame( + data_multiple_inputs["first_input:0"] * data_multiple_inputs["second_input:0"]) + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(tmpdir.strpath, "model") + + +@pytest.fixture +def onnx_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["pytest", "keras"], + additional_pip_deps=["onnx", "onnxmltools"]) + return conda_env + + +@pytest.mark.large +def test_cast_float64_to_float32(): + import mlflow.onnx + df = pd.DataFrame([[1.0, 2.1], [True, False]], columns=['col1', 'col2']) + df['col1'] = df['col1'].astype(np.float64) + df['col2'] = df['col2'].astype(np.bool) + df2 = mlflow.onnx._OnnxModelWrapper._cast_float64_to_float32(df, df.columns) + assert df2['col1'].dtype == np.float32 and df2['col2'].dtype == np.bool + + +# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime +# library +@pytest.mark.large +def test_model_save_load(onnx_model, model_path, onnx_custom_env): + import onnx + import mlflow.onnx + mlflow.onnx.save_model(onnx_model, model_path, conda_env=onnx_custom_env) + + # Loading ONNX model + onnx.checker.check_model = mock.Mock() + mlflow.onnx.load_model(model_path) + assert onnx.checker.check_model.called + + +# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library +@pytest.mark.release +def test_model_save_load_evaluate_pyfunc_format(onnx_model, model_path, data, predicted): + import onnx + import mlflow.onnx + x, y = data + mlflow.onnx.save_model(onnx_model, model_path) + + # Loading pyfunc model + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) + assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, + atol=1e-05) + + # pyfunc serve + scoring_response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=x, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + assert np.allclose( + pd.read_json(scoring_response.content, orient="records").values.astype(np.float32), + predicted, rtol=1e-05, atol=1e-05) + + +# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime +# library +@pytest.mark.large +def test_model_save_load_multiple_inputs( + onnx_model_multiple_inputs_float64, model_path, onnx_custom_env): + import onnx + import mlflow.onnx + + mlflow.onnx.save_model(onnx_model_multiple_inputs_float64, + model_path, conda_env=onnx_custom_env) + + # Loading ONNX model + onnx.checker.check_model = mock.Mock() + mlflow.onnx.load_model(model_path) + assert onnx.checker.check_model.called + + +# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library +@pytest.mark.release +def test_model_save_load_evaluate_pyfunc_format_multiple_inputs( + onnx_model_multiple_inputs_float64, data_multiple_inputs, predicted_multiple_inputs, + model_path): + import onnx + import mlflow.onnx + + mlflow.onnx.save_model(onnx_model_multiple_inputs_float64, model_path) + + # Loading pyfunc model + pyfunc_loaded = 
mlflow.pyfunc.load_pyfunc(model_path) + assert np.allclose(pyfunc_loaded.predict(data_multiple_inputs).values, + predicted_multiple_inputs.values, rtol=1e-05, atol=1e-05) + + # pyfunc serve + scoring_response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=data_multiple_inputs, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + assert np.allclose(pd.read_json(scoring_response.content, orient="records").values, + predicted_multiple_inputs.values, rtol=1e-05, atol=1e-05) + + +# TODO: Remove test, along with explicit casting, when https://github.com/mlflow/mlflow/issues/1286 +# is fixed. +# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library +@pytest.mark.release +def test_pyfunc_representation_of_float32_model_casts_and_evaluates_float64_inputs( + onnx_model_multiple_inputs_float32, model_path, data_multiple_inputs, + predicted_multiple_inputs): + """ + The ``python_function`` representation of an MLflow model with the ONNX flavor + casts 64-bit floats to 32-bit floats automatically before evaluating, as opposed + to throwing an unexpected type exception. This behavior is implemented due + to the issue described in https://github.com/mlflow/mlflow/issues/1286 where + the JSON representation of a Pandas DataFrame does not always preserve float + precision (e.g., 32-bit floats may be converted to 64-bit floats when persisting a + DataFrame as JSON). + """ + import onnx + import mlflow.onnx + + mlflow.onnx.save_model(onnx_model_multiple_inputs_float32, model_path) + + # Loading pyfunc model + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) + assert np.allclose(pyfunc_loaded.predict(data_multiple_inputs.astype("float64")).values, + predicted_multiple_inputs.astype("float32").values, rtol=1e-05, atol=1e-05) + + with pytest.raises(RuntimeError): + pyfunc_loaded.predict(data_multiple_inputs.astype("int32")) + + +# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime +# library +@pytest.mark.large +def test_model_log(tracking_uri_mock, onnx_model, onnx_custom_env): + # pylint: disable=unused-argument + + import onnx + import mlflow.onnx + # should_start_run tests whether or not calling log_model() automatically starts a run.
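+ # When should_start_run is False, log_model() is expected to start the run itself; the run is ended in the finally block in both cases.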
+ for should_start_run in [False, True]: + try: + if should_start_run: + mlflow.start_run() + artifact_path = "onnx_model" + mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + # Loading pyfunc model + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_uri=model_uri) + assert np.allclose(pyfunc_loaded.predict(x).values, predicted, + rtol=1e-05, atol=1e-05) + finally: + mlflow.end_run() + + +@pytest.mark.large +def test_model_save_persists_specified_conda_env_in_mlflow_model_directory( + onnx_model, model_path, onnx_custom_env): + import mlflow.onnx + mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=onnx_custom_env) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != onnx_custom_env + + with open(onnx_custom_env, "r") as f: + onnx_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == onnx_custom_env_parsed + + +# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library +@pytest.mark.release +def test_model_save_accepts_conda_env_as_dict(onnx_model, model_path): + import mlflow.onnx + conda_env = dict(mlflow.onnx.get_default_conda_env()) + conda_env["dependencies"].append("pytest") + mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=conda_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == conda_env + + +@pytest.mark.large +def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(onnx_model, + onnx_custom_env): + import mlflow.onnx + artifact_path = "model" + with mlflow.start_run(): + mlflow.onnx.log_model( + onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != onnx_custom_env + + with open(onnx_custom_env, "r") as f: + onnx_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == onnx_custom_env_parsed + + +# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library +@pytest.mark.release +def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( + onnx_model, model_path): + import mlflow.onnx + mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=None) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, 
"r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.onnx.get_default_conda_env() + + +# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library +@pytest.mark.release +def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( + onnx_model): + import mlflow.onnx + artifact_path = "model" + with mlflow.start_run(): + mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path, conda_env=None) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.onnx.get_default_conda_env() diff --git a/tests/projects/test_databricks.py b/tests/projects/test_databricks.py index 494178c1fea17..7f4376b758e70 100644 --- a/tests/projects/test_databricks.py +++ b/tests/projects/test_databricks.py @@ -15,9 +15,12 @@ from mlflow.projects import databricks, ExecutionException from mlflow.tracking import MlflowClient from mlflow.utils import file_utils +from mlflow.store.file_store import FileStore from mlflow.utils.mlflow_tags import MLFLOW_DATABRICKS_RUN_URL, \ MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID, \ MLFLOW_DATABRICKS_WEBAPP_URL +from mlflow.utils.rest_utils import _DEFAULT_HEADERS + from tests.projects.utils import validate_exit_status, TEST_PROJECT_DIR from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import @@ -115,10 +118,10 @@ def mock_runs_get_result(succeeded): return {"state": run_state, "run_page_url": "test_url"} -def run_databricks_project(cluster_spec_path, block=False): +def run_databricks_project(cluster_spec, **kwargs): return mlflow.projects.run( - uri=TEST_PROJECT_DIR, mode="databricks", cluster_spec=cluster_spec_path, block=block, - parameters={"alpha": "0.4"}) + uri=TEST_PROJECT_DIR, backend="databricks", backend_config=cluster_spec, + parameters={"alpha": "0.4"}, **kwargs) def test_upload_project_to_dbfs( @@ -128,7 +131,7 @@ def test_upload_project_to_dbfs( dbfs_path_exists_mock.return_value = False runner = DatabricksJobRunner(databricks_profile="DEFAULT") dbfs_uri = runner._upload_project_to_dbfs( - project_dir=TEST_PROJECT_DIR, experiment_id=0) + project_dir=TEST_PROJECT_DIR, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) # Get expected tar local_tar_path = os.path.join(dbfs_root_mock, dbfs_uri.split("/dbfs/")[1]) expected_tar_path = str(tmpdir.join("expected.tar.gz")) @@ -146,7 +149,7 @@ def test_upload_existing_project_to_dbfs(dbfs_path_exists_mock): # pylint: disa dbfs_path_exists_mock.return_value = True runner = DatabricksJobRunner(databricks_profile="DEFAULT") runner._upload_project_to_dbfs( - project_dir=TEST_PROJECT_DIR, experiment_id=0) + project_dir=TEST_PROJECT_DIR, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) assert upload_to_dbfs_mock.call_count == 0 @@ -162,22 +165,24 @@ def test_run_databricks_validations( # Test bad tracking URI tracking_uri_mock.return_value = tmpdir.strpath with pytest.raises(ExecutionException): - run_databricks_project(cluster_spec_mock, block=True) + run_databricks_project(cluster_spec_mock, synchronous=True) assert db_api_req_mock.call_count == 0 db_api_req_mock.reset_mock() mlflow_service = mlflow.tracking.MlflowClient() - assert 
len(mlflow_service.list_run_infos(experiment_id=0)) == 0 + assert (len(mlflow_service.list_run_infos(experiment_id=FileStore.DEFAULT_EXPERIMENT_ID)) + == 0) tracking_uri_mock.return_value = "http://" # Test misspecified parameters with pytest.raises(ExecutionException): mlflow.projects.run( - TEST_PROJECT_DIR, mode="databricks", entry_point="greeter", - cluster_spec=cluster_spec_mock) + TEST_PROJECT_DIR, backend="databricks", entry_point="greeter", + backend_config=cluster_spec_mock) assert db_api_req_mock.call_count == 0 db_api_req_mock.reset_mock() # Test bad cluster spec with pytest.raises(ExecutionException): - mlflow.projects.run(TEST_PROJECT_DIR, mode="databricks", block=True, cluster_spec=None) + mlflow.projects.run(TEST_PROJECT_DIR, backend="databricks", synchronous=True, + backend_config=None) assert db_api_req_mock.call_count == 0 db_api_req_mock.reset_mock() # Test that validations pass with good tracking URIs @@ -194,25 +199,41 @@ def test_run_databricks( # Test that MLflow gets the correct run status when performing a Databricks run for run_succeeded, expect_status in [(True, RunStatus.FINISHED), (False, RunStatus.FAILED)]: runs_get_mock.return_value = mock_runs_get_result(succeeded=run_succeeded) - submitted_run = run_databricks_project(cluster_spec_mock) + submitted_run = run_databricks_project(cluster_spec_mock, synchronous=False) assert submitted_run.wait() == run_succeeded assert submitted_run.run_id is not None assert runs_submit_mock.call_count == 1 - assert set_tag_mock.call_count == 3 - set_tag_args, _ = set_tag_mock.call_args_list[0] - assert set_tag_args[1] == MLFLOW_DATABRICKS_RUN_URL - assert set_tag_args[2] == 'test_url' - set_tag_args, _ = set_tag_mock.call_args_list[1] - assert set_tag_args[1] == MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID - assert set_tag_args[2] == '-1' - set_tag_args, _ = set_tag_mock.call_args_list[2] - assert set_tag_args[1] == MLFLOW_DATABRICKS_WEBAPP_URL - assert set_tag_args[2] == 'test-host' + tags = {} + for call_args, _ in set_tag_mock.call_args_list: + tags[call_args[1]] = call_args[2] + assert tags[MLFLOW_DATABRICKS_RUN_URL] == 'test_url' + assert tags[MLFLOW_DATABRICKS_SHELL_JOB_RUN_ID] == '-1' + assert tags[MLFLOW_DATABRICKS_WEBAPP_URL] == 'test-host' set_tag_mock.reset_mock() runs_submit_mock.reset_mock() validate_exit_status(submitted_run.get_status(), expect_status) +def test_run_databricks_cluster_spec_json( + before_run_validations_mock, # pylint: disable=unused-argument + tracking_uri_mock, runs_cancel_mock, dbfs_mocks, # pylint: disable=unused-argument + runs_submit_mock, runs_get_mock, + cluster_spec_mock, set_tag_mock): # pylint: disable=unused-argument + with mock.patch.dict(os.environ, {'DATABRICKS_HOST': 'test-host', 'DATABRICKS_TOKEN': 'foo'}): + runs_get_mock.return_value = mock_runs_get_result(succeeded=True) + cluster_spec = { + "spark_version": "5.0.x-scala2.11", + "num_workers": 2, + "node_type_id": "i3.xlarge", + } + # Run project synchronously, verify that it succeeds (doesn't throw) + run_databricks_project(cluster_spec=cluster_spec, synchronous=True) + assert runs_submit_mock.call_count == 1 + runs_submit_args, _ = runs_submit_mock.call_args_list[0] + req_body = runs_submit_args[0] + assert req_body["new_cluster"] == cluster_spec + + def test_run_databricks_cancel( before_run_validations_mock, tracking_uri_mock, # pylint: disable=unused-argument runs_submit_mock, dbfs_mocks, set_tag_mock, # pylint: disable=unused-argument @@ -222,14 +243,14 @@ def test_run_databricks_cancel( # waiting for run status. 
with mock.patch.dict(os.environ, {'DATABRICKS_HOST': 'test-host', 'DATABRICKS_TOKEN': 'foo'}): runs_get_mock.return_value = mock_runs_get_result(succeeded=False) - submitted_run = run_databricks_project(cluster_spec_mock) + submitted_run = run_databricks_project(cluster_spec_mock, synchronous=False) submitted_run.cancel() validate_exit_status(submitted_run.get_status(), RunStatus.FAILED) assert runs_cancel_mock.call_count == 1 # Test that we raise an exception when a blocking Databricks run fails runs_get_mock.return_value = mock_runs_get_result(succeeded=False) with pytest.raises(mlflow.projects.ExecutionException): - run_databricks_project(cluster_spec_mock, block=True) + run_databricks_project(cluster_spec_mock, synchronous=True) def test_get_tracking_uri_for_run(): @@ -257,12 +278,12 @@ def get_config(self): def test_databricks_http_request_integration(get_config, request): """Confirms that the databricks http request params can in fact be used as an HTTP request""" def confirm_request_params(**kwargs): + headers = dict(_DEFAULT_HEADERS) + headers['Authorization'] = 'Basic dXNlcjpwYXNz' assert kwargs == { 'method': 'PUT', 'url': 'host/clusters/list', - 'headers': { - 'Authorization': 'Basic dXNlcjpwYXNz' - }, + 'headers': headers, 'verify': True, 'json': {'a': 'b'} } diff --git a/tests/projects/test_docker_projects.py b/tests/projects/test_docker_projects.py new file mode 100644 index 0000000000000..ab3f994c4cfdf --- /dev/null +++ b/tests/projects/test_docker_projects.py @@ -0,0 +1,121 @@ +import os + +import mock +import pytest + +from databricks_cli.configure.provider import DatabricksConfig + +import mlflow +from mlflow.entities import ViewType +from mlflow.projects import ExecutionException, _get_docker_image_uri +from mlflow.store import file_store +from mlflow.utils.mlflow_tags import MLFLOW_PROJECT_ENV, MLFLOW_DOCKER_IMAGE_URI, \ + MLFLOW_DOCKER_IMAGE_ID + +from tests.projects.utils import TEST_DOCKER_PROJECT_DIR +from tests.projects.utils import build_docker_example_base_image +from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import +from mlflow.projects import _project_spec + + +def _build_uri(base_uri, subdirectory): + if subdirectory != "": + return "%s#%s" % (base_uri, subdirectory) + return base_uri + + +@pytest.mark.parametrize("use_start_run", map(str, [0, 1])) +def test_docker_project_execution( + use_start_run, tmpdir, tracking_uri_mock): # pylint: disable=unused-argument + build_docker_example_base_image() + expected_params = {"use_start_run": use_start_run} + submitted_run = mlflow.projects.run( + TEST_DOCKER_PROJECT_DIR, experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID, + parameters=expected_params, entry_point="test_tracking") + # Validate run contents in the FileStore + run_id = submitted_run.run_id + mlflow_service = mlflow.tracking.MlflowClient() + run_infos = mlflow_service.list_run_infos( + experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID, + run_view_type=ViewType.ACTIVE_ONLY) + assert len(run_infos) == 1 + store_run_id = run_infos[0].run_id + assert run_id == store_run_id + run = mlflow_service.get_run(run_id) + assert run.data.params == expected_params + assert run.data.metrics == {"some_key": 3} + exact_expected_tags = {MLFLOW_PROJECT_ENV: "docker"} + approx_expected_tags = { + MLFLOW_DOCKER_IMAGE_URI: "docker-example", + MLFLOW_DOCKER_IMAGE_ID: "sha256:", + } + run_tags = run.data.tags + for k, v in exact_expected_tags.items(): + assert run_tags[k] == v + for k, v in approx_expected_tags.items(): + assert 
run_tags[k].startswith(v) + + +@pytest.mark.parametrize("tracking_uri, expected_command_segment", [ + (None, "-e MLFLOW_TRACKING_URI=/mlflow/tmp/mlruns"), + ("http://some-tracking-uri", "-e MLFLOW_TRACKING_URI=http://some-tracking-uri"), + ("databricks://some-profile", "-e MLFLOW_TRACKING_URI=databricks ") +]) +@mock.patch('databricks_cli.configure.provider.ProfileConfigProvider') +def test_docker_project_tracking_uri_propagation( + ProfileConfigProvider, tmpdir, tracking_uri, + expected_command_segment): # pylint: disable=unused-argument + build_docker_example_base_image() + mock_provider = mock.MagicMock() + mock_provider.get_config.return_value = \ + DatabricksConfig("host", "user", "pass", None, insecure=True) + ProfileConfigProvider.return_value = mock_provider + # Create and mock local tracking directory + local_tracking_dir = os.path.join(tmpdir.strpath, "mlruns") + if tracking_uri is None: + tracking_uri = local_tracking_dir + old_uri = mlflow.get_tracking_uri() + try: + mlflow.set_tracking_uri(tracking_uri) + with mock.patch("mlflow.tracking.utils._get_store") as _get_store_mock: + _get_store_mock.return_value = file_store.FileStore(local_tracking_dir) + mlflow.projects.run( + TEST_DOCKER_PROJECT_DIR, experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID) + finally: + mlflow.set_tracking_uri(old_uri) + + +def test_docker_uri_mode_validation(tracking_uri_mock): # pylint: disable=unused-argument + # Build the image first so that only the call expected to raise sits inside the + # pytest.raises block + build_docker_example_base_image() + with pytest.raises(ExecutionException): + mlflow.projects.run(TEST_DOCKER_PROJECT_DIR, backend="databricks") + + +@mock.patch('mlflow.projects._get_git_commit') +def test_docker_image_uri_with_git(get_git_commit_mock): + get_git_commit_mock.return_value = '1234567890' + image_uri = _get_docker_image_uri("my_project", "my_workdir") + assert image_uri == "my_project:1234567" + get_git_commit_mock.assert_called_with('my_workdir') + + +@mock.patch('mlflow.projects._get_git_commit') +def test_docker_image_uri_no_git(get_git_commit_mock): + get_git_commit_mock.return_value = None + image_uri = _get_docker_image_uri("my_project", "my_workdir") + assert image_uri == "my_project" + get_git_commit_mock.assert_called_with('my_workdir') + + +def test_docker_valid_project_backend_local(): + work_dir = "./examples/docker" + project = _project_spec.load_project(work_dir) + mlflow.projects._validate_docker_env(project) + + +def test_docker_invalid_project_backend_local(): + work_dir = "./examples/docker" + project = _project_spec.load_project(work_dir) + project.name = None + with pytest.raises(ExecutionException): + mlflow.projects._validate_docker_env(project) diff --git a/tests/projects/test_entry_point.py b/tests/projects/test_entry_point.py index 453cda0d79b6e..cbb9ff2ad9d4f 100644 --- a/tests/projects/test_entry_point.py +++ b/tests/projects/test_entry_point.py @@ -6,7 +6,7 @@ from mlflow.exceptions import ExecutionException from mlflow.projects._project_spec import EntryPoint -from mlflow.utils.file_utils import TempDir +from mlflow.utils.file_utils import TempDir, path_to_local_file_uri from tests.projects.utils import load_project, TEST_PROJECT_DIR @@ -68,6 +68,13 @@ def test_path_parameter(): storage_dir=dst_dir) assert params["path"] == os.path.abspath(local_path) assert download_uri_mock.call_count == 0 + + params, _ = entry_point.compute_parameters( + user_parameters={"path": path_to_local_file_uri(local_path)}, + storage_dir=dst_dir) + assert params["path"] == os.path.abspath(local_path) + assert download_uri_mock.call_count == 0 + + # Verify that we
raise an exception when passing a non-existent local file to a # parameter of type "path" with TempDir() as tmp, pytest.raises(ExecutionException): @@ -76,7 +83,7 @@ def test_path_parameter(): user_parameters={"path": os.path.join(dst_dir, "some/nonexistent/file")}, storage_dir=dst_dir) # Verify that we do call `download_uri` when passing a URI to a parameter of type "path" - for i, prefix in enumerate(["dbfs:/", "s3://"]): + for i, prefix in enumerate(["dbfs:/", "s3://", "gs://"]): with TempDir() as tmp: dst_dir = tmp.path() params, _ = entry_point.compute_parameters( diff --git a/tests/projects/test_kubernetes.py b/tests/projects/test_kubernetes.py new file mode 100644 index 0000000000000..fdcb92623574d --- /dev/null +++ b/tests/projects/test_kubernetes.py @@ -0,0 +1,224 @@ +import mock +import yaml +import pytest +import kubernetes +from mlflow.projects import kubernetes as kb +from mlflow.exceptions import ExecutionException +from mlflow.entities import RunStatus + + +def test_run_command_creation(): # pylint: disable=unused-argument + """ + Tests command creation. + """ + command = ['python train.py --alpha 0.5 --l1-ratio 0.1'] + command = kb._get_run_command(command) + assert ['python', 'train.py', '--alpha', '0.5', '--l1-ratio', '0.1'] == command + + +def test_valid_kubernetes_job_spec(): # pylint: disable=unused-argument + """ + Tests job specification for Kubernetes. + """ + custom_template = yaml.safe_load("apiVersion: batch/v1\n" + "kind: Job\n" + "metadata:\n" + " name: pi-with-ttl\n" + "spec:\n" + " ttlSecondsAfterFinished: 100\n" + " template:\n" + " spec:\n" + " containers:\n" + " - name: pi\n" + " image: perl\n" + " command: ['perl', '-Mbignum=bpi', '-wle']\n" + " env: \n" + " - name: DUMMY\n" + " value: \"test_var\"\n" + " restartPolicy: Never\n") + project_name = "mlflow-docker-example" + image_tag = "image_tag" + image_digest = "5e74a5a" + command = ['mlflow', 'run', '.', '--no-conda', '-P', 'alpha=0.5'] + env_vars = {'RUN_ID': '1'} + job_definition = kb._get_kubernetes_job_definition(project_name=project_name, + image_tag=image_tag, + image_digest=image_digest, + command=command, env_vars=env_vars, + job_template=custom_template) + container_spec = job_definition['spec']['template']['spec']['containers'][0] + assert container_spec['name'] == project_name + assert container_spec['image'] == image_tag + '@' + image_digest + assert container_spec['command'] == command + assert 2 == len(container_spec['env']) + assert container_spec['env'][0]['name'] == 'DUMMY' + assert container_spec['env'][0]['value'] == 'test_var' + assert container_spec['env'][1]['name'] == 'RUN_ID' + assert container_spec['env'][1]['value'] == '1' + + +def test_run_kubernetes_job(): + active_run = mock.Mock() + project_name = "mlflow-docker-example" + image_tag = "image_tag" + image_digest = "5e74a5a" + command = ['python train.py --alpha 0.5 --l1-ratio 0.1'] + env_vars = {'RUN_ID': '1'} + kube_context = "docker-for-desktop" + job_template = yaml.safe_load("apiVersion: batch/v1\n" + "kind: Job\n" + "metadata:\n" + " name: pi-with-ttl\n" + " namespace: mlflow\n" + "spec:\n" + " ttlSecondsAfterFinished: 100\n" + " template:\n" + " spec:\n" + " containers:\n" + " - name: pi\n" + " image: perl\n" + " command: ['perl', '-Mbignum=bpi', '-wle']\n" + " restartPolicy: Never\n") + with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock: + with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock: + submitted_run_obj = kb.run_kubernetes_job(project_name=project_name, 
+ active_run=active_run, image_tag=image_tag, + image_digest=image_digest, command=command, + env_vars=env_vars, job_template=job_template, + kube_context=kube_context) + + assert submitted_run_obj._mlflow_run_id == active_run.info.run_id + assert submitted_run_obj._job_name.startswith(project_name) + assert submitted_run_obj._job_namespace == "mlflow" + assert kube_api_mock.call_count == 1 + args = kube_config_mock.call_args_list + assert args[0][1]['context'] == kube_context + + +def test_push_image_to_registry(): + image_uri = "dockerhub_account/mlflow-kubernetes-example" + with mock.patch("docker.from_env") as docker_mock: + client = mock.MagicMock() + docker_mock.return_value = client + kb.push_image_to_registry(image_uri) + assert client.images.push.call_count == 1 + args = client.images.push.call_args_list + assert args[0][1]['repository'] == image_uri + + +def test_push_image_to_registry_handling_errors(): + image_uri = "dockerhub_account/mlflow-kubernetes-example" + with pytest.raises(ExecutionException): + kb.push_image_to_registry(image_uri) + + +def test_submitted_run_get_status_killed(): + mlflow_run_id = 1 + job_name = 'job-name' + job_namespace = 'job-namespace' + with mock.patch("kubernetes.client.BatchV1Api.delete_namespaced_job") as kube_api_mock: + submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace) + submitted_run.cancel() + assert RunStatus.KILLED == submitted_run.get_status() + assert kube_api_mock.call_count == 1 + args = kube_api_mock.call_args_list + assert args[0][1]['name'] == job_name + assert args[0][1]['namespace'] == job_namespace + + +def test_submitted_run_get_status_failed(): + mlflow_run_id = 1 + job_name = 'job-name' + job_namespace = 'job-namespace' + condition = kubernetes.client.models.V1JobCondition(type="Failed", status="True") + job_status = kubernetes.client.models.V1JobStatus(active=1, + completion_time=None, + conditions=[condition], + failed=1, + start_time=1, + succeeded=None) + job = kubernetes.client.models.V1Job(status=job_status) + with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock: + kube_api_mock.return_value = job + submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace) + print("status", submitted_run.get_status()) + assert RunStatus.FAILED == submitted_run.get_status() + assert kube_api_mock.call_count == 1 + args = kube_api_mock.call_args_list + assert args[0][1]['name'] == job_name + assert args[0][1]['namespace'] == job_namespace + + +def test_submitted_run_get_status_succeeded(): + mlflow_run_id = 1 + job_name = 'job-name' + job_namespace = 'job-namespace' + condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True") + job_status = kubernetes.client.models.V1JobStatus(active=None, + completion_time=None, + conditions=[condition], + failed=None, + start_time=None, + succeeded=1) + job = kubernetes.client.models.V1Job(status=job_status) + with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock: + kube_api_mock.return_value = job + submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace) + print("status", submitted_run.get_status()) + assert RunStatus.FINISHED == submitted_run.get_status() + assert kube_api_mock.call_count == 1 + args = kube_api_mock.call_args_list + assert args[0][1]['name'] == job_name + assert args[0][1]['namespace'] == job_namespace + + +def test_submitted_run_get_status_running(): + mlflow_run_id = 1 + job_name = 'job-name' + 
job_namespace = 'job-namespace' + job_status = kubernetes.client.models.V1JobStatus(active=1, + completion_time=None, + conditions=None, + failed=1, + start_time=1, + succeeded=1) + job = kubernetes.client.models.V1Job(status=job_status) + with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock: + kube_api_mock.return_value = job + submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace) + assert RunStatus.RUNNING == submitted_run.get_status() + assert kube_api_mock.call_count == 1 + args = kube_api_mock.call_args_list + print(args) + assert args[0][1]['name'] == job_name + assert args[0][1]['namespace'] == job_namespace + + +def test_state_transitions(): + mlflow_run_id = 1 + job_name = 'job-name' + job_namespace = 'job-namespace' + submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace) + + with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock: + def set_return_value(**kwargs): + job_status = kubernetes.client.models.V1JobStatus(**kwargs) + kube_api_mock.return_value = kubernetes.client.models.V1Job(status=job_status) + set_return_value() + assert RunStatus.SCHEDULED == submitted_run.get_status() + set_return_value(start_time=1) + assert RunStatus.RUNNING == submitted_run.get_status() + set_return_value(start_time=1, failed=1) + assert RunStatus.RUNNING == submitted_run.get_status() + set_return_value(start_time=1, failed=1) + assert RunStatus.RUNNING == submitted_run.get_status() + set_return_value(start_time=1, failed=1, active=1) + assert RunStatus.RUNNING == submitted_run.get_status() + set_return_value(start_time=1, failed=1, succeeded=1) + assert RunStatus.RUNNING == submitted_run.get_status() + set_return_value(start_time=1, failed=1, succeeded=1, completion_time=2) + assert RunStatus.RUNNING == submitted_run.get_status() + condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True") + set_return_value(conditions=[condition], failed=1, start_time=1, completion_time=2, + succeeded=1) + assert RunStatus.FINISHED == submitted_run.get_status() diff --git a/tests/projects/test_project_spec.py b/tests/projects/test_project_spec.py index 903bb5cb1d6d3..1fddb8164726e 100644 --- a/tests/projects/test_project_spec.py +++ b/tests/projects/test_project_spec.py @@ -1,6 +1,7 @@ import os import pytest +import textwrap from mlflow.exceptions import ExecutionException from mlflow.projects import _project_spec @@ -63,3 +64,32 @@ def test_load_project(tmpdir, mlproject, conda_env_path, conda_env_contents): assert project.conda_env_path == expected_env_path if conda_env_path: assert open(project.conda_env_path).read() == conda_env_contents + + +def test_load_docker_project(tmpdir): + tmpdir.join("MLproject").write(textwrap.dedent(""" + docker_env: + image: some-image + """)) + project = _project_spec.load_project(tmpdir.strpath) + assert project._entry_points == {} + assert project.conda_env_path is None + assert project.docker_env.get("image") == "some-image" + + +@pytest.mark.parametrize("invalid_project_contents, expected_error_msg", [ + (textwrap.dedent(""" + docker_env: + image: some-image + conda_env: some-file.yaml + """), "cannot contain both a docker and conda env"), + (textwrap.dedent(""" + docker_env: + not-image-attribute: blah + """), "no image attribute found"), +]) +def test_load_invalid_project(tmpdir, invalid_project_contents, expected_error_msg): + tmpdir.join("MLproject").write(invalid_project_contents) + with 
pytest.raises(ExecutionException) as e: + _project_spec.load_project(tmpdir.strpath) + assert expected_error_msg in str(e.value) diff --git a/tests/projects/test_projects.py b/tests/projects/test_projects.py index 0b0dd77b5d84a..bcb2238c953e6 100644 --- a/tests/projects/test_projects.py +++ b/tests/projects/test_projects.py @@ -1,6 +1,7 @@ import os import git import tempfile +import yaml from distutils import dir_util @@ -8,15 +9,29 @@ import pytest import mlflow -from mlflow.entities import RunStatus, ViewType -from mlflow.exceptions import ExecutionException + +from mlflow.entities import RunStatus, ViewType, Experiment, SourceType +from mlflow.exceptions import ExecutionException, MlflowException +from mlflow.store.file_store import FileStore from mlflow.utils import env +from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_USER, MLFLOW_SOURCE_NAME, \ + MLFLOW_SOURCE_TYPE, MLFLOW_GIT_BRANCH, MLFLOW_GIT_REPO_URL, LEGACY_MLFLOW_GIT_BRANCH_NAME, \ + LEGACY_MLFLOW_GIT_REPO_URL, MLFLOW_PROJECT_ENTRY_POINT from tests.projects.utils import TEST_PROJECT_DIR, TEST_PROJECT_NAME, GIT_PROJECT_URI, \ validate_exit_status, assert_dirs_equal from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import +MOCK_USER = "janebloggs" + + +@pytest.fixture +def patch_user(): + with mock.patch("mlflow.projects._get_user", return_value=MOCK_USER): + yield + + def _build_uri(base_uri, subdirectory): if subdirectory != "": return "%s#%s" % (base_uri, subdirectory) @@ -28,7 +43,7 @@ def _get_version_local_git_repo(local_git_repo): return repo.git.rev_parse("HEAD") -@pytest.fixture() +@pytest.fixture def local_git_repo(tmpdir): local_git = tmpdir.join('git_repo').strpath repo = git.Repo.init(local_git) @@ -39,22 +54,56 @@ def local_git_repo(tmpdir): yield os.path.abspath(local_git) -@pytest.fixture() +@pytest.fixture def local_git_repo_uri(local_git_repo): return "file://%s" % local_git_repo -def test_fetch_project(local_git_repo, local_git_repo_uri): +@pytest.fixture +def zipped_repo(tmpdir): + import zipfile + zip_name = tmpdir.join('%s.zip' % TEST_PROJECT_NAME).strpath + with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zip_file: + for root, _, files in os.walk(TEST_PROJECT_DIR): + for file_name in files: + file_path = os.path.join(root, file_name) + zip_file.write(file_path, file_path[len(TEST_PROJECT_DIR) + len(os.sep):]) + return zip_name + + +def test_is_zip_uri(): + assert mlflow.projects._is_zip_uri('http://foo.bar/moo.zip') + assert mlflow.projects._is_zip_uri('https://foo.bar/moo.zip') + assert mlflow.projects._is_zip_uri('file:///moo.zip') + assert mlflow.projects._is_zip_uri('file://C:/moo.zip') + assert mlflow.projects._is_zip_uri('/moo.zip') + assert mlflow.projects._is_zip_uri('C:/moo.zip') + assert not mlflow.projects._is_zip_uri('http://foo.bar/moo') + assert not mlflow.projects._is_zip_uri('https://foo.bar/moo') + assert not mlflow.projects._is_zip_uri('file:///moo') + assert not mlflow.projects._is_zip_uri('file://C:/moo') + assert not mlflow.projects._is_zip_uri('/moo') + assert not mlflow.projects._is_zip_uri('C:/moo') + + +def test_fetch_project(local_git_repo, local_git_repo_uri, zipped_repo, httpserver): + httpserver.serve_content(open(zipped_repo, 'rb').read()) # The tests are as follows: # 1. Fetching a locally saved project. # 2. Fetching a project located in a Git repo root directory. # 3. Fetching a project located in a Git repo subdirectory. # 4. Passing a subdirectory works for local directories. + # 5. Fetching a remote ZIP file + # 6. 
Using a local ZIP file + # 7. Using a file:// URL to a local ZIP file test_list = [ (TEST_PROJECT_DIR, '', TEST_PROJECT_DIR), (local_git_repo_uri, '', local_git_repo), (local_git_repo_uri, 'example_project', os.path.join(local_git_repo, 'example_project')), (os.path.dirname(TEST_PROJECT_DIR), os.path.basename(TEST_PROJECT_DIR), TEST_PROJECT_DIR), + (httpserver.url + '/%s.zip' % TEST_PROJECT_NAME, '', TEST_PROJECT_DIR), + (zipped_repo, '', TEST_PROJECT_DIR), + ('file://%s' % zipped_repo, '', TEST_PROJECT_DIR), ] for base_uri, subdirectory, expected in test_list: work_dir = mlflow.projects._fetch_project( @@ -80,22 +129,14 @@ def test_fetch_project_validations(local_git_repo_uri): with pytest.raises(ExecutionException): mlflow.projects._fetch_project(uri=TEST_PROJECT_DIR, force_tempdir=False, version="version") - # Passing only one of git_username, git_password results in an error - for username, password in [(None, "hi"), ("hi", None)]: - with pytest.raises(ExecutionException): - mlflow.projects._fetch_project( - local_git_repo_uri, force_tempdir=False, git_username=username, - git_password=password) - def test_dont_remove_mlruns(tmpdir): # Fetching a directory containing an "mlruns" folder doesn't remove the "mlruns" folder src_dir = tmpdir.mkdir("mlruns-src-dir") src_dir.mkdir("mlruns").join("some-file.txt").write("hi") src_dir.join("MLproject").write("dummy MLproject contents") - dst_dir = mlflow.projects._fetch_project( - uri=src_dir.strpath, version=None, git_username=None, - git_password=None, force_tempdir=False) + dst_dir = mlflow.projects._fetch_project(uri=src_dir.strpath, version=None, + force_tempdir=False) assert_dirs_equal(expected=src_dir.strpath, actual=dst_dir) @@ -115,7 +156,7 @@ def test_parse_subdirectory(): def test_invalid_run_mode(tracking_uri_mock): # pylint: disable=unused-argument """ Verify that we raise an exception given an invalid run mode """ with pytest.raises(ExecutionException): - mlflow.projects.run(uri=TEST_PROJECT_DIR, mode="some unsupported mode") + mlflow.projects.run(uri=TEST_PROJECT_DIR, backend="some unsupported mode") def test_use_conda(tracking_uri_mock): # pylint: disable=unused-argument @@ -137,9 +178,10 @@ def test_is_valid_branch_name(local_git_repo): @pytest.mark.parametrize("use_start_run", map(str, [0, 1])) @pytest.mark.parametrize("version", [None, "master", "git-commit"]) -def test_run_local_git_repo(local_git_repo, +def test_run_local_git_repo(patch_user, # pylint: disable=unused-argument + local_git_repo, local_git_repo_uri, - tracking_uri_mock, # pylint: disable=unused-argument + tracking_uri_mock, # pylint: disable=unused-argument use_start_run, version): if version is not None: @@ -151,7 +193,7 @@ def test_run_local_git_repo(local_git_repo, submitted_run = mlflow.projects.run( uri, entry_point="test_tracking", version=version, parameters={"use_start_run": use_start_run}, - use_conda=False, experiment_id=0) + use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) # Blocking runs should be finished when they return validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) @@ -160,46 +202,72 @@ def test_run_local_git_repo(local_git_repo, submitted_run.wait() validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) # Validate run contents in the FileStore - run_uuid = submitted_run.run_id + run_id = submitted_run.run_id mlflow_service = mlflow.tracking.MlflowClient() - run_infos = mlflow_service.list_run_infos(experiment_id=0, run_view_type=ViewType.ACTIVE_ONLY) - assert "file:" in run_infos[0].source_name 
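+ # In addition to params and metrics, the assertions below verify the user, source, and + # entry-point tags; the git branch/repo tags (including their legacy names) are only + # expected when the project is run against the 'master' version.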
+ run_infos = mlflow_service.list_run_infos( + experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, run_view_type=ViewType.ACTIVE_ONLY) assert len(run_infos) == 1 - store_run_uuid = run_infos[0].run_uuid - assert run_uuid == store_run_uuid - run = mlflow_service.get_run(run_uuid) - expected_params = {"use_start_run": use_start_run} - assert run.info.status == RunStatus.FINISHED - assert len(run.data.params) == len(expected_params) - for param in run.data.params: - assert param.value == expected_params[param.key] - expected_metrics = {"some_key": 3} - assert len(run.data.metrics) == len(expected_metrics) - for metric in run.data.metrics: - assert metric.value == expected_metrics[metric.key] - # Validate the branch name tag is logged + store_run_id = run_infos[0].run_id + assert run_id == store_run_id + run = mlflow_service.get_run(run_id) + + assert run.info.status == RunStatus.to_string(RunStatus.FINISHED) + assert run.data.params == {"use_start_run": use_start_run} + assert run.data.metrics == {"some_key": 3} + + tags = run.data.tags + assert tags[MLFLOW_USER] == MOCK_USER + assert "file:" in tags[MLFLOW_SOURCE_NAME] + assert tags[MLFLOW_SOURCE_TYPE] == SourceType.to_string(SourceType.PROJECT) + assert tags[MLFLOW_PROJECT_ENTRY_POINT] == "test_tracking" + if version == "master": - expected_tags = {"mlflow.gitBranchName": "master"} - for tag in run.data.tags: - assert tag.value == expected_tags[tag.key] + assert tags[MLFLOW_GIT_BRANCH] == "master" + assert tags[MLFLOW_GIT_REPO_URL] == local_git_repo_uri + assert tags[LEGACY_MLFLOW_GIT_BRANCH_NAME] == "master" + assert tags[LEGACY_MLFLOW_GIT_REPO_URL] == local_git_repo_uri + + +@pytest.mark.parametrize("experiment_id,experiment_name,expected", + [("1", None, "1"), (None, 'name', "33")]) +def test_resolve_experiment_id(experiment_id, experiment_name, expected): + with mock.patch('mlflow.tracking.MlflowClient.get_experiment_by_name') \ + as get_experiment_by_name_mock: + get_experiment_by_name_mock.return_value = Experiment(experiment_id="33", name='Name', + artifact_location=None, + lifecycle_stage=None) + + exp_id = mlflow.projects._resolve_experiment_id(experiment_name=experiment_name, + experiment_id=experiment_id) + assert exp_id == expected + + +def test_resolve_experiment_id_should_not_allow_both_name_and_id_in_use(): + with pytest.raises(MlflowException, + match="Specify only one of 'experiment_name' or 'experiment_id'."): + _ = mlflow.projects._resolve_experiment_id(experiment_name='experiment_named', + experiment_id="44") def test_invalid_version_local_git_repo(local_git_repo_uri, - tracking_uri_mock): # pylint: disable=unused-argument + tracking_uri_mock): # pylint: disable=unused-argument # Run project with invalid commit hash with pytest.raises(ExecutionException, match=r'Unable to checkout version \'badc0de\''): mlflow.projects.run(local_git_repo_uri + "#" + TEST_PROJECT_NAME, entry_point="test_tracking", version="badc0de", - use_conda=False, experiment_id=0) + use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) @pytest.mark.parametrize("use_start_run", map(str, [0, 1])) -def test_run(tmpdir, tracking_uri_mock, use_start_run): # pylint: disable=unused-argument +def test_run(tmpdir, # pylint: disable=unused-argument + patch_user, # pylint: disable=unused-argument + tracking_uri_mock, # pylint: disable=unused-argument + use_start_run): submitted_run = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="test_tracking", parameters={"use_start_run": use_start_run}, - use_conda=False, experiment_id=0) + use_conda=False, 
experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) assert submitted_run.run_id is not None # Blocking runs should be finished when they return validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) @@ -208,34 +276,53 @@ def test_run(tmpdir, tracking_uri_mock, use_start_run): # pylint: disable=unuse submitted_run.wait() validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) # Validate run contents in the FileStore - run_uuid = submitted_run.run_id + run_id = submitted_run.run_id mlflow_service = mlflow.tracking.MlflowClient() - run_infos = mlflow_service.list_run_infos(experiment_id=0, run_view_type=ViewType.ACTIVE_ONLY) + + run_infos = mlflow_service.list_run_infos( + experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, run_view_type=ViewType.ACTIVE_ONLY) assert len(run_infos) == 1 - store_run_uuid = run_infos[0].run_uuid - assert run_uuid == store_run_uuid - run = mlflow_service.get_run(run_uuid) - expected_params = {"use_start_run": use_start_run} - assert run.info.status == RunStatus.FINISHED - assert len(run.data.params) == len(expected_params) - for param in run.data.params: - assert param.value == expected_params[param.key] - expected_metrics = {"some_key": 3} - assert len(run.data.metrics) == len(expected_metrics) - for metric in run.data.metrics: - assert metric.value == expected_metrics[metric.key] + store_run_id = run_infos[0].run_id + assert run_id == store_run_id + run = mlflow_service.get_run(run_id) + + assert run.info.status == RunStatus.to_string(RunStatus.FINISHED) + + assert run.data.params == {"use_start_run": use_start_run} + assert run.data.metrics == {"some_key": 3} + + tags = run.data.tags + assert tags[MLFLOW_USER] == MOCK_USER + assert "file:" in tags[MLFLOW_SOURCE_NAME] + assert tags[MLFLOW_SOURCE_TYPE] == SourceType.to_string(SourceType.PROJECT) + assert tags[MLFLOW_PROJECT_ENTRY_POINT] == "test_tracking" + + +def test_run_with_parent(tmpdir, tracking_uri_mock): # pylint: disable=unused-argument + """Verify that if we are in a nested run, mlflow.projects.run() will have a parent_run_id.""" + with mlflow.start_run(): + parent_run_id = mlflow.active_run().info.run_id + submitted_run = mlflow.projects.run( + TEST_PROJECT_DIR, entry_point="test_tracking", + parameters={"use_start_run": "1"}, + use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID) + assert submitted_run.run_id is not None + validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED) + run_id = submitted_run.run_id + run = mlflow.tracking.MlflowClient().get_run(run_id) + assert run.data.tags[MLFLOW_PARENT_RUN_ID] == parent_run_id def test_run_async(tracking_uri_mock): # pylint: disable=unused-argument submitted_run0 = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="sleep", parameters={"duration": 2}, - use_conda=False, experiment_id=0, block=False) + use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, synchronous=False) validate_exit_status(submitted_run0.get_status(), RunStatus.RUNNING) submitted_run0.wait() validate_exit_status(submitted_run0.get_status(), RunStatus.FINISHED) submitted_run1 = mlflow.projects.run( TEST_PROJECT_DIR, entry_point="sleep", parameters={"duration": -1, "invalid-param": 30}, - use_conda=False, experiment_id=0, block=False) + use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, synchronous=False) submitted_run1.wait() validate_exit_status(submitted_run1.get_status(), RunStatus.FAILED) @@ -246,7 +333,7 @@ def test_run_async(tracking_uri_mock): # pylint: disable=unused-argument ({}, "conda", "activate"), 
({mlflow.projects.MLFLOW_CONDA_HOME: "/some/dir/"}, "/some/dir/bin/conda", "/some/dir/bin/activate") - ] + ] ) def test_conda_path(mock_env, expected_conda, expected_activate): """Verify that we correctly determine the path to conda executables""" @@ -258,7 +345,8 @@ def test_conda_path(mock_env, expected_conda, expected_activate): def test_cancel_run(tracking_uri_mock): # pylint: disable=unused-argument submitted_run0, submitted_run1 = [mlflow.projects.run( TEST_PROJECT_DIR, entry_point="sleep", parameters={"duration": 2}, - use_conda=False, experiment_id=0, block=False) for _ in range(2)] + use_conda=False, experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, + synchronous=False) for _ in range(2)] submitted_run0.cancel() validate_exit_status(submitted_run0.get_status(), RunStatus.FAILED) # Sanity check: cancelling one run has no effect on the other @@ -276,3 +364,48 @@ def test_storage_dir(tmpdir): """ assert os.path.dirname(mlflow.projects._get_storage_dir(tmpdir.strpath)) == tmpdir.strpath assert os.path.dirname(mlflow.projects._get_storage_dir(None)) == tempfile.gettempdir() + + +def test_parse_kubernetes_config(): + work_dir = "./examples/docker" + kubernetes_config = { + "kube-context": "docker-for-desktop", + "kube-job-template-path": os.path.join(work_dir, "kubernetes_job_template.yaml"), + "repository-uri": "dockerhub_account/mlflow-kubernetes-example" + } + yaml_obj = None + with open(kubernetes_config["kube-job-template-path"], 'r') as job_template: + yaml_obj = yaml.safe_load(job_template.read()) + kube_config = mlflow.projects._parse_kubernetes_config(kubernetes_config) + assert kube_config["kube-context"] == kubernetes_config["kube-context"] + assert kube_config["kube-job-template-path"] == kubernetes_config["kube-job-template-path"] + assert kube_config["repository-uri"] == kubernetes_config["repository-uri"] + assert kube_config["kube-job-template"] == yaml_obj + + +def test_parse_kubernetes_config_without_context(): + kubernetes_config = { + "repository-uri": "dockerhub_account/mlflow-kubernetes-example", + "kube-job-template-path": "kubernetes_job_template.yaml" + } + with pytest.raises(ExecutionException): + mlflow.projects._parse_kubernetes_config(kubernetes_config) + + +def test_parse_kubernetes_config_without_image_uri(): + kubernetes_config = { + "kube-context": "docker-for-desktop", + "kube-job-template-path": "kubernetes_job_template.yaml" + } + with pytest.raises(ExecutionException): + mlflow.projects._parse_kubernetes_config(kubernetes_config) + + +def test_parse_kubernetes_config_invalid_template_job_file(): + kubernetes_config = { + "kube-context": "docker-for-desktop", + "repository-uri": "username/mlflow-kubernetes-example", + "kube-job-template-path": "file_not_found.yaml" + } + with pytest.raises(ExecutionException): + mlflow.projects._parse_kubernetes_config(kubernetes_config) diff --git a/tests/projects/test_projects_cli.py b/tests/projects/test_projects_cli.py index 4c9969bd69d57..dc01ceaac86b5 100644 --- a/tests/projects/test_projects_cli.py +++ b/tests/projects/test_projects_cli.py @@ -1,15 +1,21 @@ +import json import hashlib +import mock import os +import logging +from click.testing import CliRunner import pytest from mlflow import cli -from mlflow.utils import process, logging_utils +from mlflow.utils import process from tests.integration.utils import invoke_cli_runner -from tests.projects.utils import TEST_PROJECT_DIR, GIT_PROJECT_URI, SSH_PROJECT_URI,\ +from tests.projects.utils import TEST_PROJECT_DIR, GIT_PROJECT_URI, SSH_PROJECT_URI, \ 
TEST_NO_SPEC_PROJECT_DIR from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import +_logger = logging.getLogger(__name__) + @pytest.mark.large def test_run_local_params(tracking_uri_mock): # pylint: disable=unused-argument @@ -28,9 +34,9 @@ def test_run_local_conda_env(tracking_uri_mock): # pylint: disable=unused-argum try: process.exec_cmd(cmd=["conda", "env", "remove", "--name", expected_env_name]) except process.ShellCommandException: - logging_utils.eprint( + _logger.error( "Unable to remove conda environment %s. The environment may not have been present, " - "continuing with running the test." % expected_env_name) + "continuing with running the test.", expected_env_name) invoke_cli_runner(cli.run, [TEST_PROJECT_DIR, "-e", "check_conda_env", "-P", "conda_env_name=%s" % expected_env_name]) @@ -61,3 +67,30 @@ def test_run_git_ssh(tracking_uri_mock): # pylint: disable=unused-argument assert SSH_PROJECT_URI.startswith("git@") invoke_cli_runner(cli.run, [SSH_PROJECT_URI, "--no-conda", "-P", "alpha=0.5"]) invoke_cli_runner(cli.run, [SSH_PROJECT_URI, "--no-conda", "-P", "alpha=0.5"]) + + +def test_run_databricks_cluster_spec(tmpdir): + cluster_spec = { + "spark_version": "5.0.x-scala2.11", + "num_workers": 2, + "node_type_id": "i3.xlarge", + } + cluster_spec_path = str(tmpdir.join("cluster-spec.json")) + with open(cluster_spec_path, "w") as handle: + json.dump(cluster_spec, handle) + + with mock.patch("mlflow.projects._run") as run_mock: + for cluster_spec_arg in [json.dumps(cluster_spec), cluster_spec_path]: + invoke_cli_runner( + cli.run, [TEST_PROJECT_DIR, "-b", "databricks", "--backend-config", + cluster_spec_arg, "-e", "greeter", "-P", "name=hi"], + env={'MLFLOW_TRACKING_URI': 'databricks://profile'}) + assert run_mock.call_count == 1 + _, run_kwargs = run_mock.call_args_list[0] + assert run_kwargs["backend_config"] == cluster_spec + run_mock.reset_mock() + res = CliRunner().invoke( + cli.run, [TEST_PROJECT_DIR, "-m", "databricks", "--cluster-spec", + json.dumps(cluster_spec) + "JUNK", "-e", "greeter", "-P", "name=hi"], + env={'MLFLOW_TRACKING_URI': 'databricks://profile'}) + assert res.exit_code != 0 diff --git a/tests/projects/utils.py b/tests/projects/utils.py index cdffb21536505..c699341c0c9ca 100644 --- a/tests/projects/utils.py +++ b/tests/projects/utils.py @@ -1,15 +1,19 @@ import filecmp import os +import docker +from docker.errors import BuildError, APIError import pytest import mlflow from mlflow.entities import RunStatus from mlflow.projects import _project_spec +from mlflow.utils.file_utils import path_to_local_sqlite_uri TEST_DIR = "tests" TEST_PROJECT_DIR = os.path.join(TEST_DIR, "resources", "example_project") +TEST_DOCKER_PROJECT_DIR = os.path.join(TEST_DIR, "resources", "example_docker_project") TEST_PROJECT_NAME = "example_project" TEST_NO_SPEC_PROJECT_DIR = os.path.join(TEST_DIR, "resources", "example_project_no_spec") GIT_PROJECT_URI = "https://github.com/mlflow/mlflow-example" @@ -33,10 +37,25 @@ def assert_dirs_equal(expected, actual): assert len(dir_comparison.funny_files) == 0 +def build_docker_example_base_image(): + print(os.path.join(TEST_DOCKER_PROJECT_DIR, 'Dockerfile')) + client = docker.from_env() + try: + client.images.build(tag='mlflow-docker-example', forcerm=True, + dockerfile='Dockerfile', path=TEST_DOCKER_PROJECT_DIR) + except BuildError as build_error: + for chunk in build_error.build_log: + print(chunk) + raise build_error + except APIError as api_error: + print(api_error.explanation) + raise api_error + + @pytest.fixture() 
def tracking_uri_mock(tmpdir): try: - mlflow.set_tracking_uri(os.path.join(tmpdir.strpath, 'mlruns')) + mlflow.set_tracking_uri(path_to_local_sqlite_uri(os.path.join(tmpdir.strpath, 'mlruns'))) yield tmpdir finally: mlflow.set_tracking_uri(None) diff --git a/tests/pyfunc/test_model_export.py b/tests/pyfunc/test_model_export.py deleted file mode 100644 index c936b8d0e84e4..0000000000000 --- a/tests/pyfunc/test_model_export.py +++ /dev/null @@ -1,228 +0,0 @@ -from __future__ import print_function - -import os -import time -import six -import pickle -import shutil -import tempfile -import unittest -import requests -import signal -from subprocess import Popen, STDOUT - -from click.testing import CliRunner -import numpy as np -import pandas -import sklearn.datasets -import sklearn.linear_model -import sklearn.neighbors - -import mlflow -from mlflow import pyfunc -from mlflow import tracking -import mlflow.pyfunc.cli -from mlflow.models import Model -from mlflow.utils.file_utils import TempDir - - -def _load_pyfunc(path): - with open(path, "rb") as f: - if six.PY2: - return pickle.load(f) - else: - return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg - - -class TestModelExport(unittest.TestCase): - def setUp(self): - self._tmp = tempfile.mkdtemp() - iris = sklearn.datasets.load_iris() - self._X = iris.data[:, :2] # we only take the first two features. - self._y = iris.target - self._knn = sklearn.neighbors.KNeighborsClassifier() - self._knn.fit(self._X, self._y) - self._knn_predict = self._knn.predict(self._X) - self._linear_lr = sklearn.linear_model.LogisticRegression() - self._linear_lr.fit(self._X, self._y) - self._linear_lr_predict = self._linear_lr.predict(self._X) - - def test_model_save_load(self): - with TempDir() as tmp: - model_path = tmp.path("knn.pkl") - with open(model_path, "wb") as f: - pickle.dump(self._knn, f) - path = tmp.path("knn") - m = Model(run_id="test", artifact_path="testtest") - pyfunc.save_model(dst_path=path, - data_path=model_path, - loader_module=os.path.basename(__file__)[:-3], - code_path=[__file__], - model=m) - m2 = Model.load(os.path.join(path, "MLmodel")) - print("m1", m.__dict__) - print("m2", m2.__dict__) - assert m.__dict__ == m2.__dict__ - assert pyfunc.FLAVOR_NAME in m2.flavors - assert pyfunc.PY_VERSION in m2.flavors[pyfunc.FLAVOR_NAME] - x = pyfunc.load_pyfunc(path) - xpred = x.predict(self._X) - np.testing.assert_array_equal(self._knn_predict, xpred) - - def test_model_log(self): - with TempDir(chdr=True, remove_on_exit=True) as tmp: - model_path = tmp.path("linear.pkl") - with open(model_path, "wb") as f: - pickle.dump(self._linear_lr, f) - tracking_dir = os.path.abspath(tmp.path("mlruns")) - mlflow.set_tracking_uri("file://%s" % tracking_dir) - mlflow.start_run() - try: - pyfunc.log_model(artifact_path="linear", - data_path=model_path, - loader_module=os.path.basename(__file__)[:-3], - code_path=[__file__]) - - run_id = mlflow.active_run().info.run_uuid - path = tracking.utils._get_model_log_dir("linear", run_id) - m = Model.load(os.path.join(path, "MLmodel")) - print(m.__dict__) - assert pyfunc.FLAVOR_NAME in m.flavors - assert pyfunc.PY_VERSION in m.flavors[pyfunc.FLAVOR_NAME] - x = pyfunc.load_pyfunc("linear", run_id=run_id) - xpred = x.predict(self._X) - np.testing.assert_array_equal(self._linear_lr_predict, xpred) - finally: - mlflow.end_run() - mlflow.set_tracking_uri(None) - # Remove the log directory in order to avoid adding new tests to pytest... 
- shutil.rmtree(tracking_dir) - - def _create_conda_env_file(self, tmp): - conda_env_path = tmp.path("conda.yml") - with open(conda_env_path, "w") as f: - f.write(""" - name: mlflow - channels: - - defaults - dependencies: - - pip: - - -e {} - """.format(os.path.abspath(os.path.join(mlflow.__path__[0], '..')))) - return conda_env_path - - def _model_serve_with_conda_env(self, extra_args): - with TempDir() as tmp: - model_path = tmp.path("knn.pkl") - with open(model_path, "wb") as f: - pickle.dump(self._knn, f) - path = tmp.path("knn") - - pyfunc.save_model(dst_path=path, - data_path=model_path, - loader_module=os.path.basename(__file__)[:-3], - code_path=[__file__], - conda_env=self._create_conda_env_file(tmp) - ) - input_csv_path = tmp.path("input.csv") - pandas.DataFrame(self._X).to_csv(input_csv_path, header=True, index=False) - port = 5000 - process = Popen(['mlflow', 'pyfunc', 'serve', - '--model-path', path, '--port', str(port)] + extra_args, - stderr=STDOUT, - preexec_fn=os.setsid) - time.sleep(5) - try: - assert process.poll() is None, "server died prematurely" - success = False - failcount = 0 - while not success and failcount < 3 and process.poll() is None: - try: - response = requests.post("http://localhost:{}/invocations".format(port), - data=open(input_csv_path, 'rb'), - headers={'Content-type': 'text/csv'}) - response.close() - success = True - except requests.ConnectionError: - time.sleep(5) - failcount += 1 - finally: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) # kill process + children - time.sleep(0.5) - assert process.poll() is not None, "server not dead" - - # check result - if not success: - raise RuntimeError("Fail to connect to the server") - else: - result_df = pandas.read_json(response.content) - np.testing.assert_array_equal(result_df.values.transpose()[0], - self._knn.predict(self._X)) - - def test_model_serve_with_conda(self): - self._model_serve_with_conda_env(extra_args=[]) - - def test_model_serve_with_no_conda(self): - self._model_serve_with_conda_env(extra_args=['--no-conda']) - - def test_cli_predict(self): - with TempDir() as tmp: - model_path = tmp.path("knn.pkl") - with open(model_path, "wb") as f: - pickle.dump(self._knn, f) - path = tmp.path("knn") - pyfunc.save_model(dst_path=path, - data_path=model_path, - loader_module=os.path.basename(__file__)[:-3], - code_path=[__file__], - ) - input_csv_path = tmp.path("input with spaces.csv") - pandas.DataFrame(self._X).to_csv(input_csv_path, header=True, index=False) - output_csv_path = tmp.path("output.csv") - runner = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}) - result = runner.invoke(mlflow.pyfunc.cli.commands, - ['predict', '--model-path', path, '-i', - input_csv_path, '-o', output_csv_path]) - print("result", result.output) - print(result.exc_info) - print(result.exception) - assert result.exit_code == 0 - result_df = pandas.read_csv(output_csv_path, header=None) - np.testing.assert_array_equal(result_df.values.transpose()[0], - self._knn.predict(self._X)) - - def _cli_predict_with_conda_env(self, extra_args): - with TempDir() as tmp: - model_path = tmp.path("knn.pkl") - with open(model_path, "wb") as f: - pickle.dump(self._knn, f) - - # create a conda yaml that installs mlflow from source in-place mode - path = tmp.path("knn") - pyfunc.save_model(dst_path=path, - data_path=model_path, - loader_module=os.path.basename(__file__)[:-3], - code_path=[__file__], - conda_env=self._create_conda_env_file(tmp) - ) - input_csv_path = tmp.path("input with spaces.csv") - 
pandas.DataFrame(self._X).to_csv(input_csv_path, header=True, index=False) - output_csv_path = tmp.path("output.csv") - process = Popen(['mlflow', 'pyfunc', 'predict', - '--model-path', path, - '-i', input_csv_path, - '-o', output_csv_path] + extra_args, - stderr=STDOUT, - preexec_fn=os.setsid) - process.wait() - result_df = pandas.read_csv(output_csv_path, header=None) - np.testing.assert_array_equal(result_df.values.transpose()[0], - self._knn.predict(self._X)) - - def test_cli_predict_with_conda(self): - """Run prediction in MLModel specified conda env""" - self._cli_predict_with_conda_env([]) - - def test_cli_predict_with_no_conda(self): - """Run prediction in current conda env""" - self._cli_predict_with_conda_env(['--no-conda']) diff --git a/tests/pyfunc/test_model_export_with_class_and_artifacts.py b/tests/pyfunc/test_model_export_with_class_and_artifacts.py new file mode 100644 index 0000000000000..4422436e22745 --- /dev/null +++ b/tests/pyfunc/test_model_export_with_class_and_artifacts.py @@ -0,0 +1,768 @@ +from __future__ import print_function + +import cloudpickle +import os +import json +import mock +from subprocess import Popen, PIPE + +import numpy as np +import pandas as pd +import pandas.testing +import pytest +import sklearn.datasets +import sklearn.linear_model +import sklearn.neighbors +import yaml + +import mlflow +import mlflow.pyfunc +import mlflow.pyfunc.model +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server +import mlflow.sklearn +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import get_artifact_uri as utils_get_artifact_uri, \ + _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration + +import tests +from tests.helper_functions import pyfunc_serve_and_score_model +from tests.helper_functions import score_model_in_sagemaker_docker_container +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import + + +def get_model_class(): + """ + Defines a custom Python model class that wraps a scikit-learn estimator. + This can be invoked within a pytest fixture to define the class in the ``__main__`` scope. + Alternatively, it can be invoked within a module to define the class in the module's scope. + """ + + class CustomSklearnModel(mlflow.pyfunc.PythonModel): + + def __init__(self, predict_fn): + self.predict_fn = predict_fn + + def load_context(self, context): + super(CustomSklearnModel, self).load_context(context) + # pylint: disable=attribute-defined-outside-init + self.model = mlflow.sklearn.load_model(model_uri=context.artifacts["sk_model"]) + + def predict(self, context, model_input): + return self.predict_fn(self.model, model_input) + + return CustomSklearnModel + + +class ModuleScopedSklearnModel(get_model_class()): + """ + A custom Python model class defined in the test module scope. + """ + + +@pytest.fixture(scope="module") +def main_scoped_model_class(): + """ + A custom Python model class defined in the ``__main__`` scope. 
+ """ + return get_model_class() + + +@pytest.fixture(scope="module") +def iris_data(): + iris = sklearn.datasets.load_iris() + x = iris.data[:, :2] + y = iris.target + return x, y + + +@pytest.fixture(scope="module") +def sklearn_knn_model(iris_data): + x, y = iris_data + knn_model = sklearn.neighbors.KNeighborsClassifier() + knn_model.fit(x, y) + return knn_model + + +@pytest.fixture(scope="module") +def sklearn_logreg_model(iris_data): + x, y = iris_data + linear_lr = sklearn.linear_model.LogisticRegression() + linear_lr.fit(x, y) + return linear_lr + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.fixture +def pyfunc_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["scikit-learn", "pytest", "cloudpickle"], + additional_pip_deps=["-e " + os.path.dirname(mlflow.__path__[0])]) + return conda_env + + +def _conda_env(): + # NB: We need mlflow as a dependency in the environment. + return _mlflow_conda_env( + additional_conda_deps=None, + install_mlflow=False, + additional_pip_deps=[ + "-e " + os.path.dirname(mlflow.__path__[0]), + "cloudpickle=={}".format(cloudpickle.__version__), + "scikit-learn=={}".format(sklearn.__version__) + ], + additional_conda_channels=None) + + +@pytest.mark.large +def test_model_save_load(sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + conda_env=_conda_env(), + python_model=main_scoped_model_class(test_predict)) + + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + np.testing.assert_array_equal( + loaded_pyfunc_model.predict(model_input=iris_data[0]), + test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0])) + + +@pytest.mark.large +def test_pyfunc_model_log_load_no_active_run(sklearn_knn_model, main_scoped_model_class, iris_data): + sklearn_artifact_path = "sk_model_no_run" + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path) + sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=sklearn_artifact_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_artifact_path = "pyfunc_model" + assert mlflow.active_run() is None + mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path, + artifacts={"sk_model": sklearn_model_uri}, + python_model=main_scoped_model_class(test_predict)) + pyfunc_model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=pyfunc_artifact_path) + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_uri) + np.testing.assert_array_equal( + loaded_pyfunc_model.predict(model_input=iris_data[0]), + test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0])) + mlflow.end_run() + + +@pytest.mark.large +def test_model_log_load(sklearn_knn_model, main_scoped_model_class, iris_data): + sklearn_artifact_path = "sk_model" + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=sklearn_knn_model, 
artifact_path=sklearn_artifact_path) + sklearn_model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=sklearn_artifact_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_artifact_path = "pyfunc_model" + with mlflow.start_run(): + mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path, + artifacts={ + "sk_model": sklearn_model_uri, + }, + python_model=main_scoped_model_class(test_predict)) + pyfunc_model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=pyfunc_artifact_path) + + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_uri) + np.testing.assert_array_equal( + loaded_pyfunc_model.predict(model_input=iris_data[0]), + test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0])) + + +@pytest.mark.large +def test_model_load_from_remote_uri_succeeds( + sklearn_knn_model, main_scoped_model_class, tmpdir, mock_s3_bucket, iris_data): + artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + artifact_repo = S3ArtifactRepository(artifact_root) + + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + sklearn_artifact_path = "sk_model" + artifact_repo.log_artifacts(sklearn_model_path, artifact_path=sklearn_artifact_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(test_predict), + conda_env=_conda_env()) + + pyfunc_artifact_path = "pyfunc_model" + artifact_repo.log_artifacts(pyfunc_model_path, artifact_path=pyfunc_artifact_path) + + model_uri = artifact_root + "/" + pyfunc_artifact_path + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=model_uri) + np.testing.assert_array_equal( + loaded_pyfunc_model.predict(model_input=iris_data[0]), + test_predict(sk_model=sklearn_knn_model, model_input=iris_data[0])) + + +@pytest.mark.large +def test_add_to_model_adds_specified_kwargs_to_mlmodel_configuration(): + custom_kwargs = { + "key1": "value1", + "key2": 20, + "key3": range(10), + } + model_config = Model() + mlflow.pyfunc.add_to_model(model=model_config, + loader_module=os.path.basename(__file__)[:-3], + data="data", + code="code", + env=None, + **custom_kwargs) + + assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors + assert all([item in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] for item in custom_kwargs]) + + +@pytest.mark.large +def test_pyfunc_model_serving_without_conda_env_activation_succeeds_with_main_scoped_class( + sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(test_predict), + conda_env=_conda_env()) + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + + sample_input = pd.DataFrame(iris_data[0]) + scoring_response = 
pyfunc_serve_and_score_model( + model_uri=pyfunc_model_path, + data=sample_input, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + extra_args=["--no-conda"]) + assert scoring_response.status_code == 200 + np.testing.assert_array_equal( + np.array(json.loads(scoring_response.text)), + loaded_pyfunc_model.predict(sample_input)) + + +@pytest.mark.large +def test_pyfunc_model_serving_with_conda_env_activation_succeeds_with_main_scoped_class( + sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(test_predict), + conda_env=_conda_env()) + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + + sample_input = pd.DataFrame(iris_data[0]) + scoring_response = pyfunc_serve_and_score_model( + model_uri=pyfunc_model_path, + data=sample_input, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + assert scoring_response.status_code == 200 + np.testing.assert_array_equal( + np.array(json.loads(scoring_response.text)), + loaded_pyfunc_model.predict(sample_input)) + + +@pytest.mark.large +def test_pyfunc_model_serving_without_conda_env_activation_succeeds_with_module_scoped_class( + sklearn_knn_model, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=ModuleScopedSklearnModel(test_predict), + code_path=[os.path.dirname(tests.__file__)], + conda_env=_conda_env()) + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + + sample_input = pd.DataFrame(iris_data[0]) + scoring_response = pyfunc_serve_and_score_model( + model_uri=pyfunc_model_path, + data=sample_input, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + extra_args=["--no-conda"]) + assert scoring_response.status_code == 200 + np.testing.assert_array_equal( + np.array(json.loads(scoring_response.text)), + loaded_pyfunc_model.predict(sample_input)) + + +@pytest.mark.large +def test_pyfunc_cli_predict_command_without_conda_env_activation_succeeds( + sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(test_predict), + conda_env=_conda_env()) + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + + sample_input = pd.DataFrame(iris_data[0]) + input_csv_path = os.path.join(str(tmpdir), "input with spaces.csv") + 
sample_input.to_csv(input_csv_path, header=True, index=False) + output_json_path = os.path.join(str(tmpdir), "output.json") + process = Popen(['mlflow', 'models', 'predict', + '--model-uri', pyfunc_model_path, + '-i', input_csv_path, + "--content-type", "csv", + '-o', output_json_path, + '--no-conda'], + stdout=PIPE, + stderr=PIPE, + preexec_fn=os.setsid) + _, stderr = process.communicate() + assert 0 == process.wait(), "stderr = \n\n{}\n\n".format(stderr) + + result_df = pandas.read_json(output_json_path, orient="records") + np.testing.assert_array_equal(result_df.values.transpose()[0], + loaded_pyfunc_model.predict(sample_input)) + + +@pytest.mark.large +def test_pyfunc_cli_predict_command_with_conda_env_activation_succeeds( + sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(test_predict), + conda_env=_conda_env()) + loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + + sample_input = pd.DataFrame(iris_data[0]) + input_csv_path = os.path.join(str(tmpdir), "input with spaces.csv") + sample_input.to_csv(input_csv_path, header=True, index=False) + output_json_path = os.path.join(str(tmpdir), "output.json") + process = Popen(['mlflow', 'models', 'predict', + '--model-uri', pyfunc_model_path, + '-i', input_csv_path, + "--content-type", "csv", + '-o', output_json_path], + stderr=PIPE, + stdout=PIPE, + preexec_fn=os.setsid) + _, stderr = process.communicate() + assert 0 == process.wait(), "stderr = \n\n{}\n\n".format(stderr) + result_df = pandas.read_json(output_json_path, orient='records') + np.testing.assert_array_equal(result_df.values.transpose()[0], + loaded_pyfunc_model.predict(sample_input)) + + +@pytest.mark.large +def test_save_model_persists_specified_conda_env_in_mlflow_model_directory( + sklearn_knn_model, main_scoped_model_class, pyfunc_custom_env, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_knn_model, + path=sklearn_model_path, + serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE) + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(predict_fn=None), + conda_env=pyfunc_custom_env) + + pyfunc_conf = _get_flavor_configuration( + model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != pyfunc_custom_env + + with open(pyfunc_custom_env, "r") as f: + pyfunc_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == pyfunc_custom_env_parsed + + +@pytest.mark.large +def test_log_model_persists_specified_conda_env_in_mlflow_model_directory( + sklearn_knn_model, main_scoped_model_class, pyfunc_custom_env): + sklearn_artifact_path = "sk_model" + with mlflow.start_run(): + 
mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path) + sklearn_run_id = mlflow.active_run().info.run_id + + pyfunc_artifact_path = "pyfunc_model" + with mlflow.start_run(): + mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path, + artifacts={ + "sk_model": utils_get_artifact_uri( + artifact_path=sklearn_artifact_path, + run_id=sklearn_run_id) + }, + python_model=main_scoped_model_class(predict_fn=None), + conda_env=pyfunc_custom_env) + pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path)) + + pyfunc_conf = _get_flavor_configuration( + model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != pyfunc_custom_env + + with open(pyfunc_custom_env, "r") as f: + pyfunc_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == pyfunc_custom_env_parsed + + +@pytest.mark.large +def test_save_model_without_specified_conda_env_uses_default_env_with_expected_dependencies( + sklearn_logreg_model, main_scoped_model_class, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_logreg_model, path=sklearn_model_path) + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(predict_fn=None), + conda_env=_conda_env()) + + pyfunc_conf = _get_flavor_configuration( + model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == _conda_env() + + +@pytest.mark.large +def test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies( + sklearn_knn_model, main_scoped_model_class): + sklearn_artifact_path = "sk_model" + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=sklearn_knn_model, artifact_path=sklearn_artifact_path) + sklearn_run_id = mlflow.active_run().info.run_id + + pyfunc_artifact_path = "pyfunc_model" + with mlflow.start_run(): + mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path, + artifacts={ + "sk_model": utils_get_artifact_uri( + artifact_path=sklearn_artifact_path, + run_id=sklearn_run_id) + }, + python_model=main_scoped_model_class(predict_fn=None)) + pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path)) + + pyfunc_conf = _get_flavor_configuration( + model_path=pyfunc_model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(pyfunc_model_path, pyfunc_conf[mlflow.pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.pyfunc.model.get_default_conda_env() + + +@pytest.mark.large +def test_save_model_correctly_resolves_directory_artifact_with_nested_contents( + tmpdir, model_path, iris_data): + directory_artifact_path = os.path.join(str(tmpdir), "directory_artifact") + nested_file_relative_path = os.path.join( + "my", "somewhat", 
"heavily", "nested", "directory", "myfile.txt") + nested_file_path = os.path.join(directory_artifact_path, nested_file_relative_path) + os.makedirs(os.path.dirname(nested_file_path)) + nested_file_text = "some sample file text" + with open(nested_file_path, "w") as f: + f.write(nested_file_text) + + class ArtifactValidationModel(mlflow.pyfunc.PythonModel): + def predict(self, context, model_input): + expected_file_path = os.path.join( + context.artifacts["testdir"], nested_file_relative_path) + if not os.path.exists(expected_file_path): + return False + else: + with open(expected_file_path, "r") as f: + return (f.read() == nested_file_text) + + mlflow.pyfunc.save_model(path=model_path, + artifacts={ + "testdir": directory_artifact_path + }, + python_model=ArtifactValidationModel(), + conda_env=_conda_env()) + + loaded_model = mlflow.pyfunc.load_pyfunc(model_uri=model_path) + assert loaded_model.predict(iris_data[0]) + + +@pytest.mark.large +def test_save_model_with_no_artifacts_does_not_produce_artifacts_dir(model_path): + mlflow.pyfunc.save_model(path=model_path, + python_model=ModuleScopedSklearnModel(predict_fn=None), + artifacts=None, + conda_env=_conda_env()) + + assert os.path.exists(model_path) + assert "artifacts" not in os.listdir(model_path) + pyfunc_conf = _get_flavor_configuration( + model_path=model_path, flavor_name=mlflow.pyfunc.FLAVOR_NAME) + assert mlflow.pyfunc.model.CONFIG_KEY_ARTIFACTS not in pyfunc_conf + + +@pytest.mark.large +def test_save_model_with_python_model_argument_of_invalid_type_raises_exeption(tmpdir): + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=os.path.join(str(tmpdir), "model1"), + python_model="not the right type") + assert "python_model` must be a subclass of `PythonModel`" in str(exc_info) + + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=os.path.join(str(tmpdir), "model2"), + python_model="not the right type") + assert "python_model` must be a subclass of `PythonModel`" in str(exc_info) + + +@pytest.mark.large +def test_save_model_with_unsupported_argument_combinations_throws_exception(model_path): + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=model_path, + artifacts={ + "artifact": "/path/to/artifact", + }, + python_model=None) + assert "Either `loader_module` or `python_model` must be specified" in str(exc_info) + + python_model = ModuleScopedSklearnModel(predict_fn=None) + loader_module = __name__ + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=model_path, + python_model=python_model, + loader_module=loader_module) + assert "The following sets of parameters cannot be specified together" in str(exc_info) + assert str(python_model) in str(exc_info) + assert str(loader_module) in str(exc_info) + + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=model_path, + python_model=python_model, + data_path="/path/to/data", + artifacts={ + "artifact": "/path/to/artifact", + }) + assert "The following sets of parameters cannot be specified together" in str(exc_info) + + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=model_path, + python_model=None, + loader_module=None) + assert "Either `loader_module` or `python_model` must be specified" in str(exc_info) + + +@pytest.mark.large +def test_log_model_with_unsupported_argument_combinations_throws_exception(): + with mlflow.start_run(), pytest.raises(MlflowException) as exc_info: + 
mlflow.pyfunc.log_model(artifact_path="pyfunc_model", + artifacts={ + "artifact": "/path/to/artifact", + }, + python_model=None) + assert "Either `loader_module` or `python_model` must be specified!" in str(exc_info) + + python_model = ModuleScopedSklearnModel(predict_fn=None) + loader_module = __name__ + with mlflow.start_run(), pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.log_model(artifact_path="pyfunc_model", + python_model=python_model, + loader_module=loader_module) + assert "The following sets of parameters cannot be specified together" in str(exc_info) + assert str(python_model) in str(exc_info) + assert str(loader_module) in str(exc_info) + + with mlflow.start_run(), pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.log_model(artifact_path="pyfunc_model", + python_model=python_model, + data_path="/path/to/data", + artifacts={ + "artifact1": "/path/to/artifact", + }) + assert "The following sets of parameters cannot be specified together" in str(exc_info) + + with mlflow.start_run(), pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.log_model(artifact_path="pyfunc_model", + python_model=None, + loader_module=None) + assert "Either `loader_module` or `python_model` must be specified" in str(exc_info) + + +@pytest.mark.large +def test_load_model_with_differing_cloudpickle_version_at_micro_granularity_logs_warning( + model_path): + class TestModel(mlflow.pyfunc.PythonModel): + def predict(self, context, model_input): + return model_input + + mlflow.pyfunc.save_model(path=model_path, python_model=TestModel()) + saver_cloudpickle_version = "0.5.8" + model_config_path = os.path.join(model_path, "MLmodel") + model_config = Model.load(model_config_path) + model_config.flavors[mlflow.pyfunc.FLAVOR_NAME][ + mlflow.pyfunc.model.CONFIG_KEY_CLOUDPICKLE_VERSION] = saver_cloudpickle_version + model_config.save(model_config_path) + + log_messages = [] + + def custom_warn(message_text, *args, **kwargs): + log_messages.append(message_text % args % kwargs) + + loader_cloudpickle_version = "0.5.7" + with mock.patch("mlflow.pyfunc._logger.warning") as warn_mock, \ + mock.patch("cloudpickle.__version__") as cloudpickle_version_mock: + cloudpickle_version_mock.__str__ = lambda *args, **kwargs: loader_cloudpickle_version + warn_mock.side_effect = custom_warn + mlflow.pyfunc.load_pyfunc(model_uri=model_path) + + assert any([ + "differs from the version of CloudPickle that is currently running" in log_message and + saver_cloudpickle_version in log_message and + loader_cloudpickle_version in log_message + for log_message in log_messages + ]) + + +@pytest.mark.large +def test_load_model_with_missing_cloudpickle_version_logs_warning( + model_path): + class TestModel(mlflow.pyfunc.PythonModel): + def predict(self, context, model_input): + return model_input + + mlflow.pyfunc.save_model(path=model_path, python_model=TestModel()) + model_config_path = os.path.join(model_path, "MLmodel") + model_config = Model.load(model_config_path) + del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME][ + mlflow.pyfunc.model.CONFIG_KEY_CLOUDPICKLE_VERSION] + model_config.save(model_config_path) + + log_messages = [] + + def custom_warn(message_text, *args, **kwargs): + log_messages.append(message_text % args % kwargs) + + with mock.patch("mlflow.pyfunc._logger.warning") as warn_mock: + warn_mock.side_effect = custom_warn + mlflow.pyfunc.load_pyfunc(model_uri=model_path) + + assert any([ + ("The version of CloudPickle used to save the model could not be found in the MLmodel" + " configuration") 
in log_message + for log_message in log_messages + ]) + + +# TODO(czumar) Re-mark this test as "large" instead of "release" after SageMaker docker container +# build issues have been debugged +# @pytest.mark.large +@pytest.mark.release +def test_sagemaker_docker_model_scoring_with_default_conda_env( + sklearn_logreg_model, main_scoped_model_class, iris_data, tmpdir): + sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model") + mlflow.sklearn.save_model(sk_model=sklearn_logreg_model, path=sklearn_model_path) + + def test_predict(sk_model, model_input): + return sk_model.predict(model_input) * 2 + + pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model") + mlflow.pyfunc.save_model(path=pyfunc_model_path, + artifacts={ + "sk_model": sklearn_model_path + }, + python_model=main_scoped_model_class(test_predict), + conda_env=_conda_env()) + reloaded_pyfunc = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path) + + inference_df = pd.DataFrame(iris_data[0]) + scoring_response = score_model_in_sagemaker_docker_container( + model_uri=pyfunc_model_path, + data=inference_df, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + flavor=mlflow.pyfunc.FLAVOR_NAME) + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + + pandas.testing.assert_frame_equal( + deployed_model_preds, + pd.DataFrame(reloaded_pyfunc.predict(inference_df)), + check_dtype=False, + check_less_precise=6) diff --git a/tests/pyfunc/test_model_export_with_loader_module_and_data_path.py b/tests/pyfunc/test_model_export_with_loader_module_and_data_path.py new file mode 100644 index 0000000000000..8825c9ffc78ee --- /dev/null +++ b/tests/pyfunc/test_model_export_with_loader_module_and_data_path.py @@ -0,0 +1,133 @@ +from __future__ import print_function + +import os +import pickle + +import numpy as np +import pytest +import six +import sklearn.datasets +import sklearn.linear_model +import sklearn.neighbors + +import mlflow +import mlflow.pyfunc +import mlflow.pyfunc.model +import mlflow.sklearn +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.tracking.artifact_utils import _download_artifact_from_uri + + +def _load_pyfunc(path): + with open(path, "rb") as f: + if six.PY2: + return pickle.load(f) + else: + return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg + + +@pytest.fixture(scope="module") +def iris_data(): + iris = sklearn.datasets.load_iris() + x = iris.data[:, :2] + y = iris.target + return x, y + + +@pytest.fixture(scope="module") +def sklearn_knn_model(iris_data): + x, y = iris_data + knn_model = sklearn.neighbors.KNeighborsClassifier() + knn_model.fit(x, y) + return knn_model + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.mark.large +def test_model_save_load(sklearn_knn_model, iris_data, tmpdir, model_path): + sk_model_path = os.path.join(str(tmpdir), "knn.pkl") + with open(sk_model_path, "wb") as f: + pickle.dump(sklearn_knn_model, f) + + model_config = Model(run_id="test", artifact_path="testtest") + mlflow.pyfunc.save_model(path=model_path, + data_path=sk_model_path, + loader_module=os.path.basename(__file__)[:-3], + code_path=[__file__], + mlflow_model=model_config) + + reloaded_model_config = Model.load(os.path.join(model_path, "MLmodel")) + assert model_config.__dict__ == reloaded_model_config.__dict__ + assert mlflow.pyfunc.FLAVOR_NAME in reloaded_model_config.flavors + assert mlflow.pyfunc.PY_VERSION in 
reloaded_model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] + reloaded_model = mlflow.pyfunc.load_pyfunc(model_path) + np.testing.assert_array_equal( + sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])) + + +@pytest.mark.large +def test_model_log_load(sklearn_knn_model, iris_data, tmpdir): + sk_model_path = os.path.join(str(tmpdir), "knn.pkl") + with open(sk_model_path, "wb") as f: + pickle.dump(sklearn_knn_model, f) + + pyfunc_artifact_path = "pyfunc_model" + with mlflow.start_run(): + mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path, + data_path=sk_model_path, + loader_module=os.path.basename(__file__)[:-3], + code_path=[__file__]) + pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path)) + + model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel")) + assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors + assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] + reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path) + np.testing.assert_array_equal( + sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])) + + +@pytest.mark.large +def test_model_log_load_no_active_run(sklearn_knn_model, iris_data, tmpdir): + sk_model_path = os.path.join(str(tmpdir), "knn.pkl") + with open(sk_model_path, "wb") as f: + pickle.dump(sklearn_knn_model, f) + + pyfunc_artifact_path = "pyfunc_model" + assert mlflow.active_run() is None + mlflow.pyfunc.log_model(artifact_path=pyfunc_artifact_path, + data_path=sk_model_path, + loader_module=os.path.basename(__file__)[:-3], + code_path=[__file__]) + pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path)) + + model_config = Model.load(os.path.join(pyfunc_model_path, "MLmodel")) + assert mlflow.pyfunc.FLAVOR_NAME in model_config.flavors + assert mlflow.pyfunc.PY_VERSION in model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] + reloaded_model = mlflow.pyfunc.load_pyfunc(pyfunc_model_path) + np.testing.assert_array_equal( + sklearn_knn_model.predict(iris_data[0]), reloaded_model.predict(iris_data[0])) + mlflow.end_run() + + +@pytest.mark.large +def test_save_model_with_unsupported_argument_combinations_throws_exception(model_path): + with pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.save_model(path=model_path, + data_path="/path/to/data") + assert "Either `loader_module` or `python_model` must be specified" in str(exc_info) + + +@pytest.mark.large +def test_log_model_with_unsupported_argument_combinations_throws_exception(): + with mlflow.start_run(), pytest.raises(MlflowException) as exc_info: + mlflow.pyfunc.log_model(artifact_path="pyfunc_model", + data_path="/path/to/data") + assert "Either `loader_module` or `python_model` must be specified" in str(exc_info) diff --git a/tests/pyfunc/test_scoring_server.py b/tests/pyfunc/test_scoring_server.py new file mode 100644 index 0000000000000..594db56dc1358 --- /dev/null +++ b/tests/pyfunc/test_scoring_server.py @@ -0,0 +1,265 @@ +import os +import json +import pandas as pd +import numpy as np +from collections import namedtuple, OrderedDict + +import pytest +import sklearn.datasets as datasets +import sklearn.neighbors as knn + +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server +import mlflow.sklearn +from mlflow.protos.databricks_pb2 import ErrorCode, MALFORMED_REQUEST, 
BAD_REQUEST + +from tests.helper_functions import pyfunc_serve_and_score_model, random_int, random_str + + +ModelWithData = namedtuple("ModelWithData", ["model", "inference_data"]) + + +@pytest.fixture(scope="session") +def sklearn_model(): + iris = datasets.load_iris() + X = iris.data[:, :2] # we only take the first two features. + y = iris.target + knn_model = knn.KNeighborsClassifier() + knn_model.fit(X, y) + return ModelWithData(model=knn_model, inference_data=X) + + +@pytest.fixture +def model_path(tmpdir): + return str(os.path.join(tmpdir.strpath, "model")) + + +@pytest.mark.large +def test_scoring_server_responds_to_invalid_json_input_with_stacktrace_and_error_code( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path) + + incorrect_json_content = json.dumps({"not": "a serialized dataframe"}) + response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=incorrect_json_content, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + response_json = json.loads(response.content) + assert "error_code" in response_json + assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST) + assert "message" in response_json + assert "stack_trace" in response_json + + +@pytest.mark.large +def test_scoring_server_responds_to_malformed_json_input_with_stacktrace_and_error_code( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path) + + malformed_json_content = "this is,,,, not valid json" + response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=malformed_json_content, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + response_json = json.loads(response.content) + assert "error_code" in response_json + assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST) + assert "message" in response_json + assert "stack_trace" in response_json + + +@pytest.mark.large +def test_scoring_server_responds_to_invalid_pandas_input_format_with_stacktrace_and_error_code( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path) + + # The pyfunc scoring server expects a serialized Pandas Dataframe in `split` or `records` + # format; passing a serialized Dataframe in `table` format should yield a readable error + pandas_table_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="table") + response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=pandas_table_content, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + response_json = json.loads(response.content) + assert "error_code" in response_json + assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST) + assert "message" in response_json + assert "stack_trace" in response_json + + +@pytest.mark.large +def test_scoring_server_responds_to_incompatible_inference_dataframe_with_stacktrace_and_error_code( + sklearn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path) + incompatible_df = pd.DataFrame(np.array(range(10))) + + response = pyfunc_serve_and_score_model( + model_uri=os.path.abspath(model_path), + data=incompatible_df, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED) + response_json = json.loads(response.content) + assert "error_code" in response_json + assert response_json["error_code"] == ErrorCode.Name(BAD_REQUEST) + assert "message" in 
response_json
+    assert "stack_trace" in response_json
+
+
+@pytest.mark.large
+def test_scoring_server_responds_to_invalid_csv_input_with_stacktrace_and_error_code(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
+
+    # An empty string is not valid CSV input for pandas
+    incorrect_csv_content = ""
+    response = pyfunc_serve_and_score_model(
+        model_uri=os.path.abspath(model_path),
+        data=incorrect_csv_content,
+        content_type=pyfunc_scoring_server.CONTENT_TYPE_CSV)
+    response_json = json.loads(response.content)
+    assert "error_code" in response_json
+    assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
+    assert "message" in response_json
+    assert "stack_trace" in response_json
+
+
+@pytest.mark.large
+def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_records_orientation(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
+
+    pandas_record_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="records")
+    response_records_content_type = pyfunc_serve_and_score_model(
+        model_uri=os.path.abspath(model_path),
+        data=pandas_record_content,
+        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED)
+    assert response_records_content_type.status_code == 200
+
+
+@pytest.mark.large
+def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_split_orientation(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
+
+    pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
+    response_default_content_type = pyfunc_serve_and_score_model(
+        model_uri=os.path.abspath(model_path),
+        data=pandas_split_content,
+        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON)
+    assert response_default_content_type.status_code == 200
+
+    response = pyfunc_serve_and_score_model(
+        model_uri=os.path.abspath(model_path),
+        data=pandas_split_content,
+        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
+    assert response.status_code == 200
+
+
+@pytest.mark.large
+def test_scoring_server_successfully_evaluates_correct_split_to_numpy(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
+
+    pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
+    response_records_content_type = pyfunc_serve_and_score_model(
+        model_uri=os.path.abspath(model_path),
+        data=pandas_split_content,
+        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_NUMPY)
+    assert response_records_content_type.status_code == 200
+
+
+@pytest.mark.large
+def test_scoring_server_responds_to_invalid_content_type_request_with_unsupported_content_type_code(
+        sklearn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
+
+    pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
+    response = pyfunc_serve_and_score_model(
+        model_uri=os.path.abspath(model_path),
+        data=pandas_split_content,
+        content_type="not_a_supported_content_type")
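+    # 415 is the standard HTTP "Unsupported Media Type" status code.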
+    assert response.status_code == 415
+
+
+@pytest.mark.large
+def test_parse_json_input_records_oriented():
+    size = 20
+    data = {"col_m": [random_int(0, 1000) for _ in range(size)],
+            "col_z": [random_str(4) for _ in range(size)],
+            "col_a": [random_int() for _ in range(size)]}
+    p1 = pd.DataFrame.from_dict(data)
+    p2 = pyfunc_scoring_server.parse_json_input(p1.to_json(orient="records"), orient="records")
+    # The "records" orient may shuffle column ordering, so we compare each column Series
+    for col in data.keys():
+        assert all(p1[col] == p2[col])
+
+
+@pytest.mark.large
+def test_parse_json_input_split_oriented():
+    size = 200
+    data = {"col_m": [random_int(0, 1000) for _ in range(size)],
+            "col_z": [random_str(4) for _ in range(size)],
+            "col_a": [random_int() for _ in range(size)]}
+    p1 = pd.DataFrame.from_dict(data)
+    p2 = pyfunc_scoring_server.parse_json_input(p1.to_json(orient="split"), orient="split")
+    assert all(p1 == p2)
+
+
+@pytest.mark.large
+def test_parse_json_input_split_oriented_to_numpy_array():
+    size = 200
+    data = OrderedDict([("col_m", [random_int(0, 1000) for _ in range(size)]),
+                        ("col_z", [random_str(4) for _ in range(size)]),
+                        ("col_a", [random_int() for _ in range(size)])])
+    p0 = pd.DataFrame.from_dict(data)
+    np_array = np.array([[a, b, c] for a, b, c in
+                         zip(data['col_m'], data['col_z'], data['col_a'])],
+                        dtype=object)
+    p1 = pd.DataFrame(np_array).infer_objects()
+    p2 = pyfunc_scoring_server.parse_split_oriented_json_input_to_numpy(
+        p0.to_json(orient="split"))
+    np.testing.assert_array_equal(p1, p2)
+
+
+@pytest.mark.large
+def test_records_oriented_json_to_df():
+    # test that datatype for "zip" column is not converted to "int64"
+    jstr = '[' \
+           '{"zip":"95120","cost":10.45,"score":8},' \
+           '{"zip":"95128","cost":23.0,"score":0},' \
+           '{"zip":"95128","cost":12.1,"score":10}' \
+           ']'
+    df = pyfunc_scoring_server.parse_json_input(jstr, orient="records")
+
+    assert set(df.columns) == {'zip', 'cost', 'score'}
+    assert set(str(dt) for dt in df.dtypes) == {'object', 'float64', 'int64'}
+
+
+@pytest.mark.large
+def test_split_oriented_json_to_df():
+    # test that datatype for "zip" column is not converted to "int64"
+    jstr = '{"columns":["zip","cost","count"],"index":[0,1,2],' \
+           '"data":[["95120",10.45,-8],["95128",23.0,-1],["95128",12.1,1000]]}'
+    df = pyfunc_scoring_server.parse_json_input(jstr, orient="split")
+
+    assert set(df.columns) == {'zip', 'cost', 'count'}
+    assert set(str(dt) for dt in df.dtypes) == {'object', 'float64', 'int64'}
+
+
+@pytest.mark.large
+def test_split_oriented_json_to_numpy_array():
+    # test that datatype for "zip" column is not converted to "int64"
+    jstr = '{"columns":["zip","cost","count"],"index":[0,1,2],' \
+           '"data":[["95120",10.45,-8],["95128",23.0,-1],["95128",12.1,1000]]}'
+    df = pyfunc_scoring_server.parse_split_oriented_json_input_to_numpy(jstr)
+
+    assert set(df.columns) == {'zip', 'cost', 'count'}
+    assert set(str(dt) for dt in df.dtypes) == {'object', 'float64', 'int64'}
+
+
+def test_get_jsonable_obj():
+    from mlflow.pyfunc.scoring_server import _get_jsonable_obj
+    from mlflow.pyfunc.scoring_server import NumpyEncoder
+    py_ary = [["a", "b", "c"], ["e", "f", "g"]]
+    np_ary = _get_jsonable_obj(np.array(py_ary))
+    assert json.dumps(py_ary, cls=NumpyEncoder) == json.dumps(np_ary, cls=NumpyEncoder)
+    np_ary = _get_jsonable_obj(np.array(py_ary, dtype=type(str)))
+    assert json.dumps(py_ary, cls=NumpyEncoder) == json.dumps(np_ary, cls=NumpyEncoder)
diff --git a/tests/pyfunc/test_spark.py b/tests/pyfunc/test_spark.py
index feade1166b5df..6d7e0e290f8fc 100644
--- a/tests/pyfunc/test_spark.py
+++ b/tests/pyfunc/test_spark.py
@@ -1,95 +1,147 @@
 from __future__ import print_function
 
 import os
-import shutil
 import sys
-import tempfile
-import unittest
 
+import numpy as np
 import pandas as pd
-import pyspark
 import pytest
-import 
sklearn.datasets -from sklearn.neighbors import KNeighborsClassifier +import pyspark +from pyspark.sql.types import ArrayType, DoubleType, LongType, StringType, FloatType, IntegerType -from mlflow.pyfunc import load_pyfunc, spark_udf -from mlflow.pyfunc.spark_model_cache import SparkModelCache +import mlflow +import mlflow.pyfunc import mlflow.sklearn +from mlflow.pyfunc import spark_udf +from mlflow.pyfunc.spark_model_cache import SparkModelCache + +import tests +prediction = [int(1), int(2), "class1", float(0.1), 0.2] +types = [np.int32, np.int, np.str, np.float32, np.double] -def score_model_as_udf(model_path, run_id, pandas_df): - spark = pyspark.sql.SparkSession.builder \ - .config(key="spark.python.worker.reuse", value=True) \ - .master("local-cluster[2, 1, 1024]") \ + +def score_model_as_udf(model_uri, pandas_df, result_type="double"): + spark = pyspark.sql.SparkSession.builder\ + .config(key="spark.python.worker.reuse", value=True)\ + .master("local-cluster[2, 1, 1024]")\ .getOrCreate() spark_df = spark.createDataFrame(pandas_df) - pyfunc_udf = spark_udf(spark, model_path, run_id, result_type="double") + pyfunc_udf = spark_udf(spark=spark, model_uri=model_uri, result_type=result_type) new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns)) return [x['prediction'] for x in new_df.collect()] -class TestSparkUDFs(unittest.TestCase): - def setUp(self): - self._tmp = tempfile.mkdtemp("mlflow-spark-test", dir="/tmp") - # NB: local-cluster mode actually sets up 2 executors, each given 1 core - # and 1024 MB of memory. This is the best way to simulate pickling/serialization - # behavior to ensure it will work as expected on a real cluster. - self.spark = pyspark.sql.SparkSession.builder \ - .config(key="spark.python.worker.reuse", value=True) \ - .master("local-cluster[2, 1, 1024]") \ - .getOrCreate() - wine = sklearn.datasets.load_wine() - self._pandas_df = pd.DataFrame(wine.data[:, :11], columns=wine.feature_names[:11]) - - knn = KNeighborsClassifier() - knn.fit(self._pandas_df, wine.target) - self._model_path = os.path.join(self._tmp, "model") - mlflow.sklearn.save_model(knn, path=self._model_path) - self._predict = knn.predict(self._pandas_df) - - def tearDown(self): - shutil.rmtree(self._tmp) - - @pytest.mark.large - def test_spark_udf(self): - pandas_df = self._pandas_df - spark_df = self.spark.createDataFrame(pandas_df) - pyfunc_udf = spark_udf(self.spark, self._model_path, result_type="integer") - new_df = spark_df.withColumn("prediction", pyfunc_udf(*self._pandas_df.columns)) - spark_results = new_df.collect() - - # Compare against directly running the model. - direct_model = load_pyfunc(self._model_path) - pandas_results = direct_model.predict(pandas_df) - self.assertEqual(178, len(pandas_results)) - self.assertEqual(178, len(spark_results)) - for i in range(0, len(pandas_results)): # noqa - self.assertEqual(self._predict[i], pandas_results[i]) - self.assertEqual(pandas_results[i], spark_results[i]['prediction']) - - @pytest.mark.large - def test_model_cache(self): - archive_path = SparkModelCache.add_local_model(self.spark, self._model_path) - assert archive_path != self._model_path - - # Ensure we can use the model locally. - local_model = SparkModelCache.get_or_load(archive_path) - assert isinstance(local_model, KNeighborsClassifier) - - # Request the model on all executors, and see how many times we got cache hits. 
- def get_model(_): - model = SparkModelCache.get_or_load(archive_path) - assert isinstance(model, KNeighborsClassifier) - return SparkModelCache._cache_hits - - # This will run 30 distinct tasks, and we expect most to reuse an already-loaded model. - # Note that we can't necessarily expect an even split, or even that there were only - # exactly 2 python processes launched, due to Spark and its mysterious ways, but we do - # expect significant reuse. - results = self.spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect() - - # TODO(tomas): Looks like spark does not reuse python workers with python==3.x - assert sys.version[0] == '3' or max(results) > 10 - # Running again should see no newly-loaded models. - results2 = self.spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect() - assert sys.version[0] == '3' or min(results2) > 0 +class ConstantPyfuncWrapper(object): + @staticmethod + def predict(model_input): + m, _ = model_input.shape + prediction_df = pd.DataFrame(data={ + str(i): np.array([prediction[i] for j in range(m)], + dtype=types[i]) for i in range(len(prediction))}, + columns=[str(i) for i in range(len(prediction))]) + return prediction_df + + +def _load_pyfunc(_): + return ConstantPyfuncWrapper() + + +@pytest.fixture(autouse=True) +def configure_environment(): + os.environ["PYSPARK_PYTHON"] = sys.executable + + +@pytest.fixture +def spark(): + return pyspark.sql.SparkSession.builder\ + .config(key="spark.python.worker.reuse", value=True)\ + .master("local-cluster[2, 1, 1024]")\ + .getOrCreate() + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.mark.large +def test_spark_udf(spark, model_path): + mlflow.pyfunc.save_model( + path=model_path, + loader_module=__name__, + code_path=[os.path.dirname(tests.__file__)], + ) + reloaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_path) + + pandas_df = pd.DataFrame(data=np.ones((10, 10)), columns=[str(i) for i in range(10)]) + spark_df = spark.createDataFrame(pandas_df) + + # Test all supported return types + type_map = {"float": (FloatType(), np.number), + "int": (IntegerType(), np.int32), + "double": (DoubleType(), np.number), + "long": (LongType(), np.int), + "string": (StringType(), None)} + + for tname, tdef in type_map.items(): + spark_type, np_type = tdef + prediction_df = reloaded_pyfunc_model.predict(pandas_df) + for is_array in [True, False]: + t = ArrayType(spark_type) if is_array else spark_type + if tname == "string": + expected = prediction_df.applymap(str) + else: + expected = prediction_df.select_dtypes(np_type) + if tname == "float": + expected = expected.astype(np.float32) + + expected = [list(row[1]) if is_array else row[1][0] for row in expected.iterrows()] + pyfunc_udf = spark_udf(spark, model_path, result_type=t) + new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns)) + actual = list(new_df.select("prediction").toPandas()['prediction']) + assert expected == actual + if not is_array: + pyfunc_udf = spark_udf(spark, model_path, result_type=tname) + new_df = spark_df.withColumn("prediction", pyfunc_udf(*pandas_df.columns)) + actual = list(new_df.select("prediction").toPandas()['prediction']) + assert expected == actual + + +@pytest.mark.large +def test_model_cache(spark, model_path): + mlflow.pyfunc.save_model( + path=model_path, + loader_module=__name__, + code_path=[os.path.dirname(tests.__file__)], + ) + + archive_path = SparkModelCache.add_local_model(spark, model_path) + assert archive_path != 
model_path
+
+    # Ensure we can use the model locally.
+    local_model = SparkModelCache.get_or_load(archive_path)
+    assert isinstance(local_model, ConstantPyfuncWrapper)
+
+    # Define the model class name as a string so that each Spark executor can reference it
+    # without attempting to resolve ConstantPyfuncWrapper, which is only available on the driver.
+    constant_model_name = ConstantPyfuncWrapper.__name__
+
+    # Request the model on all executors, and see how many times we got cache hits.
+    def get_model(_):
+        model = SparkModelCache.get_or_load(archive_path)
+        # NB: we cannot use an isinstance check here, since the remote workers do not
+        # know about the ConstantPyfuncWrapper class.
+        assert type(model).__name__ == constant_model_name
+        return SparkModelCache._cache_hits
+
+    # This will run 30 distinct tasks, and we expect most to reuse an already-loaded model.
+    # Note that we can't necessarily expect an even split, or even that there were only
+    # exactly 2 python processes launched, due to Spark and its mysterious ways, but we do
+    # expect significant reuse.
+    results = spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect()
+
+    # TODO(tomas): Looks like spark does not reuse python workers with python==3.x
+    assert sys.version[0] == '3' or max(results) > 10
+    # Running again should see no newly-loaded models.
+    results2 = spark.sparkContext.parallelize(range(0, 100), 30).map(get_model).collect()
+    assert sys.version[0] == '3' or min(results2) > 0
diff --git a/tests/pyfunc/test_utils.py b/tests/pyfunc/test_utils.py
deleted file mode 100644
index 377ee4124f684..0000000000000
--- a/tests/pyfunc/test_utils.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import print_function
-
-import filecmp
-import os
-
-from mlflow import pyfunc
-from mlflow.utils.file_utils import TempDir
-
-# Tests for mlflow/pyfunc/__init__.py _copy_file_or_tree(src, dst, dst_dir)
-
-
-# Tests whether copying a file works.
-def test_file_copy():
-    with TempDir() as tmp:
-        file_path = tmp.path("test_file.txt")
-        copy_path = tmp.path("test_dir1/")
-        os.mkdir(copy_path)
-        with open(file_path, 'a') as f:
-            f.write("testing")
-        pyfunc._copy_file_or_tree(file_path, copy_path, "")
-        assert filecmp.cmp(file_path, os.path.join(copy_path, "test_file.txt"))
-
-
-# Tests whether creating a directory works.
-def test_dir_create():
-    with TempDir() as tmp:
-        file_path = tmp.path("test_file.txt")
-        create_dir = tmp.path("test_dir2/")
-        with open(file_path, 'a') as f:
-            f.write("testing")
-        name = pyfunc._copy_file_or_tree(file_path, file_path, create_dir)
-        assert filecmp.cmp(file_path, name)
-
-
-# Tests whether copying a directory works.
-def test_dir_copy(): - with TempDir() as tmp: - dir_path = tmp.path("test_dir1/") - copy_path = tmp.path("test_dir2") - os.mkdir(dir_path) - with open(os.path.join(dir_path, "test_file.txt"), 'a') as f: - f.write("testing") - pyfunc._copy_file_or_tree(dir_path, copy_path, "") - assert filecmp.dircmp(dir_path, copy_path) diff --git a/tests/pytorch/test_pytorch_model_export.py b/tests/pytorch/test_pytorch_model_export.py index e33afc42df5ac..e426911bdda0e 100644 --- a/tests/pytorch/test_pytorch_model_export.py +++ b/tests/pytorch/test_pytorch_model_export.py @@ -1,19 +1,49 @@ from __future__ import print_function -import pytest +import importlib +import os +import json +import logging +import mock +import pickle +import pytest import numpy as np import pandas as pd - +import pandas.testing import sklearn.datasets as datasets - import torch import torch.nn as nn +import yaml from torch.utils.data import DataLoader +import mlflow.pyfunc as pyfunc import mlflow.pytorch +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server from mlflow import tracking +from mlflow.exceptions import MlflowException +from mlflow.models import Model +from mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env from mlflow.utils.file_utils import TempDir +from mlflow.utils.model_utils import _get_flavor_configuration + + +_logger = logging.getLogger(__name__) + +# This test suite is included as a code dependency when testing PyTorch model scoring in new +# processes and docker containers. In these environments, the `tests` module is not available. +# Therefore, we attempt to import from `tests` and gracefully emit a warning if it's unavailable. +try: + from tests.helper_functions import pyfunc_serve_and_score_model + from tests.helper_functions import score_model_in_sagemaker_docker_container + from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import + from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import +except ImportError: + _logger.warning( + "Failed to import test helper functions. Tests depending on these functions may fail!") @pytest.fixture(scope='module') @@ -33,25 +63,17 @@ def get_dataset(data): return dataset -@pytest.fixture(scope='module') -def model(data): +def train_model(model, data): dataset = get_dataset(data) - model = nn.Sequential( - nn.Linear(4, 3), - nn.ReLU(), - nn.Linear(3, 1), - ) - criterion = nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) - batch_size = 16 num_workers = 4 dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True, drop_last=False) model.train() - for epoch in range(5): + for _ in range(5): for batch in dataloader: optimizer.zero_grad() batch_size = batch[0].shape[0] @@ -60,9 +82,83 @@ def model(data): loss.backward() optimizer.step() + +@pytest.fixture(scope='module') +def sequential_model(data): + model = nn.Sequential( + nn.Linear(4, 3), + nn.ReLU(), + nn.Linear(3, 1), + ) + + train_model(model=model, data=data) + return model + + +def get_subclassed_model_definition(): + """ + Defines a PyTorch model class that inherits from ``torch.nn.Module``. This method can be invoked + within a pytest fixture to define the model class in the ``__main__`` scope. Alternatively, it + can be invoked within a module to define the class in the module's scope. 
+ """ + class SubclassedModel(torch.nn.Module): + + def __init__(self): + super(SubclassedModel, self).__init__() + self.linear = torch.nn.Linear(4, 1) + + def forward(self, x): + y_pred = self.linear(x) + return y_pred + + return SubclassedModel + + +@pytest.fixture(scope='module') +def main_scoped_subclassed_model(data): + """ + A custom PyTorch model inheriting from ``torch.nn.Module`` whose class is defined in the + "__main__" scope. + """ + model_class = get_subclassed_model_definition() + model = model_class() + train_model(model=model, data=data) + return model + + +class ModuleScopedSubclassedModel(get_subclassed_model_definition()): + """ + A custom PyTorch model class defined in the test module scope. This is a subclass of + ``torch.nn.Module``. + """ + + +@pytest.fixture(scope='module') +def module_scoped_subclassed_model(data): + """ + A custom PyTorch model inheriting from ``torch.nn.Module`` whose class is defined in the test + module scope. + """ + model = ModuleScopedSubclassedModel() + train_model(model=model, data=data) return model +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.fixture +def pytorch_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["pytorch", "torchvision", "pytest"], + additional_conda_channels=["pytorch"]) + return conda_env + + def _predict(model, data): dataset = get_dataset(data) batch_size = 16 @@ -79,12 +175,12 @@ def _predict(model, data): @pytest.fixture(scope='module') -def predicted(model, data): - return _predict(model, data) +def sequential_predicted(sequential_model, data): + return _predict(sequential_model, data) -def test_log_model(model, data, predicted): - +@pytest.mark.large +def test_log_model(sequential_model, data, sequential_predicted): old_uri = tracking.get_tracking_uri() # should_start_run tests whether or not calling log_model() automatically starts a run. 
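+    # When should_start_run is False, log_model() is expected to create a run implicitly.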
for should_start_run in [False, True]: @@ -94,56 +190,497 @@ def test_log_model(model, data, predicted): if should_start_run: mlflow.start_run() - mlflow.pytorch.log_model(model, artifact_path="pytorch") + artifact_path = "pytorch" + mlflow.pytorch.log_model(sequential_model, artifact_path=artifact_path) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) # Load model - run_id = mlflow.active_run().info.run_uuid - model_loaded = mlflow.pytorch.load_model("pytorch", run_id=run_id) + sequential_model_loaded = mlflow.pytorch.load_model(model_uri=model_uri) - test_predictions = _predict(model_loaded, data) - assert np.all(test_predictions == predicted) + test_predictions = _predict(sequential_model_loaded, data) + np.testing.assert_array_equal(test_predictions, sequential_predicted) finally: mlflow.end_run() tracking.set_tracking_uri(old_uri) -def test_raise_exception(model): +@pytest.mark.large +def test_raise_exception(sequential_model): with TempDir(chdr=True, remove_on_exit=True) as tmp: path = tmp.path("model") - with pytest.raises(RuntimeError): + with pytest.raises(IOError): mlflow.pytorch.load_model(path) with pytest.raises(TypeError): mlflow.pytorch.save_model([1, 2, 3], path) - mlflow.pytorch.save_model(model, path) + mlflow.pytorch.save_model(sequential_model, path) with pytest.raises(RuntimeError): - mlflow.pytorch.save_model(model, path) + mlflow.pytorch.save_model(sequential_model, path) from mlflow import sklearn import sklearn.neighbors as knn - import pickle path = tmp.path("knn.pkl") knn = knn.KNeighborsClassifier() with open(path, "wb") as f: pickle.dump(knn, f) path = tmp.path("knn") sklearn.save_model(knn, path=path) - with pytest.raises(ValueError): + with pytest.raises(MlflowException): mlflow.pytorch.load_model(path) -def test_save_and_load_model(model, data, predicted): +@pytest.mark.large +def test_save_and_load_model(sequential_model, model_path, data, sequential_predicted): + mlflow.pytorch.save_model(sequential_model, model_path) - x, y = data - with TempDir(chdr=True, remove_on_exit=True) as tmp: - path = tmp.path("model") - mlflow.pytorch.save_model(model, path) + # Loading pytorch model + sequential_model_loaded = mlflow.pytorch.load_model(model_path) + np.testing.assert_array_equal(_predict(sequential_model_loaded, data), sequential_predicted) + + # Loading pyfunc model + pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) + np.testing.assert_array_almost_equal( + pyfunc_loaded.predict(data[0]).values[:, 0], sequential_predicted, decimal=4) + + +@pytest.mark.large +def test_load_model_from_remote_uri_succeeds( + sequential_model, model_path, mock_s3_bucket, data, sequential_predicted): + mlflow.pytorch.save_model(sequential_model, model_path) + + artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + artifact_path = "model" + artifact_repo = S3ArtifactRepository(artifact_root) + artifact_repo.log_artifacts(model_path, artifact_path=artifact_path) + + model_uri = artifact_root + "/" + artifact_path + sequential_model_loaded = mlflow.pytorch.load_model(model_uri=model_uri) + np.testing.assert_array_equal(_predict(sequential_model_loaded, data), sequential_predicted) - # Loading pytorch model - model_loaded = mlflow.pytorch.load_model(path) - assert np.all(_predict(model_loaded, data) == predicted) - # Loading pyfunc model - pyfunc_loaded = mlflow.pyfunc.load_pyfunc(path) - assert np.all(pyfunc_loaded.predict(x).values[:, 0] == predicted) +@pytest.mark.large +def 
test_model_save_persists_specified_conda_env_in_mlflow_model_directory( + sequential_model, model_path, pytorch_custom_env): + mlflow.pytorch.save_model( + pytorch_model=sequential_model, path=model_path, conda_env=pytorch_custom_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != pytorch_custom_env + + with open(pytorch_custom_env, "r") as f: + pytorch_custom_env_text = f.read() + with open(saved_conda_env_path, "r") as f: + saved_conda_env_text = f.read() + assert saved_conda_env_text == pytorch_custom_env_text + + +@pytest.mark.large +def test_model_save_accepts_conda_env_as_dict(sequential_model, model_path): + conda_env = dict(mlflow.pytorch.get_default_conda_env()) + conda_env["dependencies"].append("pytest") + mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path, conda_env=conda_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == conda_env + + +@pytest.mark.large +def test_model_log_persists_specified_conda_env_in_mlflow_model_directory( + sequential_model, pytorch_custom_env): + artifact_path = "model" + with mlflow.start_run(): + mlflow.pytorch.log_model(pytorch_model=sequential_model, + artifact_path=artifact_path, + conda_env=pytorch_custom_env) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != pytorch_custom_env + + with open(pytorch_custom_env, "r") as f: + pytorch_custom_env_text = f.read() + with open(saved_conda_env_path, "r") as f: + saved_conda_env_text = f.read() + assert saved_conda_env_text == pytorch_custom_env_text + + +@pytest.mark.large +def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( + sequential_model, model_path): + mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path, conda_env=None) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.pytorch.get_default_conda_env() + + +@pytest.mark.large +def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( + sequential_model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.pytorch.log_model(pytorch_model=sequential_model, + artifact_path=artifact_path, + conda_env=None) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as 
f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.pytorch.get_default_conda_env() + + +@pytest.mark.large +def test_load_model_with_differing_pytorch_version_logs_warning(sequential_model, model_path): + mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path) + saver_pytorch_version = "1.0" + model_config_path = os.path.join(model_path, "MLmodel") + model_config = Model.load(model_config_path) + model_config.flavors[mlflow.pytorch.FLAVOR_NAME]["pytorch_version"] = saver_pytorch_version + model_config.save(model_config_path) + + log_messages = [] + + def custom_warn(message_text, *args, **kwargs): + log_messages.append(message_text % args % kwargs) + + loader_pytorch_version = "0.8.2" + with mock.patch("mlflow.pytorch._logger.warning") as warn_mock,\ + mock.patch("torch.__version__") as torch_version_mock: + torch_version_mock.__str__ = lambda *args, **kwargs: loader_pytorch_version + warn_mock.side_effect = custom_warn + mlflow.pytorch.load_model(model_uri=model_path) + + assert any([ + "does not match installed PyTorch version" in log_message and + saver_pytorch_version in log_message and + loader_pytorch_version in log_message + for log_message in log_messages + ]) + + +@pytest.mark.large +def test_pyfunc_model_serving_with_module_scoped_subclassed_model_and_default_conda_env( + module_scoped_subclassed_model, model_path, data): + mlflow.pytorch.save_model( + path=model_path, + pytorch_model=module_scoped_subclassed_model, + conda_env=None, + code_paths=[__file__]) + + scoring_response = pyfunc_serve_and_score_model( + model_uri=model_path, + data=data[0], + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + extra_args=["--no-conda"]) + assert scoring_response.status_code == 200 + + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + np.testing.assert_array_almost_equal( + deployed_model_preds.values[:, 0], + _predict(model=module_scoped_subclassed_model, data=data), + decimal=4) + + +@pytest.mark.large +def test_pyfunc_model_serving_with_main_scoped_subclassed_model_and_custom_pickle_module( + main_scoped_subclassed_model, model_path, data): + mlflow.pytorch.save_model( + path=model_path, + pytorch_model=main_scoped_subclassed_model, + conda_env=None, + pickle_module=mlflow_pytorch_pickle_module) + + scoring_response = pyfunc_serve_and_score_model( + model_uri=model_path, + data=data[0], + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + extra_args=["--no-conda"]) + assert scoring_response.status_code == 200 + + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + np.testing.assert_array_almost_equal( + deployed_model_preds.values[:, 0], + _predict(model=main_scoped_subclassed_model, data=data), + decimal=4) + + +@pytest.mark.large +def test_load_model_succeeds_with_dependencies_specified_via_code_paths( + module_scoped_subclassed_model, model_path, data): + # Save a PyTorch model whose class is defined in the current test suite. 
Because the + # `tests` module is not available when the model is deployed for local scoring, we include + # the test suite file as a code dependency + mlflow.pytorch.save_model( + path=model_path, + pytorch_model=module_scoped_subclassed_model, + conda_env=None, + code_paths=[__file__]) + + # Define a custom pyfunc model that loads a PyTorch model artifact using + # `mlflow.pytorch.load_model` + class TorchValidatorModel(pyfunc.PythonModel): + + def load_context(self, context): + self.pytorch_model = mlflow.pytorch.load_model(context.artifacts["pytorch_model"]) + + def predict(self, context, model_input): + with torch.no_grad(): + input_tensor = torch.from_numpy(model_input.values.astype(np.float32)) + output_tensor = self.pytorch_model(input_tensor) + return pd.DataFrame(output_tensor.numpy()) + + pyfunc_artifact_path = "pyfunc_model" + with mlflow.start_run(): + pyfunc.log_model(artifact_path=pyfunc_artifact_path, + python_model=TorchValidatorModel(), + artifacts={ + "pytorch_model": model_path, + }) + pyfunc_model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path)) + + # Deploy the custom pyfunc model and ensure that it is able to successfully load its + # constituent PyTorch model via `mlflow.pytorch.load_model` + scoring_response = pyfunc_serve_and_score_model( + model_uri=pyfunc_model_path, + data=data[0], + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + extra_args=["--no-conda"]) + assert scoring_response.status_code == 200 + + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + np.testing.assert_array_almost_equal( + deployed_model_preds.values[:, 0], + _predict(model=module_scoped_subclassed_model, data=data), + decimal=4) + + +@pytest.mark.large +def test_load_pyfunc_loads_torch_model_using_pickle_module_specified_at_save_time( + module_scoped_subclassed_model, model_path): + custom_pickle_module = pickle + + mlflow.pytorch.save_model( + path=model_path, + pytorch_model=module_scoped_subclassed_model, + conda_env=None, + pickle_module=custom_pickle_module) + + import_module_fn = importlib.import_module + imported_modules = [] + + def track_module_imports(module_name): + imported_modules.append(module_name) + return import_module_fn(module_name) + + with mock.patch("importlib.import_module") as import_mock,\ + mock.patch("torch.load") as torch_load_mock: + import_mock.side_effect = track_module_imports + pyfunc.load_pyfunc(model_path) + + torch_load_mock.assert_called_with(mock.ANY, pickle_module=custom_pickle_module) + assert custom_pickle_module.__name__ in imported_modules + + +@pytest.mark.large +def test_load_model_loads_torch_model_using_pickle_module_specified_at_save_time( + module_scoped_subclassed_model): + custom_pickle_module = pickle + + artifact_path = "pytorch_model" + with mlflow.start_run(): + mlflow.pytorch.log_model( + artifact_path=artifact_path, + pytorch_model=module_scoped_subclassed_model, + conda_env=None, + pickle_module=custom_pickle_module) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + import_module_fn = importlib.import_module + imported_modules = [] + + def track_module_imports(module_name): + imported_modules.append(module_name) + return import_module_fn(module_name) + + with mock.patch("importlib.import_module") as import_mock,\ + mock.patch("torch.load") as torch_load_mock: + import_mock.side_effect = 
track_module_imports
+        pyfunc.load_pyfunc(model_uri=model_uri)
+
+    torch_load_mock.assert_called_with(mock.ANY, pickle_module=custom_pickle_module)
+    assert custom_pickle_module.__name__ in imported_modules
+
+
+@pytest.mark.large
+def test_load_pyfunc_succeeds_when_data_is_model_file_instead_of_directory(
+        module_scoped_subclassed_model, model_path, data):
+    """
+    This test verifies that PyTorch models saved in older versions of MLflow are loaded
+    successfully by ``mlflow.pyfunc.load_pyfunc``. The ``data`` path associated with these older
+    models is a serialized PyTorch model file, as opposed to the current format: a directory
+    containing a serialized model file and pickle module information.
+    """
+    mlflow.pytorch.save_model(
+        path=model_path,
+        pytorch_model=module_scoped_subclassed_model,
+        conda_env=None)
+
+    model_conf_path = os.path.join(model_path, "MLmodel")
+    model_conf = Model.load(model_conf_path)
+    pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME)
+    assert pyfunc_conf is not None
+    model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA])
+    assert os.path.exists(model_data_path)
+    assert mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME in os.listdir(model_data_path)
+    pyfunc_conf[pyfunc.DATA] = os.path.join(
+        model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME)
+    model_conf.save(model_conf_path)
+
+    loaded_pyfunc = pyfunc.load_pyfunc(model_path)
+
+    np.testing.assert_array_almost_equal(
+        loaded_pyfunc.predict(data[0]),
+        pd.DataFrame(_predict(model=module_scoped_subclassed_model, data=data)),
+        decimal=4)
+
+
+@pytest.mark.large
+def test_load_model_succeeds_when_data_is_model_file_instead_of_directory(
+        module_scoped_subclassed_model, model_path, data):
+    """
+    This test verifies that PyTorch models saved in older versions of MLflow are loaded
+    successfully by ``mlflow.pyfunc.load_pyfunc``. The ``data`` path associated with these older
+    models is a serialized PyTorch model file, as opposed to the current format: a directory
+    containing a serialized model file and pickle module information.
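+
+    Emulating the old layout amounts to rewriting the pyfunc ``data`` entry of the ``MLmodel``
+    configuration so that it points at the serialized model file itself, rather than at the
+    directory containing it; the test body below performs exactly these steps::
+
+        pyfunc_conf[pyfunc.DATA] = os.path.join(
+            model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME)
+        model_conf.save(model_conf_path)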
+ """ + artifact_path = "pytorch_model" + with mlflow.start_run(): + mlflow.pytorch.log_model( + artifact_path=artifact_path, + pytorch_model=module_scoped_subclassed_model, + conda_env=None) + model_path = _download_artifact_from_uri("runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)) + + model_conf_path = os.path.join(model_path, "MLmodel") + model_conf = Model.load(model_conf_path) + pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME) + assert pyfunc_conf is not None + model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA]) + assert os.path.exists(model_data_path) + assert mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME in os.listdir(model_data_path) + pyfunc_conf[pyfunc.DATA] = os.path.join( + model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME) + model_conf.save(model_conf_path) + + loaded_pyfunc = pyfunc.load_pyfunc(model_path) + + np.testing.assert_array_almost_equal( + loaded_pyfunc.predict(data[0]), + pd.DataFrame(_predict(model=module_scoped_subclassed_model, data=data)), + decimal=4) + + +@pytest.mark.large +def test_load_model_allows_user_to_override_pickle_module_via_keyword_argument( + module_scoped_subclassed_model, model_path): + mlflow.pytorch.save_model( + path=model_path, + pytorch_model=module_scoped_subclassed_model, + conda_env=None, + pickle_module=pickle) + + mlflow_torch_pickle_load = mlflow_pytorch_pickle_module.load + pickle_call_results = { + "mlflow_torch_pickle_load_called": False, + } + + def validate_mlflow_torch_pickle_load_called(*args, **kwargs): + pickle_call_results["mlflow_torch_pickle_load_called"] = True + return mlflow_torch_pickle_load(*args, **kwargs) + + log_messages = [] + + def custom_warn(message_text, *args, **kwargs): + log_messages.append(message_text % args % kwargs) + + with mock.patch("mlflow.pytorch.pickle_module.load") as mlflow_torch_pickle_load_mock,\ + mock.patch("mlflow.pytorch._logger.warning") as warn_mock: + mlflow_torch_pickle_load_mock.side_effect = validate_mlflow_torch_pickle_load_called + warn_mock.side_effect = custom_warn + mlflow.pytorch.load_model(model_uri=model_path, pickle_module=mlflow_pytorch_pickle_module) + + assert all(pickle_call_results.values()) + assert any([ + "does not match the pickle module that was used to save the model" in log_message and + pickle.__name__ in log_message and + mlflow_pytorch_pickle_module.__name__ in log_message + for log_message in log_messages + ]) + + +@pytest.mark.large +def test_load_model_raises_exception_when_pickle_module_cannot_be_imported( + main_scoped_subclassed_model, model_path): + mlflow.pytorch.save_model( + path=model_path, + pytorch_model=main_scoped_subclassed_model, + conda_env=None) + + bad_pickle_module_name = "not.a.real.module" + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA]) + assert os.path.exists(model_data_path) + assert mlflow.pytorch._PICKLE_MODULE_INFO_FILE_NAME in os.listdir(model_data_path) + with open( + os.path.join(model_data_path, mlflow.pytorch._PICKLE_MODULE_INFO_FILE_NAME), "w") as f: + f.write(bad_pickle_module_name) + + with pytest.raises(MlflowException) as exc_info: + mlflow.pytorch.load_model(model_uri=model_path) + + assert "Failed to import the pickle module" in str(exc_info) + assert bad_pickle_module_name in str(exc_info) + + +@pytest.mark.release +def 
test_sagemaker_docker_model_scoring_with_sequential_model_and_default_conda_env(
+        sequential_model, model_path, data, sequential_predicted):
+    mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path, conda_env=None)
+
+    scoring_response = score_model_in_sagemaker_docker_container(
+        model_uri=model_path,
+        data=data[0],
+        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
+        flavor=mlflow.pyfunc.FLAVOR_NAME,
+        activity_polling_timeout_seconds=360)
+    deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content))
+
+    np.testing.assert_array_almost_equal(
+        deployed_model_preds.values[:, 0],
+        sequential_predicted,
+        decimal=4)
diff --git a/tests/resources/__init__.py b/tests/resources/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/resources/db/__init__.py b/tests/resources/db/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/tests/resources/db/initial_models.py b/tests/resources/db/initial_models.py
new file mode 100644
index 0000000000000..0b7e6ae9cfa42
--- /dev/null
+++ b/tests/resources/db/initial_models.py
@@ -0,0 +1,236 @@
+# Snapshot of MLflow DB models as of the 0.9.1 release, prior to the first database migration.
+# This file corresponds to the first database schema that we can reasonably expect users to be
+# running and exists to test that the oldest database schema can be brought up-to-date.
+# Copied from https://github.com/mlflow/mlflow/blob/v0.9.1/mlflow/store/dbmodels/models.py, with
+# modifications to substitute constants from MLflow with hard-coded values (e.g. replacing
+# SourceType.to_string(SourceType.NOTEBOOK) with the constant "NOTEBOOK").
+import time
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy import (
+    Column, String, Float, ForeignKey, Integer, CheckConstraint,
+    BigInteger, PrimaryKeyConstraint)
+from sqlalchemy.ext.declarative import declarative_base
+
+Base = declarative_base()
+
+
+SourceTypes = [
+    "NOTEBOOK",
+    "JOB",
+    "LOCAL",
+    "UNKNOWN",
+    "PROJECT",
+]
+
+RunStatusTypes = [
+    "SCHEDULED",
+    "FAILED",
+    "FINISHED",
+    "RUNNING",
+]
+
+
+class SqlExperiment(Base):
+    """
+    DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in the
+    ``experiments`` table.
+    """
+    __tablename__ = 'experiments'
+
+    experiment_id = Column(Integer, autoincrement=True)
+    """
+    Experiment ID: `Integer`. *Primary Key* for ``experiments`` table.
+    """
+    name = Column(String(256), unique=True, nullable=False)
+    """
+    Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in
+    table schema.
+    """
+    artifact_location = Column(String(256), nullable=True)
+    """
+    Default artifact location for this experiment: `String` (limit 256 characters). Defined as
+    *Nullable* in table schema.
+    """
+    lifecycle_stage = Column(String(32), default="active")
+    """
+    Lifecycle Stage of experiment: `String` (limit 32 characters).
+    Can be either ``active`` (default) or ``deleted``.
+    """
+
+    __table_args__ = (
+        CheckConstraint(
+            lifecycle_stage.in_(["active", "deleted"]),
+            name='lifecycle_stage'),
+        PrimaryKeyConstraint('experiment_id', name='experiment_pk')
+    )
+
+    def __repr__(self):
+        return '<SqlExperiment ({}, {})>'.format(self.experiment_id, self.name)
+
+
+class SqlRun(Base):
+    """
+    DB model for :py:class:`mlflow.entities.Run`. These are recorded in the ``runs`` table.
+    """
+    __tablename__ = 'runs'
+
+    run_uuid = Column(String(32), nullable=False)
+    """
+    Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table.
+ """ + name = Column(String(250)) + """ + Run name: `String` (limit 250 characters). + """ + source_type = Column(String(20), default="LOCAL") + """ + Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``, + ``LOCAL`` (default), or ``UNKNOWN``. + """ + source_name = Column(String(500)) + """ + Name of source recording the run: `String` (limit 500 characters). + """ + entry_point_name = Column(String(50)) + """ + Entry-point name that launched the run run: `String` (limit 50 characters). + """ + user_id = Column(String(256), nullable=True, default=None) + """ + User ID: `String` (limit 256 characters). Defaults to ``null``. + """ + status = Column(String(20), default="SCHEDULED") + """ + Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default), + ``FINISHED``, ``FAILED``. + """ + start_time = Column(BigInteger, default=int(time.time())) + """ + Run start time: `BigInteger`. Defaults to current system time. + """ + end_time = Column(BigInteger, nullable=True, default=None) + """ + Run end time: `BigInteger`. + """ + source_version = Column(String(50)) + """ + Source version: `String` (limit 50 characters). + """ + lifecycle_stage = Column(String(20), default="active") + """ + Lifecycle Stage of run: `String` (limit 32 characters). + Can be either ``active`` (default) or ``deleted``. + """ + artifact_uri = Column(String(200), default=None) + """ + Default artifact location for this run: `String` (limit 200 characters). + """ + experiment_id = Column(Integer, ForeignKey('experiments.experiment_id')) + """ + Experiment ID to which this run belongs to: *Foreign Key* into ``experiment`` table. + """ + experiment = relationship('SqlExperiment', backref=backref('runs', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`. + """ + + __table_args__ = ( + CheckConstraint(source_type.in_(SourceTypes), name='source_type'), + CheckConstraint(status.in_(RunStatusTypes), name='status'), + CheckConstraint(lifecycle_stage.in_(["active", "deleted"]), + name='lifecycle_stage'), + PrimaryKeyConstraint('run_uuid', name='run_pk') + ) + + +class SqlTag(Base): + """ + DB model for :py:class:`mlflow.entities.RunTag`. These are recorded in ``tags`` table. + """ + __tablename__ = 'tags' + + key = Column(String(250)) + """ + Tag key: `String` (limit 250 characters). *Primary Key* for ``tags`` table. + """ + value = Column(String(250), nullable=True) + """ + Value associated with tag: `String` (limit 250 characters). Could be *null*. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this tag belongs to: *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('tags', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='tag_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value) + + +class SqlMetric(Base): + __tablename__ = 'metrics' + + key = Column(String(250)) + """ + Metric key: `String` (limit 250 characters). Part of *Primary Key* for ``metrics`` table. + """ + value = Column(Float, nullable=False) + """ + Metric value: `Float`. Defined as *Non-null* in schema. + """ + timestamp = Column(BigInteger, default=lambda: int(time.time())) + """ + Timestamp recorded for this metric entry: `BigInteger`. Part of *Primary Key* for + ``metrics`` table. 
+ """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this metric belongs to: Part of *Primary Key* for ``metrics`` table. + *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('metrics', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'timestamp', 'run_uuid', name='metric_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value, self.timestamp) + + +class SqlParam(Base): + __tablename__ = 'params' + + key = Column(String(250)) + """ + Param key: `String` (limit 250 characters). Part of *Primary Key* for ``params`` table. + """ + value = Column(String(250), nullable=False) + """ + Param value: `String` (limit 250 characters). Defined as *Non-null* in schema. + """ + run_uuid = Column(String(32), ForeignKey('runs.run_uuid')) + """ + Run UUID to which this metric belongs to: Part of *Primary Key* for ``params`` table. + *Foreign Key* into ``runs`` table. + """ + run = relationship('SqlRun', backref=backref('params', cascade='all')) + """ + SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`. + """ + + __table_args__ = ( + PrimaryKeyConstraint('key', 'run_uuid', name='param_pk'), + ) + + def __repr__(self): + return ''.format(self.key, self.value) diff --git a/tests/resources/db/latest_schema.sql b/tests/resources/db/latest_schema.sql new file mode 100644 index 0000000000000..a4a09255757d4 --- /dev/null +++ b/tests/resources/db/latest_schema.sql @@ -0,0 +1,68 @@ + +CREATE TABLE alembic_version ( + version_num VARCHAR(32) NOT NULL, + CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +) + + +CREATE TABLE experiments ( + experiment_id INTEGER NOT NULL, + name VARCHAR(256) NOT NULL, + artifact_location VARCHAR(256), + lifecycle_stage VARCHAR(32), + CONSTRAINT experiment_pk PRIMARY KEY (experiment_id), + UNIQUE (name), + CONSTRAINT experiments_lifecycle_stage CHECK (lifecycle_stage IN ('active', 'deleted')) +) + + +CREATE TABLE runs ( + run_uuid VARCHAR(32) NOT NULL, + name VARCHAR(250), + source_type VARCHAR(20), + source_name VARCHAR(500), + entry_point_name VARCHAR(50), + user_id VARCHAR(256), + status VARCHAR(20), + start_time BIGINT, + end_time BIGINT, + source_version VARCHAR(50), + lifecycle_stage VARCHAR(20), + artifact_uri VARCHAR(200), + experiment_id INTEGER, + CONSTRAINT run_pk PRIMARY KEY (run_uuid), + FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id), + CONSTRAINT status CHECK (status IN ('SCHEDULED', 'FAILED', 'FINISHED', 'RUNNING')), + CONSTRAINT source_type CHECK (source_type IN ('NOTEBOOK', 'JOB', 'LOCAL', 'UNKNOWN', 'PROJECT')), + CONSTRAINT runs_lifecycle_stage CHECK (lifecycle_stage IN ('active', 'deleted')) +) + + +CREATE TABLE metrics ( + key VARCHAR(250) NOT NULL, + value FLOAT NOT NULL, + timestamp BIGINT NOT NULL, + run_uuid VARCHAR(32) NOT NULL, + step BIGINT DEFAULT '0' NOT NULL, + CONSTRAINT metric_pk PRIMARY KEY (key, value, timestamp, run_uuid, step), + FOREIGN KEY(run_uuid) REFERENCES runs (run_uuid) +) + + +CREATE TABLE params ( + key VARCHAR(250) NOT NULL, + value VARCHAR(250) NOT NULL, + run_uuid VARCHAR(32) NOT NULL, + CONSTRAINT param_pk PRIMARY KEY (key, run_uuid), + FOREIGN KEY(run_uuid) REFERENCES runs (run_uuid) +) + + +CREATE TABLE tags ( + key VARCHAR(250) NOT NULL, + value VARCHAR(250), + run_uuid VARCHAR(32) NOT NULL, + CONSTRAINT tag_pk PRIMARY KEY (key, run_uuid), + FOREIGN 
KEY(run_uuid) REFERENCES runs (run_uuid) +) + diff --git a/tests/resources/example_docker_project/Dockerfile b/tests/resources/example_docker_project/Dockerfile new file mode 100644 index 0000000000000..e0e3738b05249 --- /dev/null +++ b/tests/resources/example_docker_project/Dockerfile @@ -0,0 +1,3 @@ +FROM continuumio/miniconda3:4.6.14 + +RUN pip install mlflow && pip install sqlalchemy diff --git a/tests/resources/example_docker_project/MLproject b/tests/resources/example_docker_project/MLproject new file mode 100644 index 0000000000000..7284b5a60dba8 --- /dev/null +++ b/tests/resources/example_docker_project/MLproject @@ -0,0 +1,12 @@ +name: docker-example + +docker_env: + image: mlflow-docker-example + +entry_points: + main: + command: "echo 'Main entry point'" + test_tracking: + parameters: + use_start_run: bool + command: "python scripts/docker_tracking_test.py {use_start_run}" diff --git a/tests/resources/example_docker_project/kubernetes_config.json b/tests/resources/example_docker_project/kubernetes_config.json new file mode 100644 index 0000000000000..a8b44709e41c2 --- /dev/null +++ b/tests/resources/example_docker_project/kubernetes_config.json @@ -0,0 +1,5 @@ +{ + "kube-context": "docker-for-desktop", + "kube-job-template-path": "examples/docker/kubernetes_job_template.yaml", + "repository-uri": "username/mlflow-kubernetes-example" +} diff --git a/tests/resources/example_docker_project/scripts/__init__.py b/tests/resources/example_docker_project/scripts/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/resources/example_docker_project/scripts/docker_tracking_test.py b/tests/resources/example_docker_project/scripts/docker_tracking_test.py new file mode 100644 index 0000000000000..c5a8bc7976f96 --- /dev/null +++ b/tests/resources/example_docker_project/scripts/docker_tracking_test.py @@ -0,0 +1,23 @@ +""" Example script that calls tracking APIs within / outside of a start_run() block. """ +import mlflow +import os +import sys + + +def call_tracking_apis(): + mlflow.log_metric("some_key", 3) + + +def main(use_start_run): + + if use_start_run: + print("Running with start_run API") + with mlflow.start_run(): + call_tracking_apis() + else: + print("Running without start_run API") + call_tracking_apis() + + +if __name__ == "__main__": + main(use_start_run=int(sys.argv[1])) diff --git a/tests/resources/example_project/conda.yaml b/tests/resources/example_project/conda.yaml index a8679489271fa..cb29b86499c66 100644 --- a/tests/resources/example_project/conda.yaml +++ b/tests/resources/example_project/conda.yaml @@ -2,7 +2,6 @@ # conda.yaml files in the test environment. 
name: tutorial channels: - - anaconda - defaults dependencies: - python=3.6 diff --git a/tests/resources/mlflow-test-plugin/mlflow_test_plugin/__init__.py b/tests/resources/mlflow-test-plugin/mlflow_test_plugin/__init__.py new file mode 100644 index 0000000000000..2e8c3404ccad6 --- /dev/null +++ b/tests/resources/mlflow-test-plugin/mlflow_test_plugin/__init__.py @@ -0,0 +1,29 @@ +from six.moves import urllib + +from mlflow.store.file_store import FileStore +from mlflow.store.local_artifact_repo import LocalArtifactRepository +from mlflow.tracking.context.abstract_context import RunContextProvider + + +class PluginFileStore(FileStore): + """FileStore provided through entrypoints system""" + + def __init__(self, store_uri=None, artifact_uri=None): + path = urllib.parse.urlparse(store_uri).path if store_uri else None + self.is_plugin = True + super(PluginFileStore, self).__init__(path, artifact_uri) + + +class PluginLocalArtifactRepository(LocalArtifactRepository): + """LocalArtifactRepository provided through plugin system""" + is_plugin = True + + +class PluginRunContextProvider(RunContextProvider): + """RunContextProvider provided through plugin system""" + + def in_context(self): + return False + + def tags(self): + return {"test": "tag"} diff --git a/tests/resources/mlflow-test-plugin/setup.py b/tests/resources/mlflow-test-plugin/setup.py new file mode 100644 index 0000000000000..3f8bd12b09ec3 --- /dev/null +++ b/tests/resources/mlflow-test-plugin/setup.py @@ -0,0 +1,16 @@ +from setuptools import setup, find_packages + + +setup( + name="mflow-test-plugin", + version="0.0.1", + description="Test plugin for MLflow", + packages=find_packages(), + install_requires=["mlflow"], + entry_points={ + "mlflow.tracking_store": "file-plugin=mlflow_test_plugin:PluginFileStore", + "mlflow.artifact_repository": + "file-plugin=mlflow_test_plugin:PluginLocalArtifactRepository", + "mlflow.run_context_provider": "unused=mlflow_test_plugin:PluginRunContextProvider" + }, +) diff --git a/tests/sagemaker/mock/__init__.py b/tests/sagemaker/mock/__init__.py new file mode 100644 index 0000000000000..b46c76e7f58b4 --- /dev/null +++ b/tests/sagemaker/mock/__init__.py @@ -0,0 +1,712 @@ +from __future__ import absolute_import + +import time +import json +from collections import namedtuple +from datetime import datetime + +from moto.core import BaseBackend, BaseModel +from moto.core.responses import BaseResponse +from moto.ec2 import ec2_backends + +from moto.iam.models import ACCOUNT_ID +from moto.core.models import base_decorator, deprecated_base_decorator + +SageMakerResourceWithArn = namedtuple("SageMakerResourceWithArn", ["resource", "arn"]) + + +class SageMakerResponse(BaseResponse): + """ + A collection of handlers for SageMaker API calls that produce API-conforming + JSON responses. + """ + + @property + def sagemaker_backend(self): + return sagemaker_backends[self.region] + + @property + def request_params(self): + return json.loads(self.body) + + def create_endpoint_config(self): + """ + Handler for the SageMaker "CreateEndpointConfig" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html. 
+ """ + config_name = self.request_params["EndpointConfigName"] + production_variants = self.request_params.get("ProductionVariants") + tags = self.request_params.get("Tags", []) + new_config = self.sagemaker_backend.create_endpoint_config( + config_name=config_name, production_variants=production_variants, tags=tags, + region_name=self.region) + return json.dumps({ + 'EndpointConfigArn': new_config.arn + }) + + def describe_endpoint_config(self): + """ + Handler for the SageMaker "DescribeEndpoint" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html. + """ + config_name = self.request_params["EndpointConfigName"] + config_description = self.sagemaker_backend.describe_endpoint_config(config_name) + return json.dumps(config_description.response_object) + + def delete_endpoint_config(self): + """ + Handler for the SageMaker "DeleteEndpointConfig" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteEndpointConfig.html. + """ + config_name = self.request_params["EndpointConfigName"] + self.sagemaker_backend.delete_endpoint_config(config_name) + return "" + + def create_endpoint(self): + """ + Handler for the SageMaker "CreateEndpoint" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html. + """ + endpoint_name = self.request_params["EndpointName"] + endpoint_config_name = self.request_params["EndpointConfigName"] + tags = self.request_params.get("Tags", []) + new_endpoint = self.sagemaker_backend.create_endpoint( + endpoint_name=endpoint_name, + endpoint_config_name=endpoint_config_name, + tags=tags, region_name=self.region) + return json.dumps({ + 'EndpointArn': new_endpoint.arn + }) + + def describe_endpoint(self): + """ + Handler for the SageMaker "DescribeEndpoint" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html. + """ + endpoint_name = self.request_params["EndpointName"] + endpoint_description = self.sagemaker_backend.describe_endpoint(endpoint_name) + return json.dumps(endpoint_description.response_object) + + def update_endpoint(self): + """ + Handler for the SageMaker "UpdateEndpoint" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpoint.html. + """ + endpoint_name = self.request_params["EndpointName"] + new_config_name = self.request_params["EndpointConfigName"] + updated_endpoint = self.sagemaker_backend.update_endpoint( + endpoint_name=endpoint_name, new_config_name=new_config_name) + return json.dumps({ + 'EndpointArn': updated_endpoint.arn + }) + + def delete_endpoint(self): + """ + Handler for the SageMaker "DeleteEndpoint" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteEndpoint.html. + """ + endpoint_name = self.request_params["EndpointName"] + self.sagemaker_backend.delete_endpoint(endpoint_name) + return "" + + def list_endpoints(self): + """ + Handler for the SageMaker "ListEndpoints" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpoints.html. + + This function does not support pagination. All endpoint configs are returned in a + single response. 
+ """ + endpoint_summaries = self.sagemaker_backend.list_endpoints() + return json.dumps({ + 'Endpoints': [summary.response_object for summary in endpoint_summaries] + }) + + def list_endpoint_configs(self): + """ + Handler for the SageMaker "ListEndpointConfigs" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpointConfigs.html. + + This function does not support pagination. All endpoint configs are returned in a + single response. + """ + # Note: + endpoint_config_summaries = self.sagemaker_backend.list_endpoint_configs() + return json.dumps({ + 'EndpointConfigs': [summary.response_object for summary in endpoint_config_summaries] + }) + + def list_models(self): + """ + Handler for the SageMaker "ListModels" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListModels.html. + + This function does not support pagination. All endpoint configs are returned in a + single response. + """ + model_summaries = self.sagemaker_backend.list_models() + return json.dumps({ + 'Models': [summary.response_object for summary in model_summaries] + }) + + def create_model(self): + """ + Handler for the SageMaker "CreateModel" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html. + """ + model_name = self.request_params["ModelName"] + primary_container = self.request_params["PrimaryContainer"] + execution_role_arn = self.request_params["ExecutionRoleArn"] + tags = self.request_params.get("Tags", []) + vpc_config = self.request_params.get("VpcConfig", None) + new_model = self.sagemaker_backend.create_model( + model_name=model_name, primary_container=primary_container, + execution_role_arn=execution_role_arn, tags=tags, vpc_config=vpc_config, + region_name=self.region) + return json.dumps({ + 'ModelArn': new_model.arn + }) + + def describe_model(self): + """ + Handler for the SageMaker "DescribeModel" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeModel.html. + """ + model_name = self.request_params["ModelName"] + model_description = self.sagemaker_backend.describe_model(model_name) + return json.dumps(model_description.response_object) + + def delete_model(self): + """ + Handler for the SageMaker "DeleteModel" API call documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteModel.html. + """ + model_name = self.request_params["ModelName"] + self.sagemaker_backend.delete_model(model_name) + return "" + + +class SageMakerBackend(BaseBackend): + """ + A mock backend for managing and exposing SageMaker resource state. 
+ """ + + BASE_SAGEMAKER_ARN = "arn:aws:sagemaker:{region_name}:{account_id}:" + + def __init__(self): + self.models = {} + self.endpoints = {} + self.endpoint_configs = {} + self._endpoint_update_latency_seconds = 0 + + def set_endpoint_update_latency(self, latency_seconds): + """ + Sets the latency for the following operations that update endpoint state: + - "create_endpoint" + - "update_endpoint" + """ + self._endpoint_update_latency_seconds = latency_seconds + + def set_endpoint_latest_operation(self, endpoint_name, operation): + if endpoint_name not in self.endpoints: + raise ValueError( + "Attempted to manually set the latest operation for an endpoint" + " that does not exist!") + self.endpoints[endpoint_name].resource.latest_operation = operation + + @property + def _url_module(self): + """ + Required override from the Moto "BaseBackend" object that reroutes requests from the + specified SageMaker URLs to the mocked SageMaker backend. + """ + urls_module_name = "tests.sagemaker.mock.mock_sagemaker_urls" + urls_module = __import__(urls_module_name, fromlist=['url_bases', 'url_paths']) + return urls_module + + def _get_base_arn(self, region_name): + """ + :return: A SageMaker ARN prefix that can be prepended to a resource name. + """ + return SageMakerBackend.BASE_SAGEMAKER_ARN.format( + region_name=region_name, account_id=ACCOUNT_ID) + + def create_endpoint_config(self, config_name, production_variants, tags, region_name): + """ + Modifies backend state during calls to the SageMaker "CreateEndpointConfig" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html. + """ + if config_name in self.endpoint_configs: + raise ValueError("Attempted to create an endpoint configuration with name:" + " {config_name}, but an endpoint configuration with this" + " name already exists.".format(config_name=config_name)) + for production_variant in production_variants: + if "ModelName" not in production_variant: + raise ValueError("Production variant must specify a model name.") + elif production_variant["ModelName"] not in self.models: + raise ValueError( + "Production variant specifies a model name that does not exist" + " Model name: '{model_name}'".format( + model_name=production_variant["ModelName"])) + + new_config = EndpointConfig(config_name=config_name, + production_variants=production_variants, + tags=tags) + new_config_arn = self._get_base_arn(region_name=region_name) + new_config.arn_descriptor + new_resource = SageMakerResourceWithArn(resource=new_config, arn=new_config_arn) + self.endpoint_configs[config_name] = new_resource + return new_resource + + def describe_endpoint_config(self, config_name): + """ + Modifies backend state during calls to the SageMaker "DescribeEndpointConfig" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpointConfig.html. + """ + if config_name not in self.endpoint_configs: + raise ValueError("Attempted to describe an endpoint config with name: `{config_name}`" + " that does not exist.".format(config_name=config_name)) + + config = self.endpoint_configs[config_name] + return EndpointConfigDescription(config=config.resource, arn=config.arn) + + def delete_endpoint_config(self, config_name): + """ + Modifies backend state during calls to the SageMaker "DeleteEndpointConfig" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteEndpointConfig.html. 
+ """ + if config_name not in self.endpoint_configs: + raise ValueError("Attempted to delete an endpoint config with name: `{config_name}`" + " that does not exist.".format(config_name=config_name)) + + del self.endpoint_configs[config_name] + + def create_endpoint(self, endpoint_name, endpoint_config_name, tags, region_name): + """ + Modifies backend state during calls to the SageMaker "CreateEndpoint" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html. + """ + if endpoint_name in self.endpoints: + raise ValueError("Attempted to create an endpoint with name: `{endpoint_name}`" + " but an endpoint with this name already exists.".format( + endpoint_name=endpoint_name)) + + if endpoint_config_name not in self.endpoint_configs: + raise ValueError("Attempted to create an endpoint with a configuration named:" + " `{config_name}` However, this configuration does not exist.".format( + config_name=endpoint_config_name)) + + new_endpoint = Endpoint(endpoint_name=endpoint_name, + config_name=endpoint_config_name, + tags=tags, + latest_operation=EndpointOperation.create_successful( + latency_seconds=self._endpoint_update_latency_seconds)) + new_endpoint_arn = self._get_base_arn(region_name=region_name) + new_endpoint.arn_descriptor + new_resource = SageMakerResourceWithArn(resource=new_endpoint, arn=new_endpoint_arn) + self.endpoints[endpoint_name] = new_resource + return new_resource + + def describe_endpoint(self, endpoint_name): + """ + Modifies backend state during calls to the SageMaker "DescribeEndpoint" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html. + """ + if endpoint_name not in self.endpoints: + raise ValueError("Attempted to describe an endpoint with name: `{endpoint_name}`" + " that does not exist.".format(endpoint_name=endpoint_name)) + + endpoint = self.endpoints[endpoint_name] + config = self.endpoint_configs[endpoint.resource.config_name] + return EndpointDescription( + endpoint=endpoint.resource, config=config.resource, arn=endpoint.arn) + + def update_endpoint(self, endpoint_name, new_config_name): + """ + Modifies backend state during calls to the SageMaker "UpdateEndpoint" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpoint.html. + """ + if endpoint_name not in self.endpoints: + raise ValueError("Attempted to update an endpoint with name: `{endpoint_name}`" + " that does not exist.".format(endpoint_name=endpoint_name)) + + if new_config_name not in self.endpoint_configs: + raise ValueError("Attempted to update an endpoint named `{endpoint_name}` with a new" + " configuration named: `{config_name}`. However, this configuration" + " does not exist.".format( + endpoint_name=endpoint_name, config_name=new_config_name)) + + endpoint = self.endpoints[endpoint_name] + endpoint.resource.latest_operation = EndpointOperation.update_successful( + latency_seconds=self._endpoint_update_latency_seconds) + endpoint.resource.config_name = new_config_name + return endpoint + + def delete_endpoint(self, endpoint_name): + """ + Modifies backend state during calls to the SageMaker "DeleteEndpoint" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteEndpoint.html. 
+ """ + if endpoint_name not in self.endpoints: + raise ValueError("Attempted to delete an endpoint with name: `{endpoint_name}`" + " that does not exist.".format(endpoint_name=endpoint_name)) + + del self.endpoints[endpoint_name] + + def list_endpoints(self): + """ + Modifies backend state during calls to the SageMaker "ListEndpoints" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpoints.html. + """ + summaries = [] + for _, endpoint in self.endpoints.items(): + summary = EndpointSummary(endpoint=endpoint.resource, arn=endpoint.arn) + summaries.append(summary) + return summaries + + def list_endpoint_configs(self): + """ + Modifies backend state during calls to the SageMaker "ListEndpointConfigs" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpointConfigs.html. + """ + summaries = [] + for _, endpoint_config in self.endpoint_configs.items(): + summary = EndpointConfigSummary( + config=endpoint_config.resource, arn=endpoint_config.arn) + summaries.append(summary) + return summaries + + def list_models(self): + """ + Modifies backend state during calls to the SageMaker "ListModels" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListModels.html. + """ + summaries = [] + for _, model in self.models.items(): + summary = ModelSummary(model=model.resource, arn=model.arn) + summaries.append(summary) + return summaries + + def create_model(self, model_name, primary_container, execution_role_arn, tags, region_name, + vpc_config=None): + """ + Modifies backend state during calls to the SageMaker "CreateModel" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html. + """ + if model_name in self.models: + raise ValueError("Attempted to create a model with name: `{model_name}`" + " but a model with this name already exists.".format( + model_name=model_name)) + + new_model = Model(model_name=model_name, primary_container=primary_container, + execution_role_arn=execution_role_arn, tags=tags, vpc_config=vpc_config) + new_model_arn = self._get_base_arn(region_name=region_name) + new_model.arn_descriptor + new_resource = SageMakerResourceWithArn(resource=new_model, arn=new_model_arn) + self.models[model_name] = new_resource + return new_resource + + def describe_model(self, model_name): + """ + Modifies backend state during calls to the SageMaker "DescribeModel" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeModel.html. + """ + if model_name not in self.models: + raise ValueError("Attempted to describe a model with name: `{model_name}`" + " that does not exist.".format(model_name=model_name)) + + model = self.models[model_name] + return ModelDescription(model=model.resource, arn=model.arn) + + def delete_model(self, model_name): + """ + Modifies backend state during calls to the SageMaker "DeleteModel" API + documented here: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteModel.html. 
+ """ + if model_name not in self.models: + raise ValueError("Attempted to delete an model with name: `{model_name}`" + " that does not exist.".format(model_name=model_name)) + + del self.models[model_name] + + +class TimestampedResource(BaseModel): + + TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + + def __init__(self): + curr_time = datetime.now().strftime(TimestampedResource.TIMESTAMP_FORMAT) + self.creation_time = curr_time + self.last_modified_time = curr_time + + +class Endpoint(TimestampedResource): + """ + Object representing a SageMaker endpoint. The SageMakerBackend will create + and manage Endpoints. + """ + + STATUS_IN_SERVICE = "InService" + STATUS_FAILED = "Failed" + STATUS_CREATING = "Creating" + STATUS_UPDATING = "Updating" + + def __init__(self, endpoint_name, config_name, tags, latest_operation): + """ + :param endpoint_name: The name of the Endpoint. + :param config_name: The name of the EndpointConfiguration to associate with the Endpoint. + :param tags: Arbitrary tags to associate with the endpoint. + :param latest_operation: The most recent operation that was invoked on the endpoint, + represented as an EndpointOperation object. + """ + super(Endpoint, self).__init__() + self.endpoint_name = endpoint_name + self.config_name = config_name + self.tags = tags + self.latest_operation = latest_operation + + @property + def arn_descriptor(self): + return ":endpoint/{endpoint_name}".format(endpoint_name=self.endpoint_name) + + @property + def status(self): + return self.latest_operation.status() + + +class EndpointOperation: + """ + Object representing a SageMaker endpoint operation ("create" or "update"). Every + Endpoint is associated with the operation that was most recently invoked on it. + """ + + def __init__(self, latency_seconds, pending_status, completed_status): + """ + :param latency: The latency of the operation, in seconds. Before the time window specified + by this latency elapses, the operation will have the status specified by + ``pending_status``. After the time window elapses, the operation will + have the status specified by ``completed_status``. + :param pending_status: The status that the operation should reflect *before* the latency + window has elapsed. + :param completed_status: The status that the operation should reflect *after* the latency + window has elapsed. 
+ """ + self.latency_seconds = latency_seconds + self.pending_status = pending_status + self.completed_status = completed_status + self.start_time = time.time() + + def status(self): + if time.time() - self.start_time < self.latency_seconds: + return self.pending_status + else: + return self.completed_status + + @classmethod + def create_successful(cls, latency_seconds): + return cls(latency_seconds=latency_seconds, pending_status=Endpoint.STATUS_CREATING, + completed_status=Endpoint.STATUS_IN_SERVICE) + + @classmethod + def create_unsuccessful(cls, latency_seconds): + return cls(latency_seconds=latency_seconds, pending_status=Endpoint.STATUS_CREATING, + completed_status=Endpoint.STATUS_FAILED) + + @classmethod + def update_successful(cls, latency_seconds): + return cls(latency_seconds=latency_seconds, pending_status=Endpoint.STATUS_UPDATING, + completed_status=Endpoint.STATUS_IN_SERVICE) + + @classmethod + def update_unsuccessful(cls, latency_seconds): + return cls(latency_seconds=latency_seconds, pending_status=Endpoint.STATUS_UPDATING, + completed_status=Endpoint.STATUS_FAILED) + + +class EndpointSummary: + """ + Object representing an endpoint entry in the endpoints list returned by + SageMaker's "ListEndpoints" API: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpoints.html. + """ + + def __init__(self, endpoint, arn): + self.endpoint = endpoint + self.arn = arn + + @property + def response_object(self): + response = { + 'EndpointName': self.endpoint.endpoint_name, + 'CreationTime': self.endpoint.creation_time, + 'LastModifiedTime': self.endpoint.last_modified_time, + 'EndpointStatus': self.endpoint.status, + 'EndpointArn': self.arn, + } + return response + + +class EndpointDescription: + """ + Object representing an endpoint description returned by SageMaker's + "DescribeEndpoint" API: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html. + """ + + def __init__(self, endpoint, config, arn): + self.endpoint = endpoint + self.config = config + self.arn = arn + + @property + def response_object(self): + response = { + 'EndpointName': self.endpoint.endpoint_name, + 'EndpointArn': self.arn, + 'EndpointConfigName': self.endpoint.config_name, + 'ProductionVariants': self.config.production_variants, + 'EndpointStatus': self.endpoint.status, + 'CreationTime': self.endpoint.creation_time, + 'LastModifiedTime': self.endpoint.last_modified_time, + } + return response + + +class EndpointConfig(TimestampedResource): + """ + Object representing a SageMaker endpoint configuration. The SageMakerBackend will create + and manage EndpointConfigs. + """ + + def __init__(self, config_name, production_variants, tags): + super(EndpointConfig, self).__init__() + self.config_name = config_name + self.production_variants = production_variants + self.tags = tags + + @property + def arn_descriptor(self): + return ":endpoint-config/{config_name}".format(config_name=self.config_name) + + +class EndpointConfigSummary: + """ + Object representing an endpoint configuration entry in the configurations list returned by + SageMaker's "ListEndpointConfigs" API: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpointConfigs.html. 
+ """ + + def __init__(self, config, arn): + self.config = config + self.arn = arn + + @property + def response_object(self): + response = { + 'EndpointConfigName': self.config.config_name, + 'EndpointArn': self.arn, + 'CreationTime': self.config.creation_time, + } + return response + + +class EndpointConfigDescription: + """ + Object representing an endpoint configuration description returned by SageMaker's + "DescribeEndpointConfig" API: + https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpointConfig.html. + """ + + def __init__(self, config, arn): + self.config = config + self.arn = arn + + @property + def response_object(self): + response = { + 'EndpointConfigName': self.config.config_name, + 'EndpointConfigArn': self.arn, + 'ProductionVariants': self.config.production_variants, + 'CreationTime': self.config.creation_time, + } + return response + + +class Model(TimestampedResource): + """ + Object representing a SageMaker model. The SageMakerBackend will create and manage Models. + """ + + def __init__(self, model_name, primary_container, execution_role_arn, tags, vpc_config): + super(Model, self).__init__() + self.model_name = model_name + self.primary_container = primary_container + self.execution_role_arn = execution_role_arn + self.tags = tags + self.vpc_config = vpc_config + + @property + def arn_descriptor(self): + return ":model/{model_name}".format(model_name=self.model_name) + + +class ModelSummary: + """ + Object representing a model entry in the models list returned by SageMaker's + "ListModels" API: https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListModels.html. + """ + + def __init__(self, model, arn): + self.model = model + self.arn = arn + + @property + def response_object(self): + response = { + 'ModelArn': self.arn, + 'ModelName': self.model.model_name, + 'CreationTime': self.model.creation_time, + } + return response + + +class ModelDescription: + """ + Object representing a model description returned by SageMaker's + "DescribeModel" API: https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeModel.html. 
+ """ + + def __init__(self, model, arn): + self.model = model + self.arn = arn + + @property + def response_object(self): + response = { + 'ModelArn': self.arn, + 'ModelName': self.model.model_name, + 'PrimaryContainer': self.model.primary_container, + 'ExecutionRoleArn': self.model.execution_role_arn, + 'VpcConfig': self.model.vpc_config if self.model.vpc_config else {}, + 'CreationTime': self.model.creation_time, + } + return response + + +# Create a SageMaker backend for each EC2 region +sagemaker_backends = {} +for region, ec2_backend in ec2_backends.items(): + new_backend = SageMakerBackend() + sagemaker_backends[region] = new_backend + +mock_sagemaker = base_decorator(sagemaker_backends) diff --git a/tests/sagemaker/mock/mock_sagemaker_urls.py b/tests/sagemaker/mock/mock_sagemaker_urls.py new file mode 100644 index 0000000000000..f0ffb75c3a046 --- /dev/null +++ b/tests/sagemaker/mock/mock_sagemaker_urls.py @@ -0,0 +1,9 @@ +from tests.sagemaker.mock import SageMakerResponse + +url_bases = [ + "https?://api.sagemaker.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': SageMakerResponse.dispatch, +} diff --git a/tests/sagemaker/mock/test_sagemaker_service_mock.py b/tests/sagemaker/mock/test_sagemaker_service_mock.py new file mode 100644 index 0000000000000..1cfed8f71e4ee --- /dev/null +++ b/tests/sagemaker/mock/test_sagemaker_service_mock.py @@ -0,0 +1,513 @@ +import boto3 +import pytest + +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.sagemaker.mock import mock_sagemaker + + +@pytest.fixture +def sagemaker_client(): + return boto3.client("sagemaker", region_name="us-west-2") + + +def create_sagemaker_model(sagemaker_client, model_name): + return sagemaker_client.create_model( + ExecutionRoleArn='arn:aws:iam::012345678910:role/sample-role', + ModelName=model_name, + PrimaryContainer={ + 'Image': '012345678910.dkr.ecr.us-west-2.amazonaws.com/sample-container', + } + ) + + +def create_endpoint_config(sagemaker_client, endpoint_config_name, model_name): + return sagemaker_client.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=[ + { + 'VariantName': 'sample-variant', + 'ModelName': model_name, + 'InitialInstanceCount': 1, + 'InstanceType': 'ml.m4.xlarge', + 'InitialVariantWeight': 1.0, + }, + ], + ) + + +@mock_sagemaker +def test_created_model_is_listed_by_list_models_function(sagemaker_client): + model_name = "sample-model" + create_sagemaker_model( + sagemaker_client=sagemaker_client, model_name=model_name) + + models_response = sagemaker_client.list_models() + assert "Models" in models_response + models = models_response["Models"] + assert all(["ModelName" in model for model in models]) + assert model_name in [model["ModelName"] for model in models] + + +@mock_sagemaker +def test_create_model_returns_arn_containing_model_name(sagemaker_client): + model_name = "sample-model" + model_create_response = create_sagemaker_model( + sagemaker_client=sagemaker_client, model_name=model_name) + assert "ModelArn" in model_create_response + assert model_name in model_create_response["ModelArn"] + + +@mock_sagemaker +def test_creating_model_with_name_already_in_use_raises_exception(sagemaker_client): + model_name = "sample-model-name" + + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + with pytest.raises(ValueError): + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + +@mock_sagemaker +def 
test_all_models_are_listed_after_creating_many_models(sagemaker_client):
+    model_names = []
+
+    for i in range(100):
+        model_name = "sample-model-{idx}".format(idx=i)
+        model_names.append(model_name)
+
+        create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    listed_models = sagemaker_client.list_models()["Models"]
+    listed_model_names = [model["ModelName"] for model in listed_models]
+    for model_name in model_names:
+        assert model_name in listed_model_names
+
+
+@mock_sagemaker
+def test_describe_model_response_contains_expected_attributes(sagemaker_client):
+    model_name = "sample-model"
+    execution_role_arn = "arn:aws:iam::012345678910:role/sample-role"
+    primary_container = {
+        "Image": "012345678910.dkr.ecr.us-west-2.amazonaws.com/sample-container",
+    }
+
+    sagemaker_client.create_model(
+        ModelName=model_name,
+        ExecutionRoleArn=execution_role_arn,
+        PrimaryContainer=primary_container,
+    )
+
+    describe_model_response = sagemaker_client.describe_model(ModelName=model_name)
+    assert "CreationTime" in describe_model_response
+    assert "ModelArn" in describe_model_response
+    assert "ExecutionRoleArn" in describe_model_response
+    assert describe_model_response["ExecutionRoleArn"] == execution_role_arn
+    assert "ModelName" in describe_model_response
+    assert describe_model_response["ModelName"] == model_name
+    assert "PrimaryContainer" in describe_model_response
+    assert describe_model_response["PrimaryContainer"] == primary_container
+
+
+@mock_sagemaker
+def test_describe_model_throws_exception_for_nonexistent_model(sagemaker_client):
+    with pytest.raises(ValueError):
+        sagemaker_client.describe_model(ModelName="nonexistent-model")
+
+
+@mock_sagemaker
+def test_model_is_no_longer_listed_after_deletion(sagemaker_client):
+    model_name = "sample-model-name"
+
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    sagemaker_client.delete_model(ModelName=model_name)
+
+    listed_models = sagemaker_client.list_models()["Models"]
+    listed_model_names = [model["ModelName"] for model in listed_models]
+    assert model_name not in listed_model_names
+
+
+@mock_sagemaker
+def test_created_endpoint_config_is_listed_by_list_endpoint_configs_function(sagemaker_client):
+    model_name = "sample-model"
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    endpoint_config_name = "sample-config"
+    create_endpoint_config(
+        sagemaker_client=sagemaker_client,
+        endpoint_config_name=endpoint_config_name,
+        model_name=model_name)
+
+    endpoint_configs_response = sagemaker_client.list_endpoint_configs()
+    assert "EndpointConfigs" in endpoint_configs_response
+    endpoint_configs = endpoint_configs_response["EndpointConfigs"]
+    assert all([
+        "EndpointConfigName" in endpoint_config for endpoint_config in endpoint_configs])
+    assert endpoint_config_name in [
+        endpoint_config["EndpointConfigName"] for endpoint_config in endpoint_configs
+    ]
+
+
+@mock_sagemaker
+def test_create_endpoint_config_returns_arn_containing_config_name(
+        sagemaker_client):
+    model_name = "sample-model"
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    endpoint_config_name = "sample-config"
+    create_config_response = create_endpoint_config(
+        sagemaker_client=sagemaker_client,
+        endpoint_config_name=endpoint_config_name,
+        model_name=model_name)
+
+    assert "EndpointConfigArn" in create_config_response
+    assert endpoint_config_name in create_config_response["EndpointConfigArn"]
+
+
+@mock_sagemaker
+def 
test_creating_endpoint_config_with_name_already_in_use_raises_exception(sagemaker_client): + model_name = "sample-model" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + endpoint_config_name = "sample-config" + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=endpoint_config_name, + model_name=model_name) + + with pytest.raises(ValueError): + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=endpoint_config_name, + model_name=model_name) + + +@mock_sagemaker +def test_all_endpoint_configs_are_listed_after_creating_many_configs(sagemaker_client): + model_name = "sample-model" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + endpoint_config_names = [] + + for i in range(100): + endpoint_config_name = "sample-config-{idx}".format(idx=i) + endpoint_config_names.append(endpoint_config_name) + + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=endpoint_config_name, + model_name=model_name) + + listed_endpoint_configs = sagemaker_client.list_endpoint_configs()["EndpointConfigs"] + listed_endpoint_config_names = [ + endpoint_config["EndpointConfigName"] + for endpoint_config in listed_endpoint_configs] + for endpoint_config_name in endpoint_config_names: + assert endpoint_config_name in listed_endpoint_config_names + + +@mock_sagemaker +def test_describe_endpoint_config_response_contains_expected_attributes(sagemaker_client): + model_name = "sample-model" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + endpoint_config_name = "sample-config" + production_variants = [ + { + 'VariantName': 'sample-variant', + 'ModelName': model_name, + 'InitialInstanceCount': 1, + 'InstanceType': 'ml.m4.xlarge', + 'InitialVariantWeight': 1.0, + }, + ] + sagemaker_client.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=production_variants, + ) + + describe_endpoint_config_response = sagemaker_client.describe_endpoint_config( + EndpointConfigName=endpoint_config_name) + assert "CreationTime" in describe_endpoint_config_response + assert "EndpointConfigArn" in describe_endpoint_config_response + assert "EndpointConfigName" in describe_endpoint_config_response + assert describe_endpoint_config_response["EndpointConfigName"] == endpoint_config_name + assert "ProductionVariants" in describe_endpoint_config_response + assert describe_endpoint_config_response["ProductionVariants"] == production_variants + + +@mock_sagemaker +def test_describe_endpoint_config_throws_exception_for_nonexistent_config(sagemaker_client): + with pytest.raises(ValueError): + sagemaker_client.describe_endpoint_config(EndpointConfigName="nonexistent-config") + + +@mock_sagemaker +def test_endpoint_config_is_no_longer_listed_after_deletion(sagemaker_client): + model_name = "sample-model-name" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + endpoint_config_name = "sample-config" + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=endpoint_config_name, + model_name=model_name) + + sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name) + + listed_endpoint_configs = sagemaker_client.list_endpoint_configs()["EndpointConfigs"] + listed_endpoint_config_names = [ + endpoint_config["EndpointConfigName"] for endpoint_config in listed_endpoint_configs + ] + assert endpoint_config_name not in 
listed_endpoint_config_names
+
+
+@mock_sagemaker
+def test_created_endpoint_is_listed_by_list_endpoints_function(sagemaker_client):
+    model_name = "sample-model"
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    endpoint_config_name = "sample-config"
+    create_endpoint_config(
+        sagemaker_client=sagemaker_client,
+        endpoint_config_name=endpoint_config_name,
+        model_name=model_name)
+
+    endpoint_name = "sample-endpoint"
+
+    sagemaker_client.create_endpoint(
+        EndpointConfigName=endpoint_config_name,
+        EndpointName=endpoint_name,
+        Tags=[
+            {
+                "Key": "Some Key",
+                "Value": "Some Value",
+            },
+        ],
+    )
+
+    endpoints_response = sagemaker_client.list_endpoints()
+    assert "Endpoints" in endpoints_response
+    endpoints = endpoints_response["Endpoints"]
+    assert all(["EndpointName" in endpoint for endpoint in endpoints])
+    assert endpoint_name in [endpoint["EndpointName"] for endpoint in endpoints]
+
+
+@mock_sagemaker
+def test_create_endpoint_returns_arn_containing_endpoint_name(
+        sagemaker_client):
+    model_name = "sample-model"
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    endpoint_config_name = "sample-config"
+    create_endpoint_config(
+        sagemaker_client=sagemaker_client,
+        endpoint_config_name=endpoint_config_name,
+        model_name=model_name)
+
+    endpoint_name = "sample-endpoint"
+
+    create_endpoint_response = sagemaker_client.create_endpoint(
+        EndpointConfigName=endpoint_config_name,
+        EndpointName=endpoint_name,
+        Tags=[
+            {
+                "Key": "Some Key",
+                "Value": "Some Value",
+            },
+        ],
+    )
+
+    assert "EndpointArn" in create_endpoint_response
+    assert endpoint_name in create_endpoint_response["EndpointArn"]
+
+
+@mock_sagemaker
+def test_creating_endpoint_with_name_already_in_use_raises_exception(sagemaker_client):
+    model_name = "sample-model"
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    endpoint_config_name = "sample-config"
+    create_endpoint_config(
+        sagemaker_client=sagemaker_client,
+        endpoint_config_name=endpoint_config_name,
+        model_name=model_name)
+
+    endpoint_name = "sample-endpoint"
+
+    sagemaker_client.create_endpoint(
+        EndpointConfigName=endpoint_config_name,
+        EndpointName=endpoint_name,
+        Tags=[
+            {
+                "Key": "Some Key",
+                "Value": "Some Value",
+            },
+        ],
+    )
+
+    with pytest.raises(ValueError):
+        sagemaker_client.create_endpoint(
+            EndpointConfigName=endpoint_config_name,
+            EndpointName=endpoint_name,
+            Tags=[
+                {
+                    "Key": "Some Key",
+                    "Value": "Some Value",
+                },
+            ],
+        )
+
+
+@mock_sagemaker
+def test_all_endpoints_are_listed_after_creating_many_endpoints(sagemaker_client):
+    model_name = "sample-model"
+    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
+
+    endpoint_config_name = "sample-config"
+    create_endpoint_config(
+        sagemaker_client=sagemaker_client,
+        endpoint_config_name=endpoint_config_name,
+        model_name=model_name)
+
+    endpoint_names = []
+
+    for i in range(100):
+        endpoint_name = "sample-endpoint-{idx}".format(idx=i)
+        endpoint_names.append(endpoint_name)
+
+        sagemaker_client.create_endpoint(
+            EndpointConfigName=endpoint_config_name,
+            EndpointName=endpoint_name,
+            Tags=[
+                {
+                    "Key": "Some Key",
+                    "Value": "Some Value",
+                },
+            ],
+        )
+
+    listed_endpoints = sagemaker_client.list_endpoints()["Endpoints"]
+    listed_endpoint_names = [endpoint["EndpointName"] for endpoint in listed_endpoints]
+    for endpoint_name in endpoint_names:
+        assert endpoint_name in listed_endpoint_names
+
+
+@mock_sagemaker
+def 
test_describe_endpoint_response_contains_expected_attributes(sagemaker_client): + model_name = "sample-model" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + endpoint_config_name = "sample-config" + production_variants = [ + { + 'VariantName': 'sample-variant', + 'ModelName': model_name, + 'InitialInstanceCount': 1, + 'InstanceType': 'ml.m4.xlarge', + 'InitialVariantWeight': 1.0, + }, + ] + sagemaker_client.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=production_variants, + ) + + endpoint_name = "sample-endpoint" + sagemaker_client.create_endpoint( + EndpointName=endpoint_name, + EndpointConfigName=endpoint_config_name, + ) + + describe_endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) + assert "CreationTime" in describe_endpoint_response + assert "LastModifiedTime" in describe_endpoint_response + assert "EndpointArn" in describe_endpoint_response + assert "EndpointStatus" in describe_endpoint_response + assert "ProductionVariants" in describe_endpoint_response + + +@mock_sagemaker +def test_describe_endpoint_throws_exception_for_nonexistent_endpoint(sagemaker_client): + with pytest.raises(ValueError): + sagemaker_client.describe_endpoint(EndpointName="nonexistent-endpoint") + + +@mock_sagemaker +def test_endpoint_is_no_longer_listed_after_deletion(sagemaker_client): + model_name = "sample-model-name" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + endpoint_config_name = "sample-config" + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=endpoint_config_name, + model_name=model_name) + + endpoint_name = "sample-endpoint" + sagemaker_client.create_endpoint( + EndpointConfigName=endpoint_config_name, + EndpointName=endpoint_name, + ) + + sagemaker_client.delete_endpoint(EndpointName=endpoint_name) + + listed_endpoints = sagemaker_client.list_endpoints()["Endpoints"] + listed_endpoint_names = [endpoint["EndpointName"] for endpoint in listed_endpoints] + assert endpoint_name not in listed_endpoint_names + + +@mock_sagemaker +def test_update_endpoint_modifies_config_correctly(sagemaker_client): + model_name = "sample-model-name" + create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + first_endpoint_config_name = "sample-config-1" + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=first_endpoint_config_name, + model_name=model_name) + + second_endpoint_config_name = "sample-config-2" + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=second_endpoint_config_name, + model_name=model_name) + + endpoint_name = "sample-endpoint" + sagemaker_client.create_endpoint( + EndpointConfigName=first_endpoint_config_name, + EndpointName=endpoint_name, + ) + + first_describe_endpoint_response = sagemaker_client.describe_endpoint( + EndpointName=endpoint_name) + assert first_describe_endpoint_response["EndpointConfigName"] == first_endpoint_config_name + + sagemaker_client.update_endpoint( + EndpointName=endpoint_name, EndpointConfigName=second_endpoint_config_name) + + second_describe_endpoint_response = sagemaker_client.describe_endpoint( + EndpointName=endpoint_name) + assert second_describe_endpoint_response["EndpointConfigName"] == second_endpoint_config_name + + +@mock_sagemaker +def test_update_endpoint_with_nonexistent_config_throws_exception(sagemaker_client): + model_name = "sample-model-name" + 
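+    # An endpoint (backed by a model and a valid config) must exist before
+    # update_endpoint can be asked to switch it to a nonexistent config.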
create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name) + + endpoint_config_name = "sample-config" + create_endpoint_config( + sagemaker_client=sagemaker_client, + endpoint_config_name=endpoint_config_name, + model_name=model_name) + + endpoint_name = "sample-endpoint" + sagemaker_client.create_endpoint( + EndpointConfigName=endpoint_config_name, + EndpointName=endpoint_name, + ) + + with pytest.raises(ValueError): + sagemaker_client.update_endpoint( + EndpointName=endpoint_name, EndpointConfigName="nonexistent-config") diff --git a/tests/sagemaker/test_deployment.py b/tests/sagemaker/test_deployment.py index f9fd87f2b778d..7da85f636b285 100644 --- a/tests/sagemaker/test_deployment.py +++ b/tests/sagemaker/test_deployment.py @@ -1,18 +1,33 @@ +from __future__ import absolute_import + import os import pytest +import time +import mock from collections import namedtuple +import boto3 +import botocore import numpy as np +from click.testing import CliRunner from sklearn.linear_model import LogisticRegression import mlflow import mlflow.pyfunc import mlflow.sklearn import mlflow.sagemaker as mfs +import mlflow.sagemaker.cli as mfscli +from mlflow.exceptions import MlflowException from mlflow.models import Model -from mlflow.tracking.utils import _get_model_log_dir +from mlflow.protos.databricks_pb2 import ErrorCode, RESOURCE_DOES_NOT_EXIST, \ + INVALID_PARAMETER_VALUE, INTERNAL_ERROR +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri + +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.sagemaker.mock import mock_sagemaker, Endpoint, EndpointOperation -TrainedModel = namedtuple("TrainedModel", ["model_path", "run_id"]) +TrainedModel = namedtuple("TrainedModel", ["model_path", "run_id", "model_uri"]) @pytest.fixture @@ -21,59 +36,604 @@ def pretrained_model(): with mlflow.start_run(): X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1) y = np.array([0, 0, 1, 1, 1, 0]) - lr = LogisticRegression() + lr = LogisticRegression(solver='lbfgs') lr.fit(X, y) mlflow.sklearn.log_model(lr, model_path) - run_id = mlflow.active_run().info.run_uuid - return TrainedModel(model_path, run_id) + run_id = mlflow.active_run().info.run_id + model_uri = "runs:/" + run_id + "/" + model_path + return TrainedModel(model_path, run_id, model_uri) + + +@pytest.fixture +def sagemaker_client(): + return boto3.client("sagemaker", region_name="us-west-2") + + +def get_sagemaker_backend(region_name): + return mock_sagemaker.backends[region_name] + + +def mock_sagemaker_aws_services(fn): + # Import `wraps` from `six` instead of `functools` to properly set the + # wrapped function's `__wrapped__` attribute to the required value + # in Python 2 + from six import wraps + from moto import mock_s3, mock_ecr, mock_sts, mock_iam + + @mock_ecr + @mock_iam + @mock_s3 + @mock_sagemaker + @mock_sts + @wraps(fn) + def mock_wrapper(*args, **kwargs): + # Create an ECR repository for the `mlflow-pyfunc` SageMaker docker image + ecr_client = boto3.client("ecr", region_name="us-west-2") + ecr_client.create_repository(repositoryName=mfs.DEFAULT_IMAGE_NAME) + + # Create the moto IAM role + role_policy = """ + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "*", + "Resource": "*" + } + ] + } + """ + iam_client = boto3.client("iam", region_name="us-west-2") + iam_client.create_role(RoleName="moto", AssumeRolePolicyDocument=role_policy) + return fn(*args, 
**kwargs) -def test_deployment_with_unsupported_flavor_throws_value_error(pretrained_model): + return mock_wrapper + + +@pytest.mark.large +def test_deployment_with_unsupported_flavor_raises_exception(pretrained_model): unsupported_flavor = "this is not a valid flavor" - with pytest.raises(ValueError): + with pytest.raises(MlflowException) as exc: mfs.deploy(app_name="bad_flavor", - model_path=pretrained_model.model_path, - run_id=pretrained_model.run_id, + model_uri=pretrained_model.model_uri, flavor=unsupported_flavor) + assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) + -def test_deployment_with_missing_flavor_throws_value_error(pretrained_model): +@pytest.mark.large +def test_deployment_with_missing_flavor_raises_exception(pretrained_model): missing_flavor = "mleap" - with pytest.raises(ValueError): + with pytest.raises(MlflowException) as exc: mfs.deploy(app_name="missing-flavor", - model_path=pretrained_model.model_path, - run_id=pretrained_model.run_id, + model_uri=pretrained_model.model_uri, flavor=missing_flavor) + assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) -def test_deployment_of_model_with_no_supported_flavors_throws_value_error(pretrained_model): - logged_model_path = _get_model_log_dir(pretrained_model.model_path, pretrained_model.run_id) + +@pytest.mark.large +def test_deployment_of_model_with_no_supported_flavors_raises_exception(pretrained_model): + logged_model_path = _download_artifact_from_uri(pretrained_model.model_uri) model_config_path = os.path.join(logged_model_path, "MLmodel") model_config = Model.load(model_config_path) del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME] model_config.save(path=model_config_path) - with pytest.raises(ValueError): + with pytest.raises(MlflowException) as exc: mfs.deploy(app_name="missing-flavor", - model_path=logged_model_path, + model_uri=logged_model_path, flavor=None) + assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) + +@pytest.mark.large def test_validate_deployment_flavor_validates_python_function_flavor_successfully( pretrained_model): - model_config_path = os.path.join(_get_model_log_dir( - pretrained_model.model_path, pretrained_model.run_id), "MLmodel") + model_config_path = os.path.join( + _download_artifact_from_uri(pretrained_model.model_uri), "MLmodel") model_config = Model.load(model_config_path) mfs._validate_deployment_flavor( model_config=model_config, flavor=mlflow.pyfunc.FLAVOR_NAME) +@pytest.mark.large def test_get_preferred_deployment_flavor_obtains_valid_flavor_from_model(pretrained_model): - model_config_path = os.path.join(_get_model_log_dir( - pretrained_model.model_path, pretrained_model.run_id), "MLmodel") + model_config_path = os.path.join( + _download_artifact_from_uri(pretrained_model.model_uri), "MLmodel") model_config = Model.load(model_config_path) selected_flavor = mfs._get_preferred_deployment_flavor(model_config=model_config) assert selected_flavor in mfs.SUPPORTED_DEPLOYMENT_FLAVORS assert selected_flavor in model_config.flavors + + +@pytest.mark.large +def test_attempting_to_deploy_in_asynchronous_mode_without_archiving_throws_exception( + pretrained_model): + with pytest.raises(MlflowException) as exc: + mfs.deploy(app_name="test-app", + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE, + archive=False, + synchronous=False) + + assert "Resources must be archived" in exc.value.message + assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) + + +@pytest.mark.large 
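+# mock_sagemaker_aws_services (defined above) stacks moto's ECR, IAM, S3, and STS mocks
+# on top of the custom SageMaker mock, so the deployments below never touch real AWS.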
+@mock_sagemaker_aws_services +def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_from_local( + pretrained_model, sagemaker_client): + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + + region_name = sagemaker_client.meta.region_name + s3_client = boto3.client("s3", region_name=region_name) + default_bucket = mfs._get_default_s3_bucket(region_name) + endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name) + endpoint_production_variants = endpoint_description["ProductionVariants"] + assert len(endpoint_production_variants) == 1 + model_name = endpoint_production_variants[0]["VariantName"] + assert model_name in [ + model["ModelName"] for model in sagemaker_client.list_models()["Models"] + ] + object_names = [ + entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"] + ] + assert any([model_name in object_name for object_name in object_names]) + assert any([app_name in config["EndpointConfigName"] + for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]]) + assert app_name in [endpoint["EndpointName"] + for endpoint in sagemaker_client.list_endpoints()["Endpoints"]] + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_from_local( + pretrained_model, sagemaker_client): + app_name = "test-app" + result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke( + mfscli.commands, + [ + 'deploy', + '-a', app_name, + '-m', pretrained_model.model_uri, + '--mode', mfs.DEPLOYMENT_MODE_CREATE, + ]) + assert result.exit_code == 0 + + region_name = sagemaker_client.meta.region_name + s3_client = boto3.client("s3", region_name=region_name) + default_bucket = mfs._get_default_s3_bucket(region_name) + endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name) + endpoint_production_variants = endpoint_description["ProductionVariants"] + assert len(endpoint_production_variants) == 1 + model_name = endpoint_production_variants[0]["VariantName"] + assert model_name in [ + model["ModelName"] for model in sagemaker_client.list_models()["Models"] + ] + object_names = [ + entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"] + ] + assert any([model_name in object_name for object_name in object_names]) + assert any([app_name in config["EndpointConfigName"] + for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]]) + assert app_name in [endpoint["EndpointName"] + for endpoint in sagemaker_client.list_endpoints()["Endpoints"]] + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_from_s3( + pretrained_model, sagemaker_client): + local_model_path = _download_artifact_from_uri(pretrained_model.model_uri) + artifact_path = "model" + region_name = sagemaker_client.meta.region_name + default_bucket = mfs._get_default_s3_bucket(region_name) + s3_artifact_repo = S3ArtifactRepository('s3://{}'.format(default_bucket)) + s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path) + model_s3_uri = 's3://{bucket_name}/{artifact_path}'.format( + bucket_name=default_bucket, artifact_path=pretrained_model.model_path) + + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=model_s3_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + + endpoint_description = 
sagemaker_client.describe_endpoint(EndpointName=app_name) + endpoint_production_variants = endpoint_description["ProductionVariants"] + assert len(endpoint_production_variants) == 1 + model_name = endpoint_production_variants[0]["VariantName"] + assert model_name in [ + model["ModelName"] for model in sagemaker_client.list_models()["Models"] + ] + + s3_client = boto3.client("s3", region_name=region_name) + object_names = [ + entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"] + ] + assert any([model_name in object_name for object_name in object_names]) + assert any([app_name in config["EndpointConfigName"] + for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]]) + assert app_name in [endpoint["EndpointName"] + for endpoint in sagemaker_client.list_endpoints()["Endpoints"]] + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_from_s3( + pretrained_model, sagemaker_client): + local_model_path = _download_artifact_from_uri(pretrained_model.model_uri) + artifact_path = "model" + region_name = sagemaker_client.meta.region_name + default_bucket = mfs._get_default_s3_bucket(region_name) + s3_artifact_repo = S3ArtifactRepository('s3://{}'.format(default_bucket)) + s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path) + model_s3_uri = 's3://{bucket_name}/{artifact_path}'.format( + bucket_name=default_bucket, artifact_path=pretrained_model.model_path) + + app_name = "test-app" + result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke( + mfscli.commands, + [ + 'deploy', + '-a', app_name, + '-m', model_s3_uri, + '--mode', mfs.DEPLOYMENT_MODE_CREATE, + ]) + assert result.exit_code == 0 + + region_name = sagemaker_client.meta.region_name + s3_client = boto3.client("s3", region_name=region_name) + default_bucket = mfs._get_default_s3_bucket(region_name) + endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name) + endpoint_production_variants = endpoint_description["ProductionVariants"] + assert len(endpoint_production_variants) == 1 + model_name = endpoint_production_variants[0]["VariantName"] + assert model_name in [ + model["ModelName"] for model in sagemaker_client.list_models()["Models"] + ] + object_names = [ + entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"] + ] + assert any([model_name in object_name for object_name in object_names]) + assert any([app_name in config["EndpointConfigName"] + for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]]) + assert app_name in [endpoint["EndpointName"] + for endpoint in sagemaker_client.list_endpoints()["Endpoints"]] + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploying_application_with_preexisting_name_in_create_mode_throws_exception( + pretrained_model): + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + + with pytest.raises(MlflowException) as exc: + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + + assert "an application with the same name already exists" in exc.value.message + assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_in_synchronous_mode_waits_for_endpoint_creation_to_complete_before_returning( + pretrained_model, sagemaker_client): + 
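+    # Configure the mocked backend to keep the endpoint in the "Creating" state for
+    # several seconds; a synchronous deploy should block until this latency elapses.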
endpoint_creation_latency = 10 + get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency( + endpoint_creation_latency) + + app_name = "test-app" + deployment_start_time = time.time() + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE, + synchronous=True) + deployment_end_time = time.time() + + assert (deployment_end_time - deployment_start_time) >= endpoint_creation_latency + endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name) + assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_create_in_asynchronous_mode_returns_before_endpoint_creation_completes( + pretrained_model, sagemaker_client): + endpoint_creation_latency = 10 + get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency( + endpoint_creation_latency) + + app_name = "test-app" + deployment_start_time = time.time() + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE, + synchronous=False, + archive=True) + deployment_end_time = time.time() + + assert (deployment_end_time - deployment_start_time) < endpoint_creation_latency + endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name) + assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_CREATING + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_replace_in_asynchronous_mode_returns_before_endpoint_creation_completes( + pretrained_model, sagemaker_client): + endpoint_update_latency = 10 + get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency( + endpoint_update_latency) + + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE, + synchronous=True) + + update_start_time = time.time() + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_REPLACE, + synchronous=False, + archive=True) + update_end_time = time.time() + + assert (update_end_time - update_start_time) < endpoint_update_latency + endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name) + assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_UPDATING + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_in_create_mode_throws_exception_after_endpoint_creation_fails( + pretrained_model, sagemaker_client): + endpoint_creation_latency = 10 + sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name) + sagemaker_backend.set_endpoint_update_latency(endpoint_creation_latency) + + boto_caller = botocore.client.BaseClient._make_api_call + + def fail_endpoint_creations(self, operation_name, operation_kwargs): + """ + Processes all boto3 client operations according to the following rules: + - If the operation is an endpoint creation, create the endpoint and set its status to + ``Endpoint.STATUS_FAILED``. 
+        - Else, execute the client operation as normal
+        """
+        result = boto_caller(self, operation_name, operation_kwargs)
+        if operation_name == "CreateEndpoint":
+            endpoint_name = operation_kwargs["EndpointName"]
+            sagemaker_backend.set_endpoint_latest_operation(
+                endpoint_name=endpoint_name,
+                operation=EndpointOperation.create_unsuccessful(
+                    latency_seconds=endpoint_creation_latency))
+        return result
+
+    with mock.patch("botocore.client.BaseClient._make_api_call", new=fail_endpoint_creations),\
+            pytest.raises(MlflowException) as exc:
+        mfs.deploy(app_name="test-app",
+                   model_uri=pretrained_model.model_uri,
+                   mode=mfs.DEPLOYMENT_MODE_CREATE)
+
+    assert "deployment operation failed" in exc.value.message
+    assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
+
+
+@pytest.mark.large
+@mock_sagemaker_aws_services
+def test_deploy_in_add_mode_adds_new_model_to_existing_endpoint(pretrained_model, sagemaker_client):
+    app_name = "test-app"
+    mfs.deploy(app_name=app_name,
+               model_uri=pretrained_model.model_uri,
+               mode=mfs.DEPLOYMENT_MODE_CREATE)
+    models_added = 1
+    for _ in range(11):
+        mfs.deploy(app_name=app_name,
+                   model_uri=pretrained_model.model_uri,
+                   mode=mfs.DEPLOYMENT_MODE_ADD,
+                   archive=True,
+                   synchronous=False)
+        models_added += 1
+
+    endpoint_response = sagemaker_client.describe_endpoint(EndpointName=app_name)
+    endpoint_config_name = endpoint_response["EndpointConfigName"]
+    endpoint_config_response = sagemaker_client.describe_endpoint_config(
+        EndpointConfigName=endpoint_config_name)
+    production_variants = endpoint_config_response["ProductionVariants"]
+    assert len(production_variants) == models_added
+
+
+@pytest.mark.large
+@mock_sagemaker_aws_services
+def test_deploy_in_replace_mode_removes_preexisting_models_from_endpoint(
+        pretrained_model, sagemaker_client):
+    app_name = "test-app"
+    mfs.deploy(app_name=app_name,
+               model_uri=pretrained_model.model_uri,
+               mode=mfs.DEPLOYMENT_MODE_ADD)
+
+    for _ in range(11):
+        mfs.deploy(app_name=app_name,
+                   model_uri=pretrained_model.model_uri,
+                   mode=mfs.DEPLOYMENT_MODE_ADD,
+                   archive=True,
+                   synchronous=False)
+
+    endpoint_response_before_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
+    endpoint_config_name_before_replacement =\
+        endpoint_response_before_replacement["EndpointConfigName"]
+    endpoint_config_response_before_replacement = sagemaker_client.describe_endpoint_config(
+        EndpointConfigName=endpoint_config_name_before_replacement)
+    production_variants_before_replacement =\
+        endpoint_config_response_before_replacement["ProductionVariants"]
+    deployed_models_before_replacement = [
+        variant["ModelName"] for variant in production_variants_before_replacement]
+
+    mfs.deploy(app_name=app_name,
+               model_uri=pretrained_model.model_uri,
+               mode=mfs.DEPLOYMENT_MODE_REPLACE,
+               archive=True,
+               synchronous=False)
+
+    endpoint_response_after_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
+    endpoint_config_name_after_replacement =\
+        endpoint_response_after_replacement["EndpointConfigName"]
+    endpoint_config_response_after_replacement = sagemaker_client.describe_endpoint_config(
+        EndpointConfigName=endpoint_config_name_after_replacement)
+    production_variants_after_replacement =\
+        endpoint_config_response_after_replacement["ProductionVariants"]
+    deployed_models_after_replacement = [
+        variant["ModelName"] for variant in production_variants_after_replacement]
+    assert len(deployed_models_after_replacement) == 1
+    assert all([model_name not in deployed_models_after_replacement
+
for model_name in deployed_models_before_replacement]) + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_in_replace_mode_throws_exception_after_endpoint_update_fails( + pretrained_model, sagemaker_client): + endpoint_update_latency = 5 + sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name) + sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency) + + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + + boto_caller = botocore.client.BaseClient._make_api_call + + def fail_endpoint_updates(self, operation_name, operation_kwargs): + """ + Processes all boto3 client operations according to the following rules: + - If the operation is an endpoint update, update the endpoint and set its status to + ``Endpoint.STATUS_FAILED``. + - Else, execute the client operation as normal + """ + result = boto_caller(self, operation_name, operation_kwargs) + if operation_name == "UpdateEndpoint": + endpoint_name = operation_kwargs["EndpointName"] + sagemaker_backend.set_endpoint_latest_operation( + endpoint_name=endpoint_name, + operation=EndpointOperation.update_unsuccessful( + latency_seconds=endpoint_update_latency)) + return result + + with mock.patch("botocore.client.BaseClient._make_api_call", new=fail_endpoint_updates),\ + pytest.raises(MlflowException) as exc: + mfs.deploy(app_name="test-app", + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_REPLACE) + + assert "deployment operation failed" in exc.value.message + assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR) + + +@pytest.mark.large +@mock_sagemaker_aws_services +def test_deploy_in_replace_mode_waits_for_endpoint_update_completion_before_deleting_resources( + pretrained_model, sagemaker_client): + endpoint_update_latency = 10 + sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name) + sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency) + + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + endpoint_config_name_before_replacement = sagemaker_client.describe_endpoint( + EndpointName=app_name)["EndpointConfigName"] + + boto_caller = botocore.client.BaseClient._make_api_call + update_start_time = time.time() + + def validate_deletes(self, operation_name, operation_kwargs): + """ + Processes all boto3 client operations according to the following rules: + - If the operation deletes an S3 or SageMaker resource, ensure that the deletion was + initiated after the completion of the endpoint update + - Else, execute the client operation as normal + """ + result = boto_caller(self, operation_name, operation_kwargs) + if "Delete" in operation_name: + # Confirm that a successful endpoint update occurred prior to the invocation of this + # delete operation + endpoint_info = sagemaker_client.describe_endpoint(EndpointName=app_name) + assert endpoint_info["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE + assert endpoint_info["EndpointConfigName"] != endpoint_config_name_before_replacement + assert time.time() - update_start_time >= endpoint_update_latency + return result + + with mock.patch("botocore.client.BaseClient._make_api_call", new=validate_deletes): + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_REPLACE, + archive=False) + + +@pytest.mark.large +@mock_sagemaker_aws_services +def 
test_deploy_in_replace_mode_with_archiving_does_not_delete_resources( + pretrained_model, sagemaker_client): + region_name = sagemaker_client.meta.region_name + sagemaker_backend = get_sagemaker_backend(region_name) + sagemaker_backend.set_endpoint_update_latency(5) + + app_name = "test-app" + mfs.deploy(app_name=app_name, + model_uri=pretrained_model.model_uri, + mode=mfs.DEPLOYMENT_MODE_CREATE) + + s3_client = boto3.client("s3", region_name=region_name) + default_bucket = mfs._get_default_s3_bucket(region_name) + object_names_before_replacement = [ + entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]] + endpoint_configs_before_replacement = [ + config["EndpointConfigName"] for config in + sagemaker_client.list_endpoint_configs()["EndpointConfigs"]] + models_before_replacement = [ + model["ModelName"] for model in sagemaker_client.list_models()["Models"]] + + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=pretrained_model.run_id, artifact_path=pretrained_model.model_path) + sk_model = mlflow.sklearn.load_model(model_uri=model_uri) + new_artifact_path = "model" + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=sk_model, artifact_path=new_artifact_path) + new_model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, artifact_path=new_artifact_path) + mfs.deploy(app_name=app_name, + model_uri=new_model_uri, + mode=mfs.DEPLOYMENT_MODE_REPLACE, + archive=True, + synchronous=True) + + object_names_after_replacement = [ + entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]] + endpoint_configs_after_replacement = [ + config["EndpointConfigName"] for config in + sagemaker_client.list_endpoint_configs()["EndpointConfigs"]] + models_after_replacement = [ + model["ModelName"] for model in sagemaker_client.list_models()["Models"]] + assert all([object_name in object_names_after_replacement + for object_name in object_names_before_replacement]) + assert all([endpoint_config in endpoint_configs_after_replacement + for endpoint_config in endpoint_configs_before_replacement]) + assert all([model in models_after_replacement for model in models_before_replacement]) diff --git a/tests/sagemaker/test_model_export.py b/tests/sagemaker/test_model_export.py deleted file mode 100644 index 43fc8fc956e14..0000000000000 --- a/tests/sagemaker/test_model_export.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import print_function - -import os -import pickle -import tempfile -import unittest - -import sklearn.datasets as datasets -import sklearn.linear_model as glm - -import numpy as np -import pandas as pd -import pytest - -from mlflow.utils.file_utils import TempDir -from mlflow import pyfunc - -from mlflow.utils.environment import _mlflow_conda_env - -from tests.helper_functions import score_model_in_sagemaker_docker_container - - -class TestModelExport(unittest.TestCase): - def setUp(self): - self._tmp = tempfile.mkdtemp() - iris = datasets.load_iris() - self._X = iris.data[:, :2] # we only take the first two features. 
- self._y = iris.target - self._iris_df = pd.DataFrame(self._X, columns=iris.feature_names[:2]) - self._linear_lr = glm.LogisticRegression() - self._linear_lr.fit(self._X, self._y) - self._linear_lr_predict = self._linear_lr.predict(self._X) - os.environ["LC_ALL"] = "en_US.UTF-8" - os.environ["LANG"] = "en_US.UTF-8" - - @pytest.mark.large - def test_model_export(self): - path_to_remove = None - try: - with TempDir(chdr=True, remove_on_exit=False) as tmp: - path_to_remove = tmp._path - # NOTE: Changed dir to temp dir and use relative paths to get around the way temp - # dirs are handled in python. - model_pkl = tmp.path("model.pkl") - with open(model_pkl, "wb") as f: - pickle.dump(self._linear_lr, f) - input_path = tmp.path("input_model") - conda_env = "conda.env" - pyfunc.save_model(input_path, loader_module="mlflow.sklearn", - data_path=model_pkl, - conda_env=_mlflow_conda_env(tmp.path(conda_env))) - xpred = score_model_in_sagemaker_docker_container(input_path, self._iris_df) - print('expected', self._linear_lr_predict) - print('actual ', xpred) - np.testing.assert_array_equal(self._linear_lr_predict, xpred) - finally: - if path_to_remove: - try: - import shutil - shutil.rmtree(path_to_remove) - except OSError: - print("Failed to remove", path_to_remove) - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/server/test_handlers.py b/tests/server/test_handlers.py index 370b189e99b3b..004e9dd406553 100644 --- a/tests/server/test_handlers.py +++ b/tests/server/test_handlers.py @@ -5,10 +5,12 @@ from mlflow.entities import ViewType from mlflow.exceptions import MlflowException -from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, ErrorCode +from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE, ErrorCode from mlflow.server.handlers import get_endpoints, _create_experiment, _get_request_message, \ - _search_runs, catch_mlflow_exception + _search_runs, _log_batch, catch_mlflow_exception +from mlflow.store.abstract_store import PagedList from mlflow.protos.service_pb2 import CreateExperiment, SearchRuns +from mlflow.utils.validation import MAX_BATCH_LOG_REQUEST_SIZE @pytest.fixture() @@ -17,6 +19,12 @@ def mock_get_request_message(): yield m +@pytest.fixture() +def mock_get_request_json(): + with mock.patch('mlflow.server.handlers._get_request_json') as m: + yield m + + @pytest.fixture() def mock_store(): with mock.patch('mlflow.server.handlers._get_store') as m: @@ -28,7 +36,7 @@ def mock_store(): def test_get_endpoints(): endpoints = get_endpoints() create_experiment_endpoint = [e for e in endpoints if e[1] == _create_experiment] - assert len(create_experiment_endpoint) == 2 + assert len(create_experiment_endpoint) == 4 def test_can_parse_json(): @@ -72,12 +80,23 @@ def test_search_runs_default_view_type(mock_get_request_message, mock_store): """ Search Runs default view type is filled in as ViewType.ACTIVE_ONLY """ - mock_get_request_message.return_value = SearchRuns(experiment_ids=[0], anded_expressions=[]) + mock_get_request_message.return_value = SearchRuns(experiment_ids=["0"]) + mock_store.search_runs.return_value = PagedList([], None) _search_runs() args, _ = mock_store.search_runs.call_args assert args[2] == ViewType.ACTIVE_ONLY +def test_log_batch_api_req(mock_get_request_json): + mock_get_request_json.return_value = "a" * (MAX_BATCH_LOG_REQUEST_SIZE + 1) + response = _log_batch() + assert response.status_code == 400 + json_response = json.loads(response.get_data()) + assert json_response["error_code"] == 
ErrorCode.Name(INVALID_PARAMETER_VALUE)
+    assert ("Batched logging API requests must be at most %s bytes" % MAX_BATCH_LOG_REQUEST_SIZE
+            in json_response["message"])
+
+
 def test_catch_mlflow_exception():
     @catch_mlflow_exception
     def test_handler():
diff --git a/tests/sklearn/test_sklearn_model_export.py b/tests/sklearn/test_sklearn_model_export.py
index 44306deab4f94..139b7f04b1e05 100644
--- a/tests/sklearn/test_sklearn_model_export.py
+++ b/tests/sklearn/test_sklearn_model_export.py
@@ -1,86 +1,399 @@
 from __future__ import print_function
 
+import sys
 import os
 import pickle
-import tempfile
-import unittest
+import pytest
+import yaml
+import json
+from collections import namedtuple
 
 import numpy as np
+import pandas as pd
+import pandas.testing
+import sklearn
 import sklearn.datasets as datasets
 import sklearn.linear_model as glm
 import sklearn.neighbors as knn
+from sklearn.pipeline import Pipeline as SKPipeline
+from sklearn.preprocessing import FunctionTransformer as SKFunctionTransformer
 
-from mlflow import sklearn, pyfunc
-import mlflow
+import mlflow.sklearn
+import mlflow.utils
+import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
+from mlflow import pyfunc
+from mlflow.exceptions import MlflowException
+from mlflow.protos.databricks_pb2 import ErrorCode, INVALID_PARAMETER_VALUE
 from mlflow.models import Model
-from mlflow.tracking.utils import _get_model_log_dir
-from mlflow.utils.file_utils import TempDir
+from mlflow.store.s3_artifact_repo import S3ArtifactRepository
+from mlflow.tracking.artifact_utils import _download_artifact_from_uri
 from mlflow.utils.environment import _mlflow_conda_env
+from mlflow.utils.file_utils import TempDir
+from mlflow.utils.model_utils import _get_flavor_configuration
+
+from tests.helper_functions import set_boto_credentials  # pylint: disable=unused-import
+from tests.helper_functions import mock_s3_bucket  # pylint: disable=unused-import
+from tests.helper_functions import score_model_in_sagemaker_docker_container
+
+ModelWithData = namedtuple("ModelWithData", ["model", "inference_data"])
+
+
+@pytest.fixture(scope="session")
+def sklearn_knn_model():
+    iris = datasets.load_iris()
+    X = iris.data[:, :2]  # we only take the first two features.
+    y = iris.target
+    knn_model = knn.KNeighborsClassifier()
+    knn_model.fit(X, y)
+    return ModelWithData(model=knn_model, inference_data=X)
+
+
+@pytest.fixture(scope="session")
+def sklearn_logreg_model():
+    iris = datasets.load_iris()
+    X = iris.data[:, :2]  # we only take the first two features.
+ y = iris.target + linear_lr = glm.LogisticRegression() + linear_lr.fit(X, y) + return ModelWithData(model=linear_lr, inference_data=X) + + +@pytest.fixture(scope="session") +def sklearn_custom_transformer_model(sklearn_knn_model): + def transform(vec): + print("Invoking custom transformer!") + return vec + 1 + + transformer = SKFunctionTransformer(transform, validate=True) + pipeline = SKPipeline([("custom_transformer", transformer), ("knn", sklearn_knn_model.model)]) + return ModelWithData(pipeline, inference_data=datasets.load_iris().data[:, :2]) + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.fixture +def sklearn_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["scikit-learn", "pytest"]) + return conda_env + + +@pytest.mark.large +def test_model_save_load(sklearn_knn_model, model_path): + knn_model = sklearn_knn_model.model + + mlflow.sklearn.save_model(sk_model=knn_model, path=model_path) + reloaded_knn_model = mlflow.sklearn.load_model(model_uri=model_path) + reloaded_knn_pyfunc = pyfunc.load_pyfunc(model_uri=model_path) + + np.testing.assert_array_equal( + knn_model.predict(sklearn_knn_model.inference_data), + reloaded_knn_model.predict(sklearn_knn_model.inference_data)) + + np.testing.assert_array_equal( + reloaded_knn_model.predict(sklearn_knn_model.inference_data), + reloaded_knn_pyfunc.predict(sklearn_knn_model.inference_data)) -def _load_pyfunc(path): - with open(path, "rb") as f: - return pickle.load(f) - - -class TestModelExport(unittest.TestCase): - def setUp(self): - self._tmp = tempfile.mkdtemp() - iris = datasets.load_iris() - self._X = iris.data[:, :2] # we only take the first two features. - self._y = iris.target - self._knn = knn.KNeighborsClassifier() - self._knn.fit(self._X, self._y) - self._knn_predict = self._knn.predict(self._X) - self._linear_lr = glm.LogisticRegression() - self._linear_lr.fit(self._X, self._y) - self._linear_lr_predict = self._linear_lr.predict(self._X) - - def test_model_save_load(self): - with TempDir(chdr=True, remove_on_exit=True) as tmp: - model_path = tmp.path("knn.pkl") - with open(model_path, "wb") as f: - pickle.dump(self._knn, f) - path = tmp.path("knn") - sklearn.save_model(self._knn, path=path) - x = sklearn.load_model(path) - xpred = x.predict(self._X) - np.testing.assert_array_equal(self._knn_predict, xpred) - # sklearn should also be stored as a valid pyfunc model - # test pyfunc compatibility - y = pyfunc.load_pyfunc(path) - ypred = y.predict(self._X) - np.testing.assert_array_equal(self._knn_predict, ypred) - - def test_model_log(self): - old_uri = mlflow.get_tracking_uri() - # should_start_run tests whether or not calling log_model() automatically starts a run. 
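+# The mock_s3_bucket fixture (imported from tests.helper_functions above) provides a
+# moto-backed bucket, so the "s3://" model URI below resolves without real AWS access.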
+@pytest.mark.large
+def test_model_load_from_remote_uri_succeeds(sklearn_knn_model, model_path, mock_s3_bucket):
+    mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path)
+
+    artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
+    artifact_path = "model"
+    artifact_repo = S3ArtifactRepository(artifact_root)
+    artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
+
+    model_uri = artifact_root + "/" + artifact_path
+    reloaded_knn_model = mlflow.sklearn.load_model(model_uri=model_uri)
+    np.testing.assert_array_equal(
+        sklearn_knn_model.model.predict(sklearn_knn_model.inference_data),
+        reloaded_knn_model.predict(sklearn_knn_model.inference_data))
+
+
+@pytest.mark.large
+def test_model_log(sklearn_logreg_model, model_path):
+    old_uri = mlflow.get_tracking_uri()
+    with TempDir(chdr=True, remove_on_exit=True) as tmp:
         for should_start_run in [False, True]:
-            with TempDir(chdr=True, remove_on_exit=True) as tmp:
-                try:
-                    mlflow.set_tracking_uri("test")
-                    if should_start_run:
-                        mlflow.start_run()
-                    artifact_path = "linear"
-                    conda_env = os.path.join(tmp.path(), "conda_env.yaml")
-                    _mlflow_conda_env(conda_env, additional_pip_deps=["sklearn"])
-                    sklearn.log_model(sk_model=self._linear_lr,
-                                      artifact_path=artifact_path,
-                                      conda_env=conda_env)
-                    x = sklearn.load_model(artifact_path, run_id=mlflow.active_run().info.run_uuid)
-                    model_path = _get_model_log_dir(
-                        artifact_path, mlflow.active_run().info.run_uuid)
-                    model_config = Model.load(os.path.join(model_path, "MLmodel"))
-                    assert pyfunc.FLAVOR_NAME in model_config.flavors
-                    assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
-                    env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
-                    assert os.path.exists(os.path.join(model_path, env_path))
-                    xpred = x.predict(self._X)
-                    np.testing.assert_array_equal(self._linear_lr_predict, xpred)
-                finally:
-                    mlflow.end_run()
-                    mlflow.set_tracking_uri(old_uri)
-
-
-if __name__ == '__main__':
-    unittest.main()
+            try:
+                mlflow.set_tracking_uri("test")
+                if should_start_run:
+                    mlflow.start_run()
+
+                artifact_path = "linear"
+                conda_env = os.path.join(tmp.path(), "conda_env.yaml")
+                _mlflow_conda_env(conda_env, additional_pip_deps=["scikit-learn"])
+
+                mlflow.sklearn.log_model(
+                    sk_model=sklearn_logreg_model.model,
+                    artifact_path=artifact_path,
+                    conda_env=conda_env)
+                model_uri = "runs:/{run_id}/{artifact_path}".format(
+                    run_id=mlflow.active_run().info.run_id,
+                    artifact_path=artifact_path)
+
+                reloaded_logreg_model = mlflow.sklearn.load_model(model_uri=model_uri)
+                np.testing.assert_array_equal(
+                    sklearn_logreg_model.model.predict(sklearn_logreg_model.inference_data),
+                    reloaded_logreg_model.predict(sklearn_logreg_model.inference_data))
+
+                model_path = _download_artifact_from_uri(artifact_uri=model_uri)
+                model_config = Model.load(os.path.join(model_path, "MLmodel"))
+                assert pyfunc.FLAVOR_NAME in model_config.flavors
+                assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
+                env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
+                assert os.path.exists(os.path.join(model_path, env_path))
+
+            finally:
+                mlflow.end_run()
+                mlflow.set_tracking_uri(old_uri)
+
+
+@pytest.mark.large
+def test_custom_transformer_can_be_saved_and_loaded_with_cloudpickle_format(
+        sklearn_custom_transformer_model, tmpdir):
+    custom_transformer_model = sklearn_custom_transformer_model.model
+
+    # Because the model contains a custom transformer that is not defined at the top level of the
+    # current test module, we expect pickle to fail when attempting to serialize it. In contrast,
+    # we expect cloudpickle to successfully locate the transformer definition and serialize the
+    # model successfully.
+    if sys.version_info >= (3, 0):
+        expect_exception_context = pytest.raises(AttributeError)
+    else:
+        expect_exception_context = pytest.raises(pickle.PicklingError)
+    with expect_exception_context:
+        pickle_format_model_path = os.path.join(str(tmpdir), "pickle_model")
+        mlflow.sklearn.save_model(sk_model=custom_transformer_model,
+                                  path=pickle_format_model_path,
+                                  serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE)
+
+    cloudpickle_format_model_path = os.path.join(str(tmpdir), "cloud_pickle_model")
+    mlflow.sklearn.save_model(sk_model=custom_transformer_model,
+                              path=cloudpickle_format_model_path,
+                              serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE)
+
+    reloaded_custom_transformer_model = mlflow.sklearn.load_model(
+        model_uri=cloudpickle_format_model_path)
+
+    np.testing.assert_array_equal(
+        custom_transformer_model.predict(sklearn_custom_transformer_model.inference_data),
+        reloaded_custom_transformer_model.predict(
+            sklearn_custom_transformer_model.inference_data))
+
+
+@pytest.mark.large
+def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
+        sklearn_knn_model, model_path, sklearn_custom_env):
+    mlflow.sklearn.save_model(
+        sk_model=sklearn_knn_model.model, path=model_path, conda_env=sklearn_custom_env)
+
+    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
+    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
+    assert os.path.exists(saved_conda_env_path)
+    assert saved_conda_env_path != sklearn_custom_env
+
+    with open(sklearn_custom_env, "r") as f:
+        sklearn_custom_env_parsed = yaml.safe_load(f)
+    with open(saved_conda_env_path, "r") as f:
+        saved_conda_env_parsed = yaml.safe_load(f)
+    assert saved_conda_env_parsed == sklearn_custom_env_parsed
+
+
+@pytest.mark.large
+def test_model_save_accepts_conda_env_as_dict(sklearn_knn_model, model_path):
+    conda_env = dict(mlflow.sklearn.get_default_conda_env())
+    conda_env["dependencies"].append("pytest")
+    mlflow.sklearn.save_model(
+        sk_model=sklearn_knn_model.model, path=model_path, conda_env=conda_env)
+
+    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
+    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
+    assert os.path.exists(saved_conda_env_path)
+
+    with open(saved_conda_env_path, "r") as f:
+        saved_conda_env_parsed = yaml.safe_load(f)
+    assert saved_conda_env_parsed == conda_env
+
+
+@pytest.mark.large
+def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
+        sklearn_knn_model, sklearn_custom_env):
+    artifact_path = "model"
+    with mlflow.start_run():
+        mlflow.sklearn.log_model(sk_model=sklearn_knn_model.model,
+                                 artifact_path=artifact_path,
+                                 conda_env=sklearn_custom_env)
+        model_uri = "runs:/{run_id}/{artifact_path}".format(
+            run_id=mlflow.active_run().info.run_id,
+            artifact_path=artifact_path)
+
+    model_path = _download_artifact_from_uri(artifact_uri=model_uri)
+    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
+    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
+    assert os.path.exists(saved_conda_env_path)
+    assert saved_conda_env_path != sklearn_custom_env
+
+    with open(sklearn_custom_env, "r") as f:
+        sklearn_custom_env_parsed = yaml.safe_load(f)
+    with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == sklearn_custom_env_parsed + + +@pytest.mark.large +def test_model_save_throws_exception_if_serialization_format_is_unrecognized( + sklearn_knn_model, model_path): + with pytest.raises(MlflowException) as exc: + mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path, + serialization_format="not a valid format") + assert exc.error_code == INVALID_PARAMETER_VALUE + + # The unsupported serialization format should have been detected prior to the execution of + # any directory creation or state-mutating persistence logic that would prevent a second + # serialization call with the same model path from succeeding + assert not os.path.exists(model_path) + mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path) + + +@pytest.mark.large +def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( + sklearn_knn_model, model_path): + knn_model = sklearn_knn_model.model + mlflow.sklearn.save_model(sk_model=knn_model, path=model_path, conda_env=None, + serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.sklearn.get_default_conda_env() + + +@pytest.mark.large +def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( + sklearn_knn_model): + artifact_path = "model" + knn_model = sklearn_knn_model.model + with mlflow.start_run(): + mlflow.sklearn.log_model(sk_model=knn_model, artifact_path=artifact_path, conda_env=None, + serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_PICKLE) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.sklearn.get_default_conda_env() + + +@pytest.mark.large +def test_model_save_uses_cloudpickle_serialization_format_by_default(sklearn_knn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path, conda_env=None) + + sklearn_conf = _get_flavor_configuration( + model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME) + assert "serialization_format" in sklearn_conf + assert sklearn_conf["serialization_format"] == mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE + + +@pytest.mark.large +def test_model_log_uses_cloudpickle_serialization_format_by_default(sklearn_knn_model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.sklearn.log_model( + sk_model=sklearn_knn_model.model, artifact_path=artifact_path, conda_env=None) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) + sklearn_conf = _get_flavor_configuration( + model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME) + assert "serialization_format" in sklearn_conf + assert sklearn_conf["serialization_format"] == 
mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE + + +@pytest.mark.large +def test_model_save_with_cloudpickle_format_adds_cloudpickle_to_conda_environment( + sklearn_knn_model, model_path): + mlflow.sklearn.save_model( + sk_model=sklearn_knn_model.model, + path=model_path, + conda_env=None, + serialization_format=mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE) + + sklearn_conf = _get_flavor_configuration( + model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME) + assert "serialization_format" in sklearn_conf + assert sklearn_conf["serialization_format"] == mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + + pip_deps = [dependency for dependency in saved_conda_env_parsed["dependencies"] + if type(dependency) == dict and "pip" in dependency] + assert len(pip_deps) == 1 + assert any(["cloudpickle" in pip_dep for pip_dep in pip_deps[0]["pip"]]) + + +@pytest.mark.large +def test_model_save_without_cloudpickle_format_does_not_add_cloudpickle_to_conda_environment( + sklearn_knn_model, model_path): + non_cloudpickle_serialization_formats = list(mlflow.sklearn.SUPPORTED_SERIALIZATION_FORMATS) + non_cloudpickle_serialization_formats.remove(mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE) + + for serialization_format in non_cloudpickle_serialization_formats: + mlflow.sklearn.save_model( + sk_model=sklearn_knn_model.model, + path=model_path, + conda_env=None, + serialization_format=serialization_format) + + sklearn_conf = _get_flavor_configuration( + model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME) + assert "serialization_format" in sklearn_conf + assert sklearn_conf["serialization_format"] == serialization_format + + pyfunc_conf = _get_flavor_configuration( + model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert all(["cloudpickle" not in dependency + for dependency in saved_conda_env_parsed["dependencies"]]) + + +@pytest.mark.release +def test_sagemaker_docker_model_scoring_with_default_conda_env(sklearn_knn_model, model_path): + mlflow.sklearn.save_model(sk_model=sklearn_knn_model.model, path=model_path, conda_env=None) + reloaded_knn_pyfunc = pyfunc.load_pyfunc(model_uri=model_path) + + inference_df = pd.DataFrame(sklearn_knn_model.inference_data) + scoring_response = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=inference_df, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + flavor=mlflow.pyfunc.FLAVOR_NAME) + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + + pandas.testing.assert_frame_equal( + deployed_model_preds, + pd.DataFrame(reloaded_knn_pyfunc.predict(inference_df)), + check_dtype=False, + check_less_precise=6) diff --git a/tests/spark/test_spark_model_export.py b/tests/spark/test_spark_model_export.py index 8db58ce35d7d0..2e4a0ab366059 100644 --- a/tests/spark/test_spark_model_export.py +++ b/tests/spark/test_spark_model_export.py @@ -1,13 +1,14 @@ import os import json +import numpy as np import pandas as pd +import pandas.testing import pyspark from 
pyspark.ml.classification import LogisticRegression from pyspark.ml.feature import VectorAssembler from pyspark.ml.pipeline import Pipeline from pyspark.ml.wrapper import JavaModel -from pyspark.ml.util import _jvm from pyspark.version import __version__ as pyspark_version from pyspark.sql import SQLContext from pyspark.sql.types import DateType @@ -15,23 +16,33 @@ from sklearn import datasets import shutil from collections import namedtuple +import yaml import mlflow +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server import mlflow.tracking from mlflow import active_run, pyfunc, mleap from mlflow import spark as sparkm +from mlflow.exceptions import MlflowException from mlflow.models import Model - +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.utils.environment import _mlflow_conda_env -from tests.helper_functions import score_model_in_sagemaker_docker_container +from mlflow.utils.file_utils import TempDir +from mlflow.utils.model_utils import _get_flavor_configuration +from tests.helper_functions import score_model_in_sagemaker_docker_container from tests.pyfunc.test_spark import score_model_as_udf +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import @pytest.fixture -def spark_conda_env(tmpdir): +def spark_custom_env(tmpdir): conda_env = os.path.join(str(tmpdir), "conda_env.yml") - _mlflow_conda_env(conda_env, additional_pip_deps=["pyspark=={}".format(pyspark_version)]) + _mlflow_conda_env( + conda_env, + additional_conda_deps=["pyspark", "pytest"]) return conda_env @@ -47,33 +58,72 @@ def spark_conda_env(tmpdir): def spark_context(): conf = pyspark.SparkConf() conf.set(key="spark.jars.packages", - value='ml.combust.mleap:mleap-spark-base_2.11:0.10.0,' - 'ml.combust.mleap:mleap-spark_2.11:0.10.0') + value='ml.combust.mleap:mleap-spark-base_2.11:0.12.0,' + 'ml.combust.mleap:mleap-spark_2.11:0.12.0') conf.set(key="spark_session.python.worker.reuse", value=True) - sc = pyspark.SparkContext(master="local-cluster[2, 1, 1024]", conf=conf).getOrCreate() + spark = pyspark.sql.SparkSession.builder\ + .config(conf=conf)\ + .master("local-cluster[2, 1, 1024]")\ + .getOrCreate() + sc = spark.sparkContext return sc @pytest.fixture(scope="session") -def spark_model_iris(spark_context): +def iris_df(spark_context): iris = datasets.load_iris() X = iris.data # we only take the first two features. 
y = iris.target feature_names = ["0", "1", "2", "3"] - pandas_df = pd.DataFrame(X, columns=feature_names) # to make spark_udf work - pandas_df['label'] = pd.Series(y) + iris_pandas_df = pd.DataFrame(X, columns=feature_names) # to make spark_udf work + iris_pandas_df['label'] = pd.Series(y) spark_session = pyspark.sql.SparkSession(spark_context) - spark_df = spark_session.createDataFrame(pandas_df) + iris_spark_df = spark_session.createDataFrame(iris_pandas_df) + return feature_names, iris_pandas_df, iris_spark_df + + +@pytest.fixture(scope="session") +def spark_model_iris(iris_df): + feature_names, iris_pandas_df, iris_spark_df = iris_df assembler = VectorAssembler(inputCols=feature_names, outputCol="features") lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8) pipeline = Pipeline(stages=[assembler, lr]) # Fit the model - model = pipeline.fit(spark_df) - preds_df = model.transform(spark_df) + model = pipeline.fit(iris_spark_df) + preds_df = model.transform(iris_spark_df) preds = [x.prediction for x in preds_df.select("prediction").collect()] return SparkModelWithData(model=model, - spark_df=spark_df, - pandas_df=pandas_df, + spark_df=iris_spark_df, + pandas_df=iris_pandas_df, + predictions=preds) + + +@pytest.fixture(scope="session") +def spark_model_transformer(iris_df): + feature_names, iris_pandas_df, iris_spark_df = iris_df + assembler = VectorAssembler(inputCols=feature_names, outputCol="features") + # Fit the model + preds_df = assembler.transform(iris_spark_df) + preds = [x.features for x in preds_df.select("features").collect()] + return SparkModelWithData(model=assembler, + spark_df=iris_spark_df, + pandas_df=iris_pandas_df, + predictions=preds) + + +@pytest.fixture(scope="session") +def spark_model_estimator(iris_df, spark_context): + feature_names, iris_pandas_df, iris_spark_df = iris_df + assembler = VectorAssembler(inputCols=feature_names, outputCol="features") + features_df = assembler.transform(iris_spark_df) + lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8) + # Fit the model + model = lr.fit(features_df) + preds_df = model.transform(features_df) + preds = [x.prediction for x in preds_df.select("prediction").collect()] + return SparkModelWithData(model=model, + spark_df=features_df, + pandas_df=iris_pandas_df, predictions=preds) @@ -82,6 +132,7 @@ def model_path(tmpdir): return str(tmpdir.mkdir("model")) +@pytest.mark.large def test_hadoop_filesystem(tmpdir): # copy local dir to and back from HadoopFS and make sure the results match from mlflow.spark import _HadoopFileSystem as FS @@ -121,11 +172,12 @@ def test_hadoop_filesystem(tmpdir): assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix -def test_model_export(spark_model_iris, model_path, spark_conda_env): +@pytest.mark.large +def test_model_export(spark_model_iris, model_path, spark_custom_env): sparkm.save_model(spark_model_iris.model, path=model_path, - conda_env=spark_conda_env) + conda_env=spark_custom_env) # 1. score and compare reloaded sparkml model - reloaded_model = sparkm.load_model(path=model_path) + reloaded_model = sparkm.load_model(model_uri=model_path) preds_df = reloaded_model.transform(spark_model_iris.spark_df) preds1 = [x.prediction for x in preds_df.select("prediction").collect()] assert spark_model_iris.predictions == preds1 @@ -134,30 +186,85 @@ def test_model_export(spark_model_iris, model_path, spark_conda_env): preds2 = m.predict(spark_model_iris.pandas_df) assert spark_model_iris.predictions == preds2 # 3. 
score and compare reloaded pyfunc Spark udf - preds3 = score_model_as_udf(model_path, run_id=None, pandas_df=spark_model_iris.pandas_df) + preds3 = score_model_as_udf(model_uri=model_path, pandas_df=spark_model_iris.pandas_df) assert spark_model_iris.predictions == preds3 assert os.path.exists(sparkm.DFS_TMP) @pytest.mark.large -def test_model_deployment(spark_model_iris, model_path, spark_conda_env): +def test_estimator_model_export(spark_model_estimator, model_path, spark_custom_env): + sparkm.save_model(spark_model_estimator.model, path=model_path, conda_env=spark_custom_env) + # score and compare the reloaded sparkml model + reloaded_model = sparkm.load_model(model_uri=model_path) + preds_df = reloaded_model.transform(spark_model_estimator.spark_df) + preds = [x.prediction for x in preds_df.select("prediction").collect()] + assert spark_model_estimator.predictions == preds + # 2. score and compare reloaded pyfunc + m = pyfunc.load_pyfunc(model_path) + preds2 = m.predict(spark_model_estimator.spark_df.toPandas()) + assert spark_model_estimator.predictions == preds2 + + +@pytest.mark.large +def test_transformer_model_export(spark_model_transformer, model_path, spark_custom_env): + with pytest.raises(MlflowException) as e: + sparkm.save_model( + spark_model_transformer.model, + path=model_path, + conda_env=spark_custom_env) + assert "Cannot serialize this model" in e.value.message + + +# TODO(czumar): Remark this test as "large" instead of "release" after SageMaker docker +# container build issues have been debugged +# @pytest.mark.large +@pytest.mark.release +def test_model_deployment(spark_model_iris, model_path, spark_custom_env): sparkm.save_model(spark_model_iris.model, path=model_path, - conda_env=spark_conda_env, + conda_env=spark_custom_env, # Test both spark ml and mleap sample_input=spark_model_iris.spark_df) # 1. score and compare pyfunc deployed in Sagemaker docker container - preds1 = score_model_in_sagemaker_docker_container(model_path=model_path, - data=spark_model_iris.pandas_df, - flavor=mlflow.pyfunc.FLAVOR_NAME) - assert spark_model_iris.predictions == preds1 + scoring_response_1 = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=spark_model_iris.pandas_df, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + flavor=mlflow.pyfunc.FLAVOR_NAME) + np.testing.assert_array_almost_equal( + spark_model_iris.predictions, + np.array(json.loads(scoring_response_1.content)), + decimal=4) # 2. 
score and compare mleap deployed in Sagemaker docker container - preds2 = score_model_in_sagemaker_docker_container(model_path=model_path, - data=spark_model_iris.pandas_df, - flavor=mlflow.mleap.FLAVOR_NAME) - assert spark_model_iris.predictions == preds2 + scoring_response_2 = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=spark_model_iris.pandas_df.to_json(orient="split"), + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON, + flavor=mlflow.mleap.FLAVOR_NAME) + np.testing.assert_array_almost_equal( + spark_model_iris.predictions, + np.array(json.loads(scoring_response_2.content)), + decimal=4) + + +@pytest.mark.release +def test_sagemaker_docker_model_scoring_with_default_conda_env(spark_model_iris, model_path): + sparkm.save_model(spark_model_iris.model, path=model_path, conda_env=None) + + scoring_response = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=spark_model_iris.pandas_df, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON, + flavor=mlflow.pyfunc.FLAVOR_NAME) + deployed_model_preds = np.array(json.loads(scoring_response.content)) + + np.testing.assert_array_almost_equal( + deployed_model_preds, + spark_model_iris.predictions, + decimal=4) +@pytest.mark.large def test_sparkml_model_log(tmpdir, spark_model_iris): # Print the coefficients and intercept for multinomial logistic regression old_tracking_uri = mlflow.get_tracking_uri() @@ -175,10 +282,12 @@ def test_sparkml_model_log(tmpdir, spark_model_iris): cnt += 1 sparkm.log_model(artifact_path=artifact_path, spark_model=spark_model_iris.model, dfs_tmpdir=dfs_tmp_dir) - run_id = active_run().info.run_uuid + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + # test reloaded model - reloaded_model = sparkm.load_model(artifact_path, run_id=run_id, - dfs_tmpdir=dfs_tmp_dir) + reloaded_model = sparkm.load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmp_dir) preds_df = reloaded_model.transform(spark_model_iris.spark_df) preds = [x.prediction for x in preds_df.select("prediction").collect()] assert spark_model_iris.predictions == preds @@ -190,19 +299,182 @@ def test_sparkml_model_log(tmpdir, spark_model_iris): shutil.rmtree(tracking_dir) +@pytest.mark.large +def test_sparkml_estimator_model_log(tmpdir, spark_model_estimator): + # Print the coefficients and intercept for multinomial logistic regression + old_tracking_uri = mlflow.get_tracking_uri() + cnt = 0 + # should_start_run tests whether or not calling log_model() automatically starts a run. 
+ for should_start_run in [False, True]: + for dfs_tmp_dir in [None, os.path.join(str(tmpdir), "test")]: + print("should_start_run =", should_start_run, "dfs_tmp_dir =", dfs_tmp_dir) + try: + tracking_dir = os.path.abspath(str(tmpdir.join("mlruns"))) + mlflow.set_tracking_uri("file://%s" % tracking_dir) + if should_start_run: + mlflow.start_run() + artifact_path = "model%d" % cnt + cnt += 1 + sparkm.log_model( + artifact_path=artifact_path, + spark_model=spark_model_estimator.model, + dfs_tmpdir=dfs_tmp_dir) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + # test reloaded model + reloaded_model = sparkm.load_model(model_uri=model_uri, dfs_tmpdir=dfs_tmp_dir) + preds_df = reloaded_model.transform(spark_model_estimator.spark_df) + preds = [x.prediction for x in preds_df.select("prediction").collect()] + assert spark_model_estimator.predictions == preds + finally: + mlflow.end_run() + mlflow.set_tracking_uri(old_tracking_uri) + x = dfs_tmp_dir or sparkm.DFS_TMP + shutil.rmtree(x) + shutil.rmtree(tracking_dir) + + +@pytest.mark.large +def test_sparkml_model_log_invalid_args(spark_model_transformer, model_path): + with pytest.raises(MlflowException) as e: + sparkm.log_model( + spark_model=spark_model_transformer.model, + artifact_path="model0") + assert "Cannot serialize this model" in e.value.message + + +@pytest.mark.large +def test_sparkml_model_load_from_remote_uri_succeeds(spark_model_iris, model_path, mock_s3_bucket): + sparkm.save_model(spark_model=spark_model_iris.model, path=model_path) + + artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + artifact_path = "model" + artifact_repo = S3ArtifactRepository(artifact_root) + artifact_repo.log_artifacts(model_path, artifact_path=artifact_path) + + model_uri = artifact_root + "/" + artifact_path + reloaded_model = sparkm.load_model(model_uri=model_uri) + preds_df = reloaded_model.transform(spark_model_iris.spark_df) + preds = [x.prediction for x in preds_df.select("prediction").collect()] + assert spark_model_iris.predictions == preds + + +@pytest.mark.large +def test_sparkml_model_save_persists_specified_conda_env_in_mlflow_model_directory( + spark_model_iris, model_path, spark_custom_env): + sparkm.save_model(spark_model=spark_model_iris.model, + path=model_path, + conda_env=spark_custom_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != spark_custom_env + + with open(spark_custom_env, "r") as f: + spark_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == spark_custom_env_parsed + + +@pytest.mark.large +def test_sparkml_model_save_accepts_conda_env_as_dict(spark_model_iris, model_path): + conda_env = dict(mlflow.spark.get_default_conda_env()) + conda_env["dependencies"].append("pytest") + sparkm.save_model(spark_model=spark_model_iris.model, + path=model_path, + conda_env=conda_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == 
conda_env + + +@pytest.mark.large +def test_sparkml_model_log_persists_specified_conda_env_in_mlflow_model_directory( + spark_model_iris, model_path, spark_custom_env): + artifact_path = "model" + with mlflow.start_run(): + sparkm.log_model( + spark_model=spark_model_iris.model, + artifact_path=artifact_path, + conda_env=spark_custom_env) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != spark_custom_env + + with open(spark_custom_env, "r") as f: + spark_custom_env_parsed = yaml.safe_load(f) + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == spark_custom_env_parsed + + +@pytest.mark.large +def test_sparkml_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( + spark_model_iris, model_path): + sparkm.save_model(spark_model=spark_model_iris.model, path=model_path, conda_env=None) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == sparkm.get_default_conda_env() + + +@pytest.mark.large +def test_sparkml_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( + spark_model_iris): + artifact_path = "model" + with mlflow.start_run(): + sparkm.log_model( + spark_model=spark_model_iris.model, artifact_path=artifact_path, conda_env=None) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == sparkm.get_default_conda_env() + + +@pytest.mark.large def test_mleap_model_log(spark_model_iris): artifact_path = "model" - sparkm.log_model(spark_model=spark_model_iris.model, - sample_input=spark_model_iris.spark_df, - artifact_path=artifact_path) - rid = active_run().info.run_uuid - model_path = mlflow.tracking.utils._get_model_log_dir(model_name=artifact_path, run_id=rid) + with mlflow.start_run(): + sparkm.log_model(spark_model=spark_model_iris.model, + sample_input=spark_model_iris.spark_df, + artifact_path=artifact_path) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) config_path = os.path.join(model_path, "MLmodel") mlflow_model = Model.load(config_path) assert sparkm.FLAVOR_NAME in mlflow_model.flavors assert mleap.FLAVOR_NAME in mlflow_model.flavors +@pytest.mark.large def test_mleap_output_json_format(spark_model_iris, model_path): mlflow_model = Model() mleap.save_model(spark_model=spark_model_iris.model, @@ -221,6 +493,7 @@ def test_mleap_output_json_format(spark_model_iris, model_path): assert "name" in json_schema["fields"][0] 
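The tests in this file repeatedly hand-build `runs:/` URIs and resolve them with `_download_artifact_from_uri`. For context, a minimal sketch of the pattern under test, assuming a local tracking setup; the iris model here is a stand-in for illustration, not part of the change:

import mlflow
import mlflow.sklearn
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

# Fit a throwaway model so the sketch is self-contained.
X, y = load_iris(return_X_y=True)
model = LogisticRegression().fit(X, y)

with mlflow.start_run() as run:
    # Log under the artifact path "model" within the run.
    mlflow.sklearn.log_model(sk_model=model, artifact_path="model")
    # A runs:/ URI names the artifact relative to the run, so callers never
    # need to know the physical layout of the backing artifact store.
    model_uri = "runs:/{run_id}/model".format(run_id=run.info.run_id)

# The URI alone is enough to reload the model later.
reloaded_model = mlflow.sklearn.load_model(model_uri=model_uri)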
+@pytest.mark.large def test_spark_module_model_save_with_mleap_and_unsupported_transformer_raises_exception( spark_model_iris, model_path): class CustomTransformer(JavaModel): @@ -230,14 +503,52 @@ def _transform(self, dataset): unsupported_pipeline = Pipeline(stages=[CustomTransformer()]) unsupported_model = unsupported_pipeline.fit(spark_model_iris.spark_df) - with pytest.raises(mleap.MLeapSerializationException): + with pytest.raises(ValueError): sparkm.save_model(spark_model=unsupported_model, path=model_path, sample_input=spark_model_iris.spark_df) -def test_mleap_module_model_save_with_valid_sample_input_produces_mleap_flavor( +@pytest.mark.large +def test_spark_module_model_save_with_relative_path_and_valid_sample_input_produces_mleap_flavor( + spark_model_iris): + with TempDir(chdr=True) as tmp: + model_path = os.path.basename(tmp.path("model")) + mlflow_model = Model() + sparkm.save_model(spark_model=spark_model_iris.model, + path=model_path, + sample_input=spark_model_iris.spark_df, + mlflow_model=mlflow_model) + assert mleap.FLAVOR_NAME in mlflow_model.flavors + + config_path = os.path.join(model_path, "MLmodel") + assert os.path.exists(config_path) + config = Model.load(config_path) + assert mleap.FLAVOR_NAME in config.flavors + + +@pytest.mark.large +def test_mleap_module_model_save_with_relative_path_and_valid_sample_input_produces_mleap_flavor( + spark_model_iris): + with TempDir(chdr=True) as tmp: + model_path = os.path.basename(tmp.path("model")) + mlflow_model = Model() + mleap.save_model(spark_model=spark_model_iris.model, + path=model_path, + sample_input=spark_model_iris.spark_df, + mlflow_model=mlflow_model) + assert mleap.FLAVOR_NAME in mlflow_model.flavors + + config_path = os.path.join(model_path, "MLmodel") + assert os.path.exists(config_path) + config = Model.load(config_path) + assert mleap.FLAVOR_NAME in config.flavors + + +@pytest.mark.large +def test_mleap_module_model_save_with_absolute_path_and_valid_sample_input_produces_mleap_flavor( spark_model_iris, model_path): + model_path = os.path.abspath(model_path) mlflow_model = Model() mleap.save_model(spark_model=spark_model_iris.model, path=model_path, @@ -251,6 +562,7 @@ def test_mleap_module_model_save_with_valid_sample_input_produces_mleap_flavor( assert mleap.FLAVOR_NAME in config.flavors +@pytest.mark.large def test_mleap_module_model_save_with_invalid_sample_input_type_raises_exception( spark_model_iris, model_path): with pytest.raises(Exception): @@ -260,6 +572,7 @@ def test_mleap_module_model_save_with_invalid_sample_input_type_raises_exception sample_input=invalid_input) +@pytest.mark.large def test_mleap_module_model_save_with_unsupported_transformer_raises_serialization_exception( spark_model_iris, model_path): class CustomTransformer(JavaModel): @@ -275,6 +588,7 @@ def _transform(self, dataset): sample_input=spark_model_iris.spark_df) +@pytest.mark.large def test_save_with_sample_input_containing_unsupported_data_type_raises_serialization_exception( spark_context, model_path): sql_context = SQLContext(spark_context) diff --git a/tests/store/dump_schema.py b/tests/store/dump_schema.py new file mode 100644 index 0000000000000..8c8eb330d9c8f --- /dev/null +++ b/tests/store/dump_schema.py @@ -0,0 +1,42 @@ +"""Script that generates a dump of the MLflow tracking database schema""" +import os +import shutil +import sys + +import sqlalchemy +from sqlalchemy.schema import CreateTable, MetaData +import tempfile + +from mlflow.store.sqlalchemy_store import SqlAlchemyStore + + +def dump_db_schema(db_url, 
dst_file): + engine = sqlalchemy.create_engine(db_url) + created_tables_metadata = MetaData(bind=engine) + created_tables_metadata.reflect() + # Write out table schema as described in + # https://docs.sqlalchemy.org/en/13/faq/ + # metadata_schema.html#how-can-i-get-the-create-table-drop-table-output-as-a-string + schema = "".join([str(CreateTable(ti)) for ti in created_tables_metadata.sorted_tables]) + print("Writing database schema to %s" % dst_file) + with open(dst_file, "w") as handle: + handle.write(schema) + + +def dump_sqlalchemy_store_schema(dst_file): + db_tmpdir = tempfile.mkdtemp() + try: + path = os.path.join(db_tmpdir, "db_file") + db_url = "sqlite:///%s" % path + SqlAlchemyStore(db_url, db_tmpdir) + dump_db_schema(db_url, dst_file) + finally: + shutil.rmtree(db_tmpdir) + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("usage: python tests/store/dump_schema.py [destination_file]. " + "Dumps up-to-date database schema to the specified file.") + sys.exit(1) + dump_sqlalchemy_store_schema(sys.argv[1]) diff --git a/tests/store/test_abstract_store.py b/tests/store/test_abstract_store.py new file mode 100644 index 0000000000000..640028ad41d19 --- /dev/null +++ b/tests/store/test_abstract_store.py @@ -0,0 +1,132 @@ +import mock + +from mlflow.store import SEARCH_MAX_RESULTS_DEFAULT +from mlflow.store.abstract_store import AbstractStore +from mlflow.entities import ViewType + + +class AbstractStoreTestImpl(AbstractStore): + + def list_experiments(self, view_type=ViewType.ACTIVE_ONLY): + raise NotImplementedError() + + def create_experiment(self, name, artifact_location): + raise NotImplementedError() + + def get_experiment(self, experiment_id): + raise NotImplementedError() + + def delete_experiment(self, experiment_id): + raise NotImplementedError() + + def restore_experiment(self, experiment_id): + raise NotImplementedError() + + def rename_experiment(self, experiment_id, new_name): + raise NotImplementedError() + + def get_run(self, run_id): + raise NotImplementedError() + + def update_run_info(self, run_id, run_status, end_time): + raise NotImplementedError() + + def create_run(self, experiment_id, user_id, start_time, tags): + raise NotImplementedError() + + def delete_run(self, run_id): + raise NotImplementedError() + + def restore_run(self, run_id): + raise NotImplementedError() + + def get_metric_history(self, run_id, metric_key): + raise NotImplementedError() + + def _search_runs(self, experiment_ids, filter_string, run_view_type, max_results, order_by, + page_token): + raise NotImplementedError() + + def log_batch(self, run_id, metrics, params, tags): + raise NotImplementedError() + + +def test_get_experiment_by_name(): + experiments = [mock.Mock(), mock.Mock(), mock.Mock()] + # Configure name after mock creation as name is a reserved argument to Mock() + experiments[1].configure_mock(name="my experiment") + with mock.patch.object(AbstractStoreTestImpl, "list_experiments", return_value=experiments): + store = AbstractStoreTestImpl() + assert store.get_experiment_by_name("my experiment") == experiments[1] + store.list_experiments.assert_called_once_with(ViewType.ALL) + + +def test_get_experiment_by_name_missing(): + with mock.patch.object(AbstractStoreTestImpl, "list_experiments", return_value=[]): + store = AbstractStoreTestImpl() + assert store.get_experiment_by_name("my experiment") is None + store.list_experiments.assert_called_once_with(ViewType.ALL) + + +def test_log_metric(): + run_id = mock.Mock() + metric = mock.Mock() + + with 
mock.patch.object(AbstractStoreTestImpl, "log_batch"): + store = AbstractStoreTestImpl() + store.log_metric(run_id, metric) + store.log_batch.assert_called_once_with( + run_id, metrics=[metric], params=[], tags=[] + ) + + +def test_log_param(): + run_id = mock.Mock() + param = mock.Mock() + + with mock.patch.object(AbstractStoreTestImpl, "log_batch"): + store = AbstractStoreTestImpl() + store.log_param(run_id, param) + store.log_batch.assert_called_once_with( + run_id, metrics=[], params=[param], tags=[] + ) + + +def test_set_tag(): + run_id = mock.Mock() + tag = mock.Mock() + + with mock.patch.object(AbstractStoreTestImpl, "log_batch"): + store = AbstractStoreTestImpl() + store.set_tag(run_id, tag) + store.log_batch.assert_called_once_with( + run_id, metrics=[], params=[], tags=[tag] + ) + + +def test_list_run_infos(): + experiment_id = mock.Mock() + view_type = mock.Mock() + run_infos = [mock.Mock(), mock.Mock()] + runs = [mock.Mock(info=info) for info in run_infos] + + with mock.patch.object(AbstractStoreTestImpl, "search_runs", return_value=runs): + store = AbstractStoreTestImpl() + assert store.list_run_infos(experiment_id, view_type) == run_infos + store.search_runs.assert_called_once_with([experiment_id], None, view_type) + + +def test_search_runs(): + experiment_id = mock.Mock() + view_type = mock.Mock() + runs = [mock.Mock(), mock.Mock()] + token = "adfoiweroh12334kj129318934u" + + with mock.patch.object(AbstractStoreTestImpl, "_search_runs", return_value=(runs, token)): + store = AbstractStoreTestImpl() + result = store.search_runs([experiment_id], None, view_type) + for i in range(len(result)): + assert result[i] == runs[i] + assert result.token == token + store._search_runs.assert_called_once_with([experiment_id], None, view_type, + SEARCH_MAX_RESULTS_DEFAULT, None, None) diff --git a/tests/store/test_artifact_repository_registry.py b/tests/store/test_artifact_repository_registry.py new file mode 100644 index 0000000000000..080ddb819d8d6 --- /dev/null +++ b/tests/store/test_artifact_repository_registry.py @@ -0,0 +1,119 @@ +import mock +import pytest +from six.moves import reload_module as reload + +import mlflow +from mlflow.store.artifact_repository_registry import ArtifactRepositoryRegistry + + +def test_standard_artifact_registry(): + mock_entrypoint = mock.Mock() + mock_entrypoint.name = "mock-scheme" + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ): + # Entrypoints are registered at import time, so we need to reload the + # module to register the entrypoint given by the mocked + # entrypoints.get_group_all + reload(mlflow.store.artifact_repository_registry) + + expected_artifact_repository_registry = { + '', + 's3', + 'gs', + 'wasbs', + 'ftp', + 'sftp', + 'dbfs', + 'mock-scheme' + } + + assert expected_artifact_repository_registry.issubset( + mlflow.store.artifact_repository_registry._artifact_repository_registry._registry.keys() + ) + + +@pytest.mark.large +def test_plugin_registration_via_installed_package(): + """This test requires the package in tests/resources/mlflow-test-plugin to be installed""" + + reload(mlflow.store.artifact_repository_registry) + + assert ( + "file-plugin" in + mlflow.store.artifact_repository_registry._artifact_repository_registry._registry + ) + + from mlflow_test_plugin import PluginLocalArtifactRepository + + test_uri = "file-plugin:test-path" + + plugin_repo = mlflow.store.artifact_repository_registry.get_artifact_repository(test_uri) + + assert isinstance(plugin_repo, PluginLocalArtifactRepository) + 
assert plugin_repo.is_plugin + + +def test_plugin_registration(): + artifact_repository_registry = ArtifactRepositoryRegistry() + + mock_plugin = mock.Mock() + artifact_repository_registry.register("mock-scheme", mock_plugin) + assert "mock-scheme" in artifact_repository_registry._registry + repository_instance = artifact_repository_registry.get_artifact_repository( + artifact_uri="mock-scheme://fake-host/fake-path" + ) + assert repository_instance == mock_plugin.return_value + + mock_plugin.assert_called_once_with("mock-scheme://fake-host/fake-path") + + +def test_get_unknown_scheme(): + artifact_repository_registry = ArtifactRepositoryRegistry() + + with pytest.raises(mlflow.exceptions.MlflowException, + match="Could not find a registered artifact repository"): + artifact_repository_registry.get_artifact_repository("unknown-scheme://") + + +def test_plugin_registration_via_entrypoints(): + mock_plugin_function = mock.Mock() + mock_entrypoint = mock.Mock(load=mock.Mock(return_value=mock_plugin_function)) + mock_entrypoint.name = "mock-scheme" + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + + artifact_repository_registry = ArtifactRepositoryRegistry() + artifact_repository_registry.register_entrypoints() + + assert ( + artifact_repository_registry.get_artifact_repository("mock-scheme://fake-host/fake-path") + == mock_plugin_function.return_value + ) + + mock_plugin_function.assert_called_once_with("mock-scheme://fake-host/fake-path") + mock_get_group_all.assert_called_once_with("mlflow.artifact_repository") + + +@pytest.mark.parametrize("exception", + [AttributeError("test exception"), + ImportError("test exception")]) +def test_plugin_registration_failure_via_entrypoints(exception): + mock_entrypoint = mock.Mock(load=mock.Mock(side_effect=exception)) + mock_entrypoint.name = "mock-scheme" + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + + repo_registry = ArtifactRepositoryRegistry() + + # Check that the raised warning contains the message from the original exception + with pytest.warns(UserWarning, match="test exception"): + repo_registry.register_entrypoints() + + mock_entrypoint.load.assert_called_once() + mock_get_group_all.assert_called_once_with("mlflow.artifact_repository") diff --git a/tests/store/test_azure_blob_artifact_repo.py b/tests/store/test_azure_blob_artifact_repo.py index 6117bc5763f95..bb06822d63afd 100644 --- a/tests/store/test_azure_blob_artifact_repo.py +++ b/tests/store/test_azure_blob_artifact_repo.py @@ -1,15 +1,18 @@ import os +import posixpath import mock import pytest from azure.storage.blob import Blob, BlobPrefix, BlobProperties, BlockBlobService -from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.exceptions import MlflowException +from mlflow.store.artifact_repository_registry import get_artifact_repository from mlflow.store.azure_blob_artifact_repo import AzureBlobArtifactRepository TEST_ROOT_PATH = "some/path" -TEST_URI = "wasbs://container@account.blob.core.windows.net/" + TEST_ROOT_PATH +TEST_BLOB_CONTAINER_ROOT = "wasbs://container@account.blob.core.windows.net/" +TEST_URI = os.path.join(TEST_BLOB_CONTAINER_ROOT, TEST_ROOT_PATH) class MockBlobList(object): @@ -44,7 +47,7 @@ def test_artifact_uri_factory(mock_client): # We pass in the mock_client here to clear Azure environment variables, but we don't use it; # We do need to set up a fake access key for the code to run though 
os.environ['AZURE_STORAGE_ACCESS_KEY'] = '' - repo = ArtifactRepository.from_artifact_uri(TEST_URI, mock.Mock()) + repo = get_artifact_repository(TEST_URI) assert isinstance(repo, AzureBlobArtifactRepository) del os.environ['AZURE_STORAGE_ACCESS_KEY'] @@ -130,9 +133,12 @@ def test_log_artifacts(mock_client, tmpdir): repo.log_artifacts(parentd.strpath) mock_client.create_blob_from_path.assert_has_calls([ - mock.call("container", TEST_ROOT_PATH + "/a.txt", parentd.strpath + "/a.txt"), - mock.call("container", TEST_ROOT_PATH + "/subdir/b.txt", subd.strpath + "/b.txt"), - mock.call("container", TEST_ROOT_PATH + "/subdir/c.txt", subd.strpath + "/c.txt"), + mock.call("container", TEST_ROOT_PATH + "/a.txt", + os.path.normpath(parentd.strpath + "/a.txt")), + mock.call("container", TEST_ROOT_PATH + "/subdir/b.txt", + os.path.normpath(subd.strpath + "/b.txt")), + mock.call("container", TEST_ROOT_PATH + "/subdir/c.txt", + os.path.normpath(subd.strpath + "/c.txt")), ], any_order=True) @@ -155,7 +161,9 @@ def create_file(container, cloud_path, local_path): "container", TEST_ROOT_PATH + "/test.txt", mock.ANY) -def test_download_directory_artifact(mock_client, tmpdir): +def test_download_directory_artifact_succeeds_when_artifact_root_is_not_blob_container_root( + mock_client, tmpdir): + assert TEST_URI is not TEST_BLOB_CONTAINER_ROOT repo = AzureBlobArtifactRepository(TEST_URI, mock_client) file_path_1 = "file_1" @@ -163,11 +171,11 @@ def test_download_directory_artifact(mock_client, tmpdir): blob_props_1 = BlobProperties() blob_props_1.content_length = 42 - blob_1 = Blob(os.path.join(TEST_ROOT_PATH, file_path_1), props=blob_props_1) + blob_1 = Blob(posixpath.join(TEST_ROOT_PATH, file_path_1), props=blob_props_1) blob_props_2 = BlobProperties() blob_props_2.content_length = 42 - blob_2 = Blob(os.path.join(TEST_ROOT_PATH, file_path_2), props=blob_props_2) + blob_2 = Blob(posixpath.join(TEST_ROOT_PATH, file_path_2), props=blob_props_2) def get_mock_listing(*args, **kwargs): """ @@ -178,7 +186,7 @@ def get_mock_listing(*args, **kwargs): directory traversal. """ # pylint: disable=unused-argument - if os.path.abspath(kwargs["prefix"]) == os.path.abspath(TEST_ROOT_PATH): + if posixpath.abspath(kwargs["prefix"]) == posixpath.abspath(TEST_ROOT_PATH): return MockBlobList([blob_1, blob_2]) else: return MockBlobList([]) @@ -198,3 +206,87 @@ def create_file(container, cloud_path, local_path): dir_contents = os.listdir(tmpdir.strpath) assert file_path_1 in dir_contents assert file_path_2 in dir_contents + + +def test_download_directory_artifact_succeeds_when_artifact_root_is_blob_container_root( + mock_client, tmpdir): + repo = AzureBlobArtifactRepository(TEST_BLOB_CONTAINER_ROOT, mock_client) + + subdir_path = "my_directory" + dir_prefix = BlobPrefix() + dir_prefix.name = subdir_path + + file_path_1 = "file_1" + file_path_2 = "file_2" + + blob_props_1 = BlobProperties() + blob_props_1.content_length = 42 + blob_1 = Blob(os.path.join(subdir_path, file_path_1), props=blob_props_1) + + blob_props_2 = BlobProperties() + blob_props_2.content_length = 42 + blob_2 = Blob(os.path.join(subdir_path, file_path_2), props=blob_props_2) + + def get_mock_listing(*args, **kwargs): + """ + Produces a mock listing that only contains content if the specified prefix is the artifact + root or a relevant subdirectory. This allows us to mock `list_artifacts` during the + `_download_artifacts_into` subroutine without recursively listing the same artifacts at + every level of the directory traversal. 
+ """ + # pylint: disable=unused-argument + if posixpath.abspath(kwargs["prefix"]) == "/": + return MockBlobList([dir_prefix]) + if posixpath.abspath(kwargs["prefix"]) == posixpath.abspath(subdir_path): + return MockBlobList([blob_1, blob_2]) + else: + return MockBlobList([]) + + def create_file(container, cloud_path, local_path): + # pylint: disable=unused-argument + fname = os.path.basename(local_path) + f = tmpdir.join(fname) + f.write("hello world!") + + mock_client.list_blobs.side_effect = get_mock_listing + mock_client.get_blob_to_path.side_effect = create_file + + # Ensure that the root directory can be downloaded successfully + repo.download_artifacts("") + # Ensure that the `mkfile` side effect copied all of the download artifacts into `tmpdir` + dir_contents = os.listdir(tmpdir.strpath) + assert file_path_1 in dir_contents + assert file_path_2 in dir_contents + + +def test_download_artifact_throws_value_error_when_listed_blobs_do_not_contain_artifact_root_prefix( + mock_client): + repo = AzureBlobArtifactRepository(TEST_URI, mock_client) + + # Create a "bad blob" with a name that is not prefixed by the root path of the artifact store + bad_blob_props = BlobProperties() + bad_blob_props.content_length = 42 + bad_blob = Blob("file_path", props=bad_blob_props) + + def get_mock_listing(*args, **kwargs): + """ + Produces a mock listing that only contains content if the + specified prefix is the artifact root. This allows us to mock + `list_artifacts` during the `_download_artifacts_into` subroutine + without recursively listing the same artifacts at every level of the + directory traversal. + """ + # pylint: disable=unused-argument + if posixpath.abspath(kwargs["prefix"]) == posixpath.abspath(TEST_ROOT_PATH): + # Return a blob that is not prefixed by the root path of the artifact store. 
This + # should result in an exception being raised + return MockBlobList([bad_blob]) + else: + return MockBlobList([]) + + mock_client.list_blobs.side_effect = get_mock_listing + + with pytest.raises(MlflowException) as exc: + repo.download_artifacts("") + + assert "Azure blob does not begin with the specified artifact path" in str(exc) diff --git a/tests/store/test_cli.py b/tests/store/test_cli.py index 098f34e742b0e..7f63eb544cfb9 100644 --- a/tests/store/test_cli.py +++ b/tests/store/test_cli.py @@ -1,7 +1,16 @@ import json +import os +import posixpath +from mock import mock + +import mlflow +import mlflow.pyfunc from mlflow.entities import FileInfo from mlflow.store.cli import _file_infos_to_json +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.file_utils import TempDir +from subprocess import Popen, STDOUT, PIPE def test_file_info_to_json(): @@ -18,3 +27,69 @@ def test_file_info_to_json(): "path": "/my/dir", "is_dir": True, }] + + +def test_download_from_uri(): + class TestArtifactRepo: + def __init__(self, scheme): + self.scheme = scheme + + def download_artifacts(self, artifact_path, **kwargs): # pylint: disable=unused-argument + return (self.scheme, artifact_path) + + def test_get_artifact_repository(artifact_uri): + return TestArtifactRepo(artifact_uri) + + pairs = [ + ("path", ("", "path")), + ("path/", ("path", "")), + ("/path", ("/", "path")), + ("/path/", ("/path", "")), + ("path/to/dir", ("path/to", "dir")), + ("file:", ("file:", "")), + ("file:path", ("file:", "path")), + ("file:path/", ("file:path", "")), + ("file:path/to/dir", ("file:path/to", "dir")), + ("file:/", ("file:///", "")), + ("file:/path", ("file:///", "path")), + ("file:/path/", ("file:///path", "")), + ("file:/path/to/dir", ("file:///path/to", "dir")), + ("file:///", ("file:///", "")), + ("file:///path", ("file:///", "path")), + ("file:///path/", ("file:///path", "")), + ("file:///path/to/dir", ("file:///path/to", "dir")), + ("s3://", ("s3:", "")), + ("s3://path", ("s3://path", "")), # path is netloc in this case + ("s3://path/", ("s3://path/", "")), + ("s3://path/to/", ("s3://path/to", "")), + ("s3://path/to", ("s3://path/", "to")), + ("s3://path/to/dir", ("s3://path/to", "dir")), + ] + with mock.patch("mlflow.tracking.artifact_utils.get_artifact_repository") \ + as get_artifact_repo_mock: + get_artifact_repo_mock.side_effect = test_get_artifact_repository + + for uri, expected_result in pairs: + actual_result = _download_artifact_from_uri(uri) + assert expected_result == actual_result + + +def test_download_artifacts_from_uri(): + with mlflow.start_run() as run: + with TempDir() as tmp: + local_path = tmp.path("test") + with open(local_path, "w") as f: + f.write("test") + mlflow.log_artifact(local_path, "test") + command = ["mlflow", "artifacts", "download", "-u"] + # Test with run uri + run_uri = "runs:/{run_id}/test".format(run_id=run.info.run_id) + actual_uri = posixpath.join(run.info.artifact_uri, "test") + for uri in (run_uri, actual_uri): + p = Popen(command + [uri], stdout=PIPE, + stderr=STDOUT) + output = p.stdout.readlines() + downloaded_file_path = output[-1].strip() + downloaded_file = os.listdir(downloaded_file_path)[0] + with open(os.path.join(downloaded_file_path, downloaded_file), "r") as f: + assert f.read() == "test" diff --git a/tests/store/test_dbfs_artifact_repo_delegation.py b/tests/store/test_dbfs_artifact_repo_delegation.py new file mode 100644 index 0000000000000..338246446f10f --- /dev/null +++ 
b/tests/store/test_dbfs_artifact_repo_delegation.py @@ -0,0 +1,36 @@ +import os +import mock +import pytest + +from mlflow.store.artifact_repository_registry import get_artifact_repository +from mlflow.store.local_artifact_repo import LocalArtifactRepository +from mlflow.store.dbfs_artifact_repo import DbfsRestArtifactRepository + +from mlflow.utils.rest_utils import MlflowHostCreds + + +@pytest.fixture() +def host_creds_mock(): + with mock.patch('mlflow.store.dbfs_artifact_repo._get_host_creds_from_default_store') \ + as get_creds_mock: + get_creds_mock.return_value = lambda: MlflowHostCreds('http://host') + yield + + +@mock.patch('mlflow.utils.databricks_utils.is_dbfs_fuse_available') +def test_dbfs_artifact_repo_delegates_to_correct_repo( + is_dbfs_fuse_available, host_creds_mock): # pylint: disable=unused-argument + is_dbfs_fuse_available.return_value = True + artifact_uri = "dbfs:/databricks/my/absolute/dbfs/path" + repo = get_artifact_repository(artifact_uri) + assert isinstance(repo, LocalArtifactRepository) + assert repo.artifact_dir == os.path.join( + os.path.sep, "dbfs", "databricks", "my", "absolute", "dbfs", "path") + with mock.patch.dict(os.environ, {'MLFLOW_ENABLE_DBFS_FUSE_ARTIFACT_REPO': 'false'}): + fuse_disabled_repo = get_artifact_repository(artifact_uri) + assert isinstance(fuse_disabled_repo, DbfsRestArtifactRepository) + assert fuse_disabled_repo.artifact_uri == artifact_uri + is_dbfs_fuse_available.return_value = False + rest_repo = get_artifact_repository(artifact_uri) + assert isinstance(rest_repo, DbfsRestArtifactRepository) + assert rest_repo.artifact_uri == artifact_uri diff --git a/tests/store/test_dbfs_fuse_artifact_repo.py b/tests/store/test_dbfs_fuse_artifact_repo.py new file mode 100644 index 0000000000000..ea0336b407e3c --- /dev/null +++ b/tests/store/test_dbfs_fuse_artifact_repo.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +import os + +import pytest +import mock +from mock import PropertyMock + +from mlflow.store.artifact_repository_registry import get_artifact_repository + +TEST_FILE_1_CONTENT = u"Hello 🍆🍔".encode("utf-8") +TEST_FILE_2_CONTENT = u"World 🍆🍔🍆".encode("utf-8") +TEST_FILE_3_CONTENT = u"¡🍆🍆🍔🍆🍆!".encode("utf-8") + + +@pytest.fixture() +def artifact_dir(tmpdir): + return tmpdir.join("artifacts-to-log").strpath + + +@pytest.fixture() +def force_dbfs_fuse_repo(artifact_dir): + in_databricks_mock_path = 'mlflow.utils.databricks_utils.is_dbfs_fuse_available' + artifact_dir_mock_path = 'mlflow.store.local_artifact_repo.LocalArtifactRepository.artifact_dir' + with mock.patch(in_databricks_mock_path) as is_dbfs_fuse_available, \ + mock.patch(artifact_dir_mock_path, new_callable=PropertyMock) as artifact_dir_mock: + is_dbfs_fuse_available.return_value = True + artifact_dir_mock.return_value = artifact_dir + yield + + +@pytest.fixture() +def dbfs_fuse_artifact_repo(force_dbfs_fuse_repo): # pylint: disable=unused-argument + return get_artifact_repository('dbfs:/unused/path/replaced/by/mock') + + +@pytest.fixture() +def files_dir(tmpdir): + return tmpdir.mkdir("files") + + +@pytest.fixture() +def test_file(files_dir): + p = files_dir.join("test.txt") + with open(p.strpath, 'wb') as f: + f.write(TEST_FILE_1_CONTENT) + return p + + +@pytest.fixture() +def test_dir(files_dir): + with open(files_dir.mkdir('subdir').join('test.txt').strpath, 'wb') as f: + f.write(TEST_FILE_2_CONTENT) + with open(files_dir.join('test.txt').strpath, 'wb') as f: + f.write(TEST_FILE_3_CONTENT) + with open(files_dir.join('empty-file').strpath, 'wb'): + pass + return files_dir + + 
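The delegation test above expects a `dbfs:/` URI to resolve to the local FUSE mount under `/dbfs` when FUSE is available, and to fall back to the REST repository otherwise. A minimal sketch of the URI-to-path mapping being asserted; `dbfs_uri_to_fuse_path` is a hypothetical helper written for illustration, since MLflow performs this translation internally:

import posixpath

def dbfs_uri_to_fuse_path(artifact_uri):
    # "dbfs:/databricks/my/path" -> "/dbfs/databricks/my/path"
    assert artifact_uri.startswith("dbfs:/")
    return posixpath.join("/dbfs", artifact_uri[len("dbfs:/"):])

assert (dbfs_uri_to_fuse_path("dbfs:/databricks/my/absolute/dbfs/path")
        == "/dbfs/databricks/my/absolute/dbfs/path")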
+class TestDbfsFuseArtifactRepository(object): + @pytest.mark.parametrize("artifact_path", [ + None, + 'output', + '', + ]) + def test_log_artifact(self, dbfs_fuse_artifact_repo, test_file, artifact_path, artifact_dir): + dbfs_fuse_artifact_repo.log_artifact(test_file.strpath, artifact_path) + print(os.listdir(artifact_dir)) + expected_file_path = os.path.join( + artifact_dir, + artifact_path if artifact_path else '', os.path.basename(test_file.strpath)) + with open(expected_file_path, 'rb') as handle: + data = handle.read() + assert data == TEST_FILE_1_CONTENT + + def test_log_artifact_empty_file(self, dbfs_fuse_artifact_repo, test_dir, artifact_dir): + dbfs_fuse_artifact_repo.log_artifact(os.path.join(test_dir.strpath, "empty-file")) + expected_file_path = os.path.join(artifact_dir, "empty-file") + with open(expected_file_path, 'rb') as handle: + data = handle.read() + assert data == "".encode("utf-8") + + @pytest.mark.parametrize("artifact_path", [ + None, + '', # should behave like '/' and exclude base name of logged_dir + 'abc', + # We should add '.', + ]) + def test_log_artifacts(self, dbfs_fuse_artifact_repo, test_dir, artifact_path, artifact_dir): + dbfs_fuse_artifact_repo.log_artifacts(test_dir.strpath, artifact_path) + artifact_dst_path = os.path.join(artifact_dir, artifact_path if artifact_path else '') + assert os.path.exists(artifact_dst_path) + expected_contents = { + "subdir/test.txt": TEST_FILE_2_CONTENT, + "test.txt": TEST_FILE_3_CONTENT, + "empty-file": "".encode("utf-8"), + } + for filename, contents in expected_contents.items(): + with open(os.path.join(artifact_dst_path, filename), 'rb') as handle: + assert handle.read() == contents + + def test_list_artifacts(self, dbfs_fuse_artifact_repo, test_dir): + assert len(dbfs_fuse_artifact_repo.list_artifacts()) == 0 + dbfs_fuse_artifact_repo.log_artifacts(test_dir.strpath) + artifacts = dbfs_fuse_artifact_repo.list_artifacts() + assert len(artifacts) == 3 + assert artifacts[0].path == 'empty-file' + assert artifacts[0].is_dir is False + assert artifacts[0].file_size == 0 + assert artifacts[1].path == 'subdir' + assert artifacts[1].is_dir is True + assert artifacts[1].file_size is None + assert artifacts[2].path == 'test.txt' + assert artifacts[2].is_dir is False + assert artifacts[2].file_size == 23 + + def test_download_artifacts(self, dbfs_fuse_artifact_repo, test_dir): + dbfs_fuse_artifact_repo.log_artifacts(test_dir.strpath) + local_download_dir = dbfs_fuse_artifact_repo.download_artifacts("") + expected_contents = { + "subdir/test.txt": TEST_FILE_2_CONTENT, + "test.txt": TEST_FILE_3_CONTENT, + "empty-file": "".encode("utf-8"), + } + for filename, contents in expected_contents.items(): + with open(os.path.join(local_download_dir, filename), 'rb') as handle: + assert handle.read() == contents diff --git a/tests/store/test_dbfs_artifact_repo.py b/tests/store/test_dbfs_rest_artifact_repo.py similarity index 69% rename from tests/store/test_dbfs_artifact_repo.py rename to tests/store/test_dbfs_rest_artifact_repo.py index 8c381a6adaf70..ff2e83b5d038d 100644 --- a/tests/store/test_dbfs_artifact_repo.py +++ b/tests/store/test_dbfs_rest_artifact_repo.py @@ -1,25 +1,33 @@ # -*- coding: utf-8 -*- import json +import os import pytest import mock from mock import Mock -from mlflow.exceptions import IllegalArtifactPathError, MlflowException -from mlflow.store.dbfs_artifact_repo import DbfsArtifactRepository +from mlflow.exceptions import MlflowException +from mlflow.store.artifact_repository_registry import get_artifact_repository
+from mlflow.store.dbfs_artifact_repo import _get_host_creds_from_default_store +from mlflow.store.dbfs_artifact_repo import DbfsRestArtifactRepository +from mlflow.store.file_store import FileStore +from mlflow.store.rest_store import RestStore from mlflow.utils.rest_utils import MlflowHostCreds @pytest.fixture() def dbfs_artifact_repo(): - return DbfsArtifactRepository('dbfs:/test/', lambda: MlflowHostCreds('http://host')) + with mock.patch('mlflow.store.dbfs_artifact_repo._get_host_creds_from_default_store') \ + as get_creds_mock: + get_creds_mock.return_value = lambda: MlflowHostCreds('http://host') + return get_artifact_repository('dbfs:/test/') TEST_FILE_1_CONTENT = u"Hello 🍆🍔".encode("utf-8") TEST_FILE_2_CONTENT = u"World 🍆🍔🍆".encode("utf-8") TEST_FILE_3_CONTENT = u"¡🍆🍆🍔🍆🍆!".encode("utf-8") -DBFS_ARTIFACT_REPOSITORY_PACKAGE = 'mlflow.store.dbfs_artifact_repo.DbfsArtifactRepository' +DBFS_ARTIFACT_REPOSITORY_PACKAGE = 'mlflow.store.dbfs_artifact_repo.DbfsRestArtifactRepository' @pytest.fixture() @@ -36,6 +44,8 @@ def test_dir(tmpdir): f.write(TEST_FILE_2_CONTENT) with open(tmpdir.join('test.txt').strpath, 'wb') as f: f.write(bytes(TEST_FILE_3_CONTENT)) + with open(tmpdir.join('empty-file').strpath, 'w'): + pass return tmpdir @@ -62,10 +72,13 @@ def test_dir(tmpdir): class TestDbfsArtifactRepository(object): def test_init_validation_and_cleaning(self): - repo = DbfsArtifactRepository('dbfs:/test/', lambda: MlflowHostCreds('http://host')) - assert repo.artifact_uri == 'dbfs:/test' - with pytest.raises(MlflowException): - DbfsArtifactRepository('s3://test', lambda: MlflowHostCreds('http://host')) + with mock.patch('mlflow.store.dbfs_artifact_repo._get_host_creds_from_default_store') \ + as get_creds_mock: + get_creds_mock.return_value = lambda: MlflowHostCreds('http://host') + repo = get_artifact_repository('dbfs:/test/') + assert repo.artifact_uri == 'dbfs:/test' + with pytest.raises(MlflowException): + DbfsRestArtifactRepository('s3://test') @pytest.mark.parametrize("artifact_path,expected_endpoint", [ (None, '/dbfs/test/test.txt'), @@ -85,8 +98,22 @@ def my_http_request(host_creds, **kwargs): # pylint: disable=unused-argument assert endpoints == [expected_endpoint] assert data == [TEST_FILE_1_CONTENT] - def test_log_artifact_empty(self, dbfs_artifact_repo, test_file): - with pytest.raises(IllegalArtifactPathError): + def test_log_artifact_empty_file(self, dbfs_artifact_repo, test_dir): + with mock.patch('mlflow.utils.rest_utils.http_request') as http_request_mock: + def my_http_request(host_creds, **kwargs): # pylint: disable=unused-argument + assert kwargs['endpoint'] == "/dbfs/test/empty-file" + assert kwargs['data'] == "" + return Mock(status_code=200) + http_request_mock.side_effect = my_http_request + dbfs_artifact_repo.log_artifact(os.path.join(test_dir.strpath, "empty-file")) + + def test_log_artifact_empty_artifact_path(self, dbfs_artifact_repo, test_file): + with mock.patch('mlflow.utils.rest_utils.http_request') as http_request_mock: + def my_http_request(host_creds, **kwargs): # pylint: disable=unused-argument + assert kwargs['endpoint'] == "/dbfs/test/test.txt" + assert kwargs['data'].read() == TEST_FILE_1_CONTENT + return Mock(status_code=200) + http_request_mock.side_effect = my_http_request dbfs_artifact_repo.log_artifact(test_file.strpath, '') def test_log_artifact_error(self, dbfs_artifact_repo, test_file): @@ -107,17 +134,22 @@ def test_log_artifacts(self, dbfs_artifact_repo, test_dir, artifact_path): def my_http_request(host_creds, **kwargs): # pylint: 
disable=unused-argument endpoints.append(kwargs['endpoint']) - data.append(kwargs['data'].read()) + if kwargs['endpoint'] == "/dbfs/test/empty-file": + data.append(kwargs['data']) + else: + data.append(kwargs['data'].read()) return Mock(status_code=200) http_request_mock.side_effect = my_http_request dbfs_artifact_repo.log_artifacts(test_dir.strpath, artifact_path) assert set(endpoints) == { '/dbfs/test/subdir/test.txt', - '/dbfs/test/test.txt' + '/dbfs/test/test.txt', + '/dbfs/test/empty-file', } assert set(data) == { TEST_FILE_2_CONTENT, TEST_FILE_3_CONTENT, + "", } def test_log_artifacts_error(self, dbfs_artifact_repo, test_dir): @@ -127,9 +159,10 @@ def test_log_artifacts_error(self, dbfs_artifact_repo, test_dir): dbfs_artifact_repo.log_artifacts(test_dir.strpath) @pytest.mark.parametrize("artifact_path,expected_endpoints", [ - ('a', {'/dbfs/test/a/subdir/test.txt', '/dbfs/test/a/test.txt'}), - ('a/', {'/dbfs/test/a/subdir/test.txt', '/dbfs/test/a/test.txt'}), - ('/', {'/dbfs/test/subdir/test.txt', '/dbfs/test/test.txt'}), + ('a', {'/dbfs/test/a/subdir/test.txt', '/dbfs/test/a/test.txt', '/dbfs/test/a/empty-file'}), + ('a/', {'/dbfs/test/a/subdir/test.txt', '/dbfs/test/a/test.txt', + '/dbfs/test/a/empty-file'}), + ('/', {'/dbfs/test/subdir/test.txt', '/dbfs/test/test.txt', '/dbfs/test/empty-file'}), ]) def test_log_artifacts_with_artifact_path(self, dbfs_artifact_repo, test_dir, artifact_path, expected_endpoints): @@ -183,3 +216,16 @@ def test_download_artifacts(self, dbfs_artifact_repo): _, kwargs_call_2 = chronological_download_calls[1] assert kwargs_call_1['endpoint'] == '/dbfs/test/dir' assert kwargs_call_2['endpoint'] == '/dbfs/test/a.txt' + + +def test_get_host_creds_from_default_store_file_store(): + with mock.patch('mlflow.tracking.utils._get_store') as get_store_mock: + get_store_mock.return_value = FileStore() + with pytest.raises(MlflowException): + _get_host_creds_from_default_store() + + +def test_get_host_creds_from_default_store_rest_store(): + with mock.patch('mlflow.tracking.utils._get_store') as get_store_mock: + get_store_mock.return_value = RestStore(lambda: MlflowHostCreds('http://host')) + assert isinstance(_get_host_creds_from_default_store()(), MlflowHostCreds) diff --git a/tests/store/test_file_store.py b/tests/store/test_file_store.py index ed6d9ab193a13..7a21504e08524 100644 --- a/tests/store/test_file_store.py +++ b/tests/store/test_file_store.py @@ -1,24 +1,30 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import os +import posixpath +import random import shutil +import six +import tempfile +import time import unittest import uuid -import time - +import mock import pytest -from mlflow.entities import Experiment, Metric, Param, RunTag, ViewType, RunInfo -from mlflow.exceptions import MlflowException +from mlflow.entities import Metric, Param, RunTag, ViewType, LifecycleStage, RunStatus, RunData +from mlflow.exceptions import MlflowException, MissingConfigException +from mlflow.store import SEARCH_MAX_RESULTS_DEFAULT from mlflow.store.file_store import FileStore -from mlflow.utils.file_utils import write_yaml -from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID -from tests.helper_functions import random_int, random_str +from mlflow.utils.file_utils import write_yaml, read_yaml, path_to_local_file_uri +from mlflow.protos.databricks_pb2 import ErrorCode, RESOURCE_DOES_NOT_EXIST, INTERNAL_ERROR + +from tests.helper_functions import random_int, random_str, safe_edit_yaml class TestFileStore(unittest.TestCase): - ROOT_LOCATION = "/tmp" + ROOT_LOCATION = 
tempfile.gettempdir() def setUp(self): self._create_root(TestFileStore.ROOT_LOCATION) @@ -27,11 +33,11 @@ def setUp(self): def _create_root(self, root): self.test_root = os.path.join(root, "test_file_store_%d" % random_int()) os.mkdir(self.test_root) - self.experiments = [random_int(100, int(1e9)) for _ in range(3)] + self.experiments = [str(random_int(100, int(1e9))) for _ in range(3)] self.exp_data = {} self.run_data = {} # Include default experiment - self.experiments.append(Experiment.DEFAULT_EXPERIMENT_ID) + self.experiments.append(FileStore.DEFAULT_EXPERIMENT_ID) for exp in self.experiments: # create experiment exp_folder = os.path.join(self.test_root, str(exp)) @@ -42,26 +48,22 @@ def _create_root(self, root): # add runs self.exp_data[exp]["runs"] = [] for _ in range(2): - run_uuid = uuid.uuid4().hex - self.exp_data[exp]["runs"].append(run_uuid) - run_folder = os.path.join(exp_folder, run_uuid) + run_id = uuid.uuid4().hex + self.exp_data[exp]["runs"].append(run_id) + run_folder = os.path.join(exp_folder, run_id) os.makedirs(run_folder) - run_info = {"run_uuid": run_uuid, + run_info = {"run_uuid": run_id, + "run_id": run_id, "experiment_id": exp, - "name": random_str(random_int(10, 40)), - "source_type": random_int(1, 4), - "source_name": random_str(random_int(100, 300)), - "entry_point_name": random_str(random_int(100, 300)), "user_id": random_str(random_int(10, 25)), - "status": random_int(1, 5), + "status": random.choice(RunStatus.all_status()), "start_time": random_int(1, 10), "end_time": random_int(20, 30), - "source_version": random_str(random_int(10, 30)), "tags": [], "artifact_uri": "%s/%s" % (run_folder, FileStore.ARTIFACTS_FOLDER_NAME), } write_yaml(run_folder, FileStore.META_DATA_FILE_NAME, run_info) - self.run_data[run_uuid] = run_info + self.run_data[run_id] = run_info # params params_folder = os.path.join(run_folder, FileStore.PARAMS_FOLDER_NAME) os.makedirs(params_folder) @@ -73,7 +75,7 @@ def _create_root(self, root): with open(param_file, 'w') as f: f.write(param_value) params[param_name] = param_value - self.run_data[run_uuid]["params"] = params + self.run_data[run_id]["params"] = params # metrics metrics_folder = os.path.join(run_folder, FileStore.METRICS_FOLDER_NAME) os.makedirs(metrics_folder) @@ -90,7 +92,7 @@ def _create_root(self, root): with open(metric_file, 'a') as f: f.write("%d %d\n" % (timestamp, metric_value)) metrics[metric_name] = values - self.run_data[run_uuid]["metrics"] = metrics + self.run_data[run_id]["metrics"] = metrics # artifacts os.makedirs(os.path.join(run_folder, FileStore.ARTIFACTS_FOLDER_NAME)) @@ -119,13 +121,16 @@ def test_list_experiments(self): self.assertEqual(exp.name, self.exp_data[exp_id]["name"]) self.assertEqual(exp.artifact_location, self.exp_data[exp_id]["artifact_location"]) + def _verify_experiment(self, fs, exp_id): + exp = fs.get_experiment(exp_id) + self.assertEqual(exp.experiment_id, exp_id) + self.assertEqual(exp.name, self.exp_data[exp_id]["name"]) + self.assertEqual(exp.artifact_location, self.exp_data[exp_id]["artifact_location"]) + def test_get_experiment(self): fs = FileStore(self.test_root) for exp_id in self.experiments: - exp = fs.get_experiment(exp_id) - self.assertEqual(exp.experiment_id, exp_id) - self.assertEqual(exp.name, self.exp_data[exp_id]["name"]) - self.assertEqual(exp.artifact_location, self.exp_data[exp_id]["artifact_location"]) + self._verify_experiment(fs, exp_id) # test that fake experiments dont exist. 
# look for random experiment ids between 8000, 15000 since created ones are (100, 2000) @@ -133,6 +138,13 @@ def test_get_experiment(self): with self.assertRaises(Exception): fs.get_experiment(exp_id) + def test_get_experiment_int_experiment_id_backcompat(self): + fs = FileStore(self.test_root) + exp_id = FileStore.DEFAULT_EXPERIMENT_ID + root_dir = os.path.join(self.test_root, exp_id) + with safe_edit_yaml(root_dir, "meta.yaml", self._experiment_id_edit_func): + self._verify_experiment(fs, exp_id) + def test_get_experiment_by_name(self): fs = FileStore(self.test_root) for exp_id in self.experiments: @@ -148,6 +160,15 @@ def test_get_experiment_by_name(self): exp = fs.get_experiment_by_name(exp_names) self.assertIsNone(exp) + def test_create_first_experiment(self): + fs = FileStore(self.test_root) + fs.list_experiments = mock.Mock(return_value=[]) + fs._create_experiment_with_id = mock.Mock() + fs.create_experiment(random_str(1)) + fs._create_experiment_with_id.assert_called_once() + experiment_id = fs._create_experiment_with_id.call_args[0][1] + self.assertEqual(experiment_id, FileStore.DEFAULT_EXPERIMENT_ID) + def test_create_experiment(self): fs = FileStore(self.test_root) @@ -157,7 +178,8 @@ def test_create_experiment(self): with self.assertRaises(Exception): fs.create_experiment("") - next_id = max(self.experiments) + 1 + exp_id_ints = (int(exp_id) for exp_id in self.experiments) + next_id = str(max(exp_id_ints) + 1) name = random_str(25) # since existing experiments are 10 chars long created_id = fs.create_experiment(name) # test that newly created experiment matches expected id @@ -166,6 +188,8 @@ def test_create_experiment(self): # get the new experiment (by id) and verify (by name) exp1 = fs.get_experiment(created_id) self.assertEqual(exp1.name, name) + self.assertEqual(exp1.artifact_location, + path_to_local_file_uri(posixpath.join(self.test_root, created_id))) # get the new experiment (by name) and verify (by id) exp2 = fs.get_experiment_by_name(name) @@ -191,8 +215,7 @@ def test_delete_restore_experiment(self): self.assertTrue(exp_id not in self._extract_ids(fs.list_experiments(ViewType.ACTIVE_ONLY))) self.assertTrue(exp_id in self._extract_ids(fs.list_experiments(ViewType.DELETED_ONLY))) self.assertTrue(exp_id in self._extract_ids(fs.list_experiments(ViewType.ALL))) - self.assertEqual(fs.get_experiment(exp_id).lifecycle_stage, - Experiment.DELETED_LIFECYCLE) + self.assertEqual(fs.get_experiment(exp_id).lifecycle_stage, LifecycleStage.DELETED) # restore it fs.restore_experiment(exp_id) @@ -205,8 +228,7 @@ def test_delete_restore_experiment(self): self.assertTrue(exp_id in self._extract_ids(fs.list_experiments(ViewType.ACTIVE_ONLY))) self.assertTrue(exp_id not in self._extract_ids(fs.list_experiments(ViewType.DELETED_ONLY))) self.assertTrue(exp_id in self._extract_ids(fs.list_experiments(ViewType.ALL))) - self.assertEqual(fs.get_experiment(exp_id).lifecycle_stage, - Experiment.ACTIVE_LIFECYCLE) + self.assertEqual(fs.get_experiment(exp_id).lifecycle_stage, LifecycleStage.ACTIVE) def test_rename_experiment(self): fs = FileStore(self.test_root) @@ -248,73 +270,128 @@ def test_create_run_in_deleted_experiment(self): # delete it fs.delete_experiment(exp_id) with pytest.raises(Exception): - fs.create_run(exp_id, 'user', 'name', 'source_type', 'source_name', 'entry_point_name', - 0, None, [], None) + fs.create_run(exp_id, 'user', 0, []) + + def test_create_run_returns_expected_run_data(self): + fs = FileStore(self.test_root) + no_tags_run = fs.create_run( + 
experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, user_id='user', start_time=0, tags=[]) + assert isinstance(no_tags_run.data, RunData) + assert len(no_tags_run.data.tags) == 0 + + tags_dict = { + "my_first_tag": "first", + "my-second-tag": "2nd", + } + tags_entities = [ + RunTag(key, value) for key, value in tags_dict.items() + ] + tags_run = fs.create_run( + experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, + user_id='user', + start_time=0, + tags=tags_entities) + assert isinstance(tags_run.data, RunData) + assert tags_run.data.tags == tags_dict + + def _experiment_id_edit_func(self, old_dict): + old_dict["experiment_id"] = int(old_dict["experiment_id"]) + return old_dict + + def _verify_run(self, fs, run_id): + run = fs.get_run(run_id) + run_info = self.run_data[run_id] + run_info.pop("metrics", None) + run_info.pop("params", None) + run_info.pop("tags", None) + run_info['lifecycle_stage'] = LifecycleStage.ACTIVE + run_info['status'] = RunStatus.to_string(run_info['status']) + self.assertEqual(run_info, dict(run.info)) def test_get_run(self): fs = FileStore(self.test_root) for exp_id in self.experiments: runs = self.exp_data[exp_id]["runs"] - for run_uuid in runs: - run = fs.get_run(run_uuid) - run_info = self.run_data[run_uuid] - run_info.pop("metrics") - run_info.pop("params") - run_info.pop("tags") - run_info['lifecycle_stage'] = RunInfo.ACTIVE_LIFECYCLE - self.assertEqual(run_info, dict(run.info)) + for run_id in runs: + self._verify_run(fs, run_id) + + def test_get_run_int_experiment_id_backcompat(self): + fs = FileStore(self.test_root) + exp_id = FileStore.DEFAULT_EXPERIMENT_ID + run_id = self.exp_data[exp_id]["runs"][0] + root_dir = os.path.join(self.test_root, exp_id, run_id) + with safe_edit_yaml(root_dir, "meta.yaml", self._experiment_id_edit_func): + self._verify_run(fs, run_id) def test_list_run_infos(self): fs = FileStore(self.test_root) for exp_id in self.experiments: run_infos = fs.list_run_infos(exp_id, run_view_type=ViewType.ALL) for run_info in run_infos: - run_uuid = run_info.run_uuid - dict_run_info = self.run_data[run_uuid] + run_id = run_info.run_id + dict_run_info = self.run_data[run_id] dict_run_info.pop("metrics") dict_run_info.pop("params") dict_run_info.pop("tags") - dict_run_info['lifecycle_stage'] = RunInfo.ACTIVE_LIFECYCLE + dict_run_info['lifecycle_stage'] = LifecycleStage.ACTIVE + dict_run_info['status'] = RunStatus.to_string(dict_run_info['status']) self.assertEqual(dict_run_info, dict(run_info)) - def test_get_metric(self): + def test_log_metric_allows_multiple_values_at_same_step_and_run_data_uses_max_step_value(self): fs = FileStore(self.test_root) - for exp_id in self.experiments: - runs = self.exp_data[exp_id]["runs"] - for run_uuid in runs: - run_info = self.run_data[run_uuid] - metrics_dict = run_info.pop("metrics") - for metric_name, values in metrics_dict.items(): - # just the last recorded value - timestamp, metric_value = values[-1] - metric = fs.get_metric(run_uuid, metric_name) - self.assertEqual(metric.timestamp, timestamp) - self.assertEqual(metric.key, metric_name) - self.assertEqual(metric.value, metric_value) + run_id = self._create_run(fs).info.run_id + + metric_name = "test-metric-1" + # Check that we get the max of (step, timestamp, value) in that order + tuples_to_log = [ + (0, 100, 1000), + (3, 40, 100), # larger step wins even though it has smaller value + (3, 50, 10), # larger timestamp wins even though it has smaller value + (3, 50, 20), # tiebreak by max value + (3, 50, 20), # duplicate metrics with same (step, timestamp, value) are 
ok
+            # verify that we can log steps out of order / negative steps
+            (-3, 900, 900),
+            (-1, 800, 800),
+        ]
+        for step, timestamp, value in reversed(tuples_to_log):
+            fs.log_metric(run_id, Metric(metric_name, value, timestamp, step))
+
+        metric_history = fs.get_metric_history(run_id, metric_name)
+        logged_tuples = [(m.step, m.timestamp, m.value) for m in metric_history]
+        assert set(logged_tuples) == set(tuples_to_log)
+
+        run_data = fs.get_run(run_id).data
+        run_metrics = run_data.metrics
+        assert len(run_metrics) == 1
+        assert run_metrics[metric_name] == 20
+        metric_obj = run_data._metric_objs[0]
+        assert metric_obj.key == metric_name
+        assert metric_obj.step == 3
+        assert metric_obj.timestamp == 50
+        assert metric_obj.value == 20

     def test_get_all_metrics(self):
         fs = FileStore(self.test_root)
         for exp_id in self.experiments:
             runs = self.exp_data[exp_id]["runs"]
-            for run_uuid in runs:
-                run_info = self.run_data[run_uuid]
-                metrics = fs.get_all_metrics(run_uuid)
+            for run_id in runs:
+                run_info = self.run_data[run_id]
+                metrics = fs.get_all_metrics(run_id)
                 metrics_dict = run_info.pop("metrics")
                 for metric in metrics:
-                    # just the last recorded value
-                    timestamp, metric_value = metrics_dict[metric.key][-1]
-                    self.assertEqual(metric.timestamp, timestamp)
-                    self.assertEqual(metric.value, metric_value)
+                    expected_timestamp, expected_value = max(metrics_dict[metric.key])
+                    self.assertEqual(metric.timestamp, expected_timestamp)
+                    self.assertEqual(metric.value, expected_value)

     def test_get_metric_history(self):
         fs = FileStore(self.test_root)
         for exp_id in self.experiments:
             runs = self.exp_data[exp_id]["runs"]
-            for run_uuid in runs:
-                run_info = self.run_data[run_uuid]
+            for run_id in runs:
+                run_info = self.run_data[run_id]
                 metrics = run_info.pop("metrics")
                 for metric_name, values in metrics.items():
-                    metric_history = fs.get_metric_history(run_uuid, metric_name)
+                    metric_history = fs.get_metric_history(run_id, metric_name)
                     sorted_values = sorted(values, reverse=True)
                     for metric in metric_history:
                         timestamp, metric_value = sorted_values.pop()
@@ -322,44 +399,131 @@ def test_get_metric_history(self):
                         self.assertEqual(metric.key, metric_name)
                         self.assertEqual(metric.value, metric_value)

-    def test_get_param(self):
-        fs = FileStore(self.test_root)
-        for exp_id in self.experiments:
-            runs = self.exp_data[exp_id]["runs"]
-            for run_uuid in runs:
-                run_info = self.run_data[run_uuid]
-                params_dict = run_info.pop("params")
-                for param_name, param_value in params_dict.items():
-                    param = fs.get_param(run_uuid, param_name)
-                    self.assertEqual(param.key, param_name)
-                    self.assertEqual(param.value, param_value)
+    def _search(self, fs, experiment_id, filter_str=None,
+                run_view_type=ViewType.ALL, max_results=SEARCH_MAX_RESULTS_DEFAULT):
+        return [r.info.run_id
+                for r in fs.search_runs([experiment_id], filter_str, run_view_type, max_results)]

     def test_search_runs(self):
-        # TODO: replace this test once search code is fully implemented
+        # TODO: replace this test once search code is fully implemented
         fs = FileStore(self.test_root)
         # Expect 2 runs for each experiment
-        assert len(fs.search_runs([self.experiments[0]], [], run_view_type=ViewType.ACTIVE_ONLY)) \
-            == 2
-        assert len(fs.search_runs([self.experiments[0]], [], run_view_type=ViewType.ALL)) \
-            == 2
-        assert len(fs.search_runs([self.experiments[0]], [], run_view_type=ViewType.DELETED_ONLY)) \
-            == 0
+        assert len(self._search(fs, self.experiments[0], run_view_type=ViewType.ACTIVE_ONLY)) == 2
+        assert len(self._search(fs, self.experiments[0])) == 2
+        assert len(self._search(fs, self.experiments[0], run_view_type=ViewType.DELETED_ONLY)) == 0
+
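The metric test above leans on Python's lexicographic tuple ordering to pick the run-level value for a metric logged many times. A standalone sketch of that resolution rule (illustrative only; `resolve_latest_metric` is a made-up name, not MLflow code):

```python
def resolve_latest_metric(entries):
    """entries: (step, timestamp, value) tuples logged for one metric key.

    Tuples compare lexicographically, so max() picks the largest step,
    breaking ties by timestamp and then by value, matching the test above.
    """
    return max(entries)


entries = [
    (0, 100, 1000),
    (3, 40, 100),    # larger step wins despite the smaller value
    (3, 50, 10),     # larger timestamp wins despite the smaller value
    (3, 50, 20),     # final tiebreak: larger value
    (-3, 900, 900),  # negative and out-of-order steps are fine
]
assert resolve_latest_metric(entries) == (3, 50, 20)
```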
+    def test_search_tags(self):
+        fs = FileStore(self.test_root)
+        experiment_id = self.experiments[0]
+        r1 = fs.create_run(experiment_id, 'user', 0, []).info.run_id
+        r2 = fs.create_run(experiment_id, 'user', 0, []).info.run_id
+
+        fs.set_tag(r1, RunTag('generic_tag', 'p_val'))
+        fs.set_tag(r2, RunTag('generic_tag', 'p_val'))
+
+        fs.set_tag(r1, RunTag('generic_2', 'some value'))
+        fs.set_tag(r2, RunTag('generic_2', 'another value'))
+
+        fs.set_tag(r1, RunTag('p_a', 'abc'))
+        fs.set_tag(r2, RunTag('p_b', 'ABC'))
+
+        # test search returns both runs
+        six.assertCountEqual(self, [r1, r2], self._search(fs, experiment_id,
+                                                          filter_str="tags.generic_tag = 'p_val'"))
+        # test search returns appropriate run (same key different values per run)
+        six.assertCountEqual(self, [r1],
+                             self._search(fs, experiment_id,
+                                          filter_str="tags.generic_2 = 'some value'"))
+        six.assertCountEqual(self, [r2], self._search(fs, experiment_id,
+                                                      filter_str="tags.generic_2='another value'"))
+        six.assertCountEqual(self, [], self._search(fs, experiment_id,
+                                                    filter_str="tags.generic_tag = 'wrong_val'"))
+        six.assertCountEqual(self, [], self._search(fs, experiment_id,
+                                                    filter_str="tags.generic_tag != 'p_val'"))
+        six.assertCountEqual(self, [r1, r2],
+                             self._search(fs, experiment_id,
+                                          filter_str="tags.generic_tag != 'wrong_val'"))
+        six.assertCountEqual(self, [r1, r2],
+                             self._search(fs, experiment_id,
+                                          filter_str="tags.generic_2 != 'wrong_val'"))
+        six.assertCountEqual(self, [r1], self._search(fs, experiment_id,
+                                                      filter_str="tags.p_a = 'abc'"))
+        six.assertCountEqual(self, [r2], self._search(fs, experiment_id,
+                                                      filter_str="tags.p_b = 'ABC'"))
+
+    def test_search_with_max_results(self):
+        fs = FileStore(self.test_root)
+        exp = fs.create_experiment("search_with_max_results")
+
+        runs = [fs.create_run(exp, 'user', r, []).info.run_id
+                for r in range(10)]
+        runs.reverse()
+
+        assert runs[:10] == self._search(fs, exp)
+        for n in [0, 1, 2, 4, 8, 10, 20, 50, 100, 500, 1000, 1200, 2000]:
+            assert runs[:min(1200, n)] == self._search(fs, exp, max_results=n)
+
+        with self.assertRaises(MlflowException) as e:
+            self._search(fs, exp, None, max_results=int(1e10))
+        self.assertIn("Invalid value for request parameter max_results. It ", e.exception.message)
+
+    def test_search_with_deterministic_max_results(self):
+        fs = FileStore(self.test_root)
+        exp = fs.create_experiment("test_search_with_deterministic_max_results")
+
+        # Create 10 runs with the same start_time.
+        # Sort based on run_id
+        runs = sorted([fs.create_run(exp, 'user', 1000, []).info.run_id
+                       for r in range(10)])
+        for n in [0, 1, 2, 4, 8, 10, 20]:
+            assert runs[:min(10, n)] == self._search(fs, exp, max_results=n)
+
+    def test_search_runs_pagination(self):
+        fs = FileStore(self.test_root)
+        exp = fs.create_experiment("test_search_runs_pagination")
+        # test returned token behavior
+        runs = sorted([fs.create_run(exp, 'user', 1000, []).info.run_id
+                       for r in range(10)])
+        result = fs.search_runs([exp], None, ViewType.ALL, max_results=4)
+        assert [r.info.run_id for r in result] == runs[0:4]
+        assert result.token is not None
+        result = fs.search_runs([exp], None, ViewType.ALL, max_results=4,
+                                page_token=result.token)
+        assert [r.info.run_id for r in result] == runs[4:8]
+        assert result.token is not None
+        result = fs.search_runs([exp], None, ViewType.ALL, max_results=4,
+                                page_token=result.token)
+        assert [r.info.run_id for r in result] == runs[8:]
+        assert result.token is None

     def test_weird_param_names(self):
         WEIRD_PARAM_NAME = "this is/a weird/but valid param"
         fs = FileStore(self.test_root)
-        run_uuid = self.exp_data[0]["runs"][0]
-        fs.log_param(run_uuid, Param(WEIRD_PARAM_NAME, "Value"))
-        param = fs.get_param(run_uuid, WEIRD_PARAM_NAME)
-        assert param.key == WEIRD_PARAM_NAME
-        assert param.value == "Value"
+        run_id = self.exp_data[FileStore.DEFAULT_EXPERIMENT_ID]["runs"][0]
+        fs.log_param(run_id, Param(WEIRD_PARAM_NAME, "Value"))
+        run = fs.get_run(run_id)
+        assert run.data.params[WEIRD_PARAM_NAME] == "Value"
+
+    def test_log_empty_str(self):
+        PARAM_NAME = "new param"
+        fs = FileStore(self.test_root)
+        run_id = self.exp_data[FileStore.DEFAULT_EXPERIMENT_ID]["runs"][0]
+        fs.log_param(run_id, Param(PARAM_NAME, ""))
+        run = fs.get_run(run_id)
+        assert run.data.params[PARAM_NAME] == ""

     def test_weird_metric_names(self):
         WEIRD_METRIC_NAME = "this is/a weird/but valid metric"
         fs = FileStore(self.test_root)
-        run_uuid = self.exp_data[0]["runs"][0]
-        fs.log_metric(run_uuid, Metric(WEIRD_METRIC_NAME, 10, 1234))
-        metric = fs.get_metric(run_uuid, WEIRD_METRIC_NAME)
+        run_id = self.exp_data[FileStore.DEFAULT_EXPERIMENT_ID]["runs"][0]
+        fs.log_metric(run_id, Metric(WEIRD_METRIC_NAME, 10, 1234, 0))
+        run = fs.get_run(run_id)
+        assert run.data.metrics[WEIRD_METRIC_NAME] == 10
+        history = fs.get_metric_history(run_id, WEIRD_METRIC_NAME)
+        assert len(history) == 1
+        metric = history[0]
         assert metric.key == WEIRD_METRIC_NAME
         assert metric.value == 10
         assert metric.timestamp == 1234
@@ -367,48 +531,63 @@ def test_weird_metric_names(self):
     def test_weird_tag_names(self):
         WEIRD_TAG_NAME = "this is/a weird/but valid tag"
         fs = FileStore(self.test_root)
-        run_uuid = self.exp_data[0]["runs"][0]
-        fs.set_tag(run_uuid, RunTag(WEIRD_TAG_NAME, "Muhahaha!"))
-        tag = fs.get_run(run_uuid).data.tags[0]
-        assert tag.key == WEIRD_TAG_NAME
-        assert tag.value == "Muhahaha!"
+        run_id = self.exp_data[FileStore.DEFAULT_EXPERIMENT_ID]["runs"][0]
+        fs.set_tag(run_id, RunTag(WEIRD_TAG_NAME, "Muhahaha!"))
+        run = fs.get_run(run_id)
+        assert run.data.tags[WEIRD_TAG_NAME] == "Muhahaha!"
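The tag tests in this file (slash-containing keys, unicode values, multiline values, overwrites) all fall out of a FileStore-style layout in which each tag is simply a file under the run directory. A minimal sketch of that idea, assuming a `run_dir/tags/<key>` layout (hypothetical helpers, not the real `mlflow.store.file_store` internals):

```python
import os


def set_tag(run_dir, key, value):
    # Assumed layout: one file per tag key, with the raw value as the body.
    path = os.path.join(run_dir, "tags", key)
    # Keys may contain '/' (see test_weird_tag_names); that just nests dirs.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    # Writing the raw text means multiline and unicode values round-trip,
    # and rewriting the file gives tag-overwrite semantics for free.
    with open(path, "w", encoding="utf-8") as f:
        f.write(value)


def get_tag(run_dir, key):
    with open(os.path.join(run_dir, "tags", key), encoding="utf-8") as f:
        return f.read()
```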
def test_set_tags(self): fs = FileStore(self.test_root) - run_uuid = self.exp_data[0]["runs"][0] - fs.set_tag(run_uuid, RunTag("tag0", "value0")) - fs.set_tag(run_uuid, RunTag("tag1", "value1")) - tags = [(t.key, t.value) for t in fs.get_run(run_uuid).data.tags] - assert set(tags) == { - ("tag0", "value0"), - ("tag1", "value1"), - } + run_id = self.exp_data[FileStore.DEFAULT_EXPERIMENT_ID]["runs"][0] + fs.set_tag(run_id, RunTag("tag0", "value0")) + fs.set_tag(run_id, RunTag("tag1", "value1")) + tags = fs.get_run(run_id).data.tags + assert tags["tag0"] == "value0" + assert tags["tag1"] == "value1" # Can overwrite tags. - fs.set_tag(run_uuid, RunTag("tag0", "value2")) - tags = [(t.key, t.value) for t in fs.get_run(run_uuid).data.tags] - assert set(tags) == { - ("tag0", "value2"), - ("tag1", "value1"), - } + fs.set_tag(run_id, RunTag("tag0", "value2")) + tags = fs.get_run(run_id).data.tags + assert tags["tag0"] == "value2" + assert tags["tag1"] == "value1" # Can set multiline tags. - fs.set_tag(run_uuid, RunTag("multiline_tag", "value2\nvalue2\nvalue2")) - tags = [(t.key, t.value) for t in fs.get_run(run_uuid).data.tags] - assert set(tags) == { - ("tag0", "value2"), - ("tag1", "value1"), - ("multiline_tag", "value2\nvalue2\nvalue2"), - } + fs.set_tag(run_id, RunTag("multiline_tag", "value2\nvalue2\nvalue2")) + tags = fs.get_run(run_id).data.tags + assert tags["multiline_tag"] == "value2\nvalue2\nvalue2" + + def test_delete_tags(self): + fs = FileStore(self.test_root) + exp_id = self.experiments[random_int(0, len(self.experiments) - 1)] + run_id = self.exp_data[exp_id]['runs'][0] + fs.set_tag(run_id, RunTag("tag0", "value0")) + fs.set_tag(run_id, RunTag("tag1", "value1")) + tags = fs.get_run(run_id).data.tags + assert tags["tag0"] == "value0" + assert tags["tag1"] == "value1" + fs.delete_tag(run_id, "tag0") + new_tags = fs.get_run(run_id).data.tags + assert "tag0" not in new_tags.keys() + # test that you cannot delete tags that don't exist. + with pytest.raises(MlflowException): + fs.delete_tag(run_id, "fakeTag") + # test that you cannot delete tags for nonexistent runs + with pytest.raises(MlflowException): + fs.delete_tag("random_id", "tag0") + fs = FileStore(self.test_root) + fs.delete_run(run_id) + # test that you cannot delete tags for deleted runs. 
+        assert fs.get_run(run_id).info.lifecycle_stage == LifecycleStage.DELETED
+        with pytest.raises(MlflowException):
+            fs.delete_tag(run_id, "tag0")

     def test_unicode_tag(self):
         fs = FileStore(self.test_root)
-        run_uuid = self.exp_data[0]["runs"][0]
+        run_id = self.exp_data[FileStore.DEFAULT_EXPERIMENT_ID]["runs"][0]
         value = u"𝐼 𝓈𝑜𝓁𝑒𝓂𝓃𝓁𝓎 𝓈𝓌𝑒𝒶𝓇 𝓉𝒽𝒶𝓉 𝐼 𝒶𝓂 𝓊𝓅 𝓉𝑜 𝓃𝑜 𝑔𝑜𝑜𝒹"
-        fs.set_tag(run_uuid, RunTag("message", value))
-        tag = fs.get_run(run_uuid).data.tags[0]
-        assert tag.key == "message"
-        assert tag.value == value
+        fs.set_tag(run_id, RunTag("message", value))
+        tags = fs.get_run(run_id).data.tags
+        assert tags["message"] == value

     def test_get_deleted_run(self):
         """
@@ -418,10 +597,7 @@ def test_get_deleted_run(self):
         exp_id = self.experiments[random_int(0, len(self.experiments) - 1)]
         run_id = self.exp_data[exp_id]['runs'][0]
         fs.delete_run(run_id)
-
-        run = fs.get_run(run_id)
-        assert fs.get_metric(run_id, run.data.metrics[0].key).value == run.data.metrics[0].value
-        assert fs.get_param(run_id, run.data.params[0].key).value == run.data.params[0].value
+        assert fs.get_run(run_id)

     def test_set_deleted_run(self):
         """
@@ -432,24 +608,220 @@ def test_set_deleted_run(self):
         run_id = self.exp_data[exp_id]['runs'][0]
         fs.delete_run(run_id)

-        assert fs.get_run(run_id).info.lifecycle_stage == RunInfo.DELETED_LIFECYCLE
+        assert fs.get_run(run_id).info.lifecycle_stage == LifecycleStage.DELETED
         with pytest.raises(MlflowException):
             fs.set_tag(run_id, RunTag('a', 'b'))
         with pytest.raises(MlflowException):
-            fs.log_metric(run_id, Metric('a', 0.0, timestamp=0))
+            fs.log_metric(run_id, Metric('a', 0.0, timestamp=0, step=0))
         with pytest.raises(MlflowException):
             fs.log_param(run_id, Param('a', 'b'))

-    def test_create_run_with_parent_id(self):
+    def test_default_experiment_initialization(self):
         fs = FileStore(self.test_root)
-        exp_id = self.experiments[random_int(0, len(self.experiments) - 1)]
-        run = fs.create_run(exp_id, 'user', 'name', 'source_type', 'source_name',
-                            'entry_point_name', 0, None, [], 'test_parent_run_id')
-        assert any([t.key == MLFLOW_PARENT_RUN_ID and t.value == 'test_parent_run_id'
-                    for t in fs.get_all_tags(run.info.run_uuid)])
+        fs.delete_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        fs = FileStore(self.test_root)
+        experiment = fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        assert experiment.lifecycle_stage == LifecycleStage.DELETED

-    def test_default_experiment_initialization(self):
+    def test_malformed_experiment(self):
         fs = FileStore(self.test_root)
-        fs.delete_experiment(Experiment.DEFAULT_EXPERIMENT_ID)
+        exp_0 = fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        assert exp_0.experiment_id == FileStore.DEFAULT_EXPERIMENT_ID
+
+        experiments = len(fs.list_experiments(ViewType.ALL))
+
+        # delete metadata file.
+        path = os.path.join(self.test_root, str(exp_0.experiment_id), "meta.yaml")
+        os.remove(path)
+        with pytest.raises(MissingConfigException) as e:
+            fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        assert "does not exist" in str(e.value)
+
+        assert len(fs.list_experiments(ViewType.ALL)) == experiments - 1
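A note on the `pytest.raises` idiom used throughout these tests: an assertion placed inside the `with pytest.raises(...)` block never runs, because the expected exception aborts the block first, and the `as` target is an `ExceptionInfo`, not the exception itself. The working idiom, as a generic sketch (names here are hypothetical stand-ins):

```python
import pytest


def load_config():
    # Stand-in for a call such as fs.get_experiment(...) above.
    raise ValueError("meta.yaml does not exist")


def test_error_message_is_checked():
    with pytest.raises(ValueError) as exc_info:
        load_config()
    # Inspect the exception only after the block; exc_info.value is the
    # raised exception, so str() or its attributes can be asserted on.
    assert "does not exist" in str(exc_info.value)
```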
+    def test_malformed_run(self):
+        fs = FileStore(self.test_root)
+        exp_0 = fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        all_runs = self._search(fs, exp_0.experiment_id)
+
+        all_run_ids = self.exp_data[exp_0.experiment_id]["runs"]
+        assert len(all_runs) == len(all_run_ids)
+
+        # delete metadata file.
+        bad_run_id = self.exp_data[exp_0.experiment_id]['runs'][0]
+        path = os.path.join(self.test_root, str(exp_0.experiment_id), str(bad_run_id), "meta.yaml")
+        os.remove(path)
+        with pytest.raises(MissingConfigException) as e:
+            fs.get_run(bad_run_id)
+        assert "does not exist" in str(e.value)
+
+        valid_runs = self._search(fs, exp_0.experiment_id)
+        assert len(valid_runs) == len(all_runs) - 1
+
+        for rid in all_run_ids:
+            if rid != bad_run_id:
+                fs.get_run(rid)
+
+    def test_mismatching_experiment_id(self):
+        fs = FileStore(self.test_root)
+        exp_0 = fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        assert exp_0.experiment_id == FileStore.DEFAULT_EXPERIMENT_ID
+
+        experiments = len(fs.list_experiments(ViewType.ALL))
+
+        # mv experiment folder
+        target = "1"
+        path_orig = os.path.join(self.test_root, str(exp_0.experiment_id))
+        path_new = os.path.join(self.test_root, str(target))
+        os.rename(path_orig, path_new)
+
+        with pytest.raises(MlflowException) as e:
+            fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        assert "Could not find experiment with ID" in str(e.value)
+
+        with pytest.raises(MlflowException) as e:
+            fs.get_experiment(target)
+        assert "does not exist" in str(e.value)
+        assert len(fs.list_experiments(ViewType.ALL)) == experiments - 1
+
+    def test_bad_experiment_id_recorded_for_run(self):
+        fs = FileStore(self.test_root)
+        exp_0 = fs.get_experiment(FileStore.DEFAULT_EXPERIMENT_ID)
+        all_runs = self._search(fs, exp_0.experiment_id)
+
+        all_run_ids = self.exp_data[exp_0.experiment_id]["runs"]
+        assert len(all_runs) == len(all_run_ids)
+
+        # change experiment pointer in run
+        bad_run_id = str(self.exp_data[exp_0.experiment_id]['runs'][0])
+        path = os.path.join(self.test_root, str(exp_0.experiment_id), bad_run_id)
+        experiment_data = read_yaml(path, "meta.yaml")
+        experiment_data["experiment_id"] = 1
+        write_yaml(path, "meta.yaml", experiment_data, True)
+
+        with pytest.raises(MlflowException) as e:
+            fs.get_run(bad_run_id)
+        assert "not found" in str(e.value)
+
+        valid_runs = self._search(fs, exp_0.experiment_id)
+        assert len(valid_runs) == len(all_runs) - 1
+
+        for rid in all_run_ids:
+            if rid != bad_run_id:
+                fs.get_run(rid)
+
+    def test_log_batch(self):
+        fs = FileStore(self.test_root)
+        run = fs.create_run(
+            experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, user_id='user', start_time=0, tags=[])
+        run_id = run.info.run_id
+        metric_entities = [Metric("m1", 0.87, 12345, 0), Metric("m2", 0.49, 12345, 0)]
+        param_entities = [Param("p1", "p1val"), Param("p2", "p2val")]
+        tag_entities = [RunTag("t1", "t1val"), RunTag("t2", "t2val")]
+        fs.log_batch(
+            run_id=run_id, metrics=metric_entities, params=param_entities, tags=tag_entities)
+        self._verify_logged(fs, run_id, metric_entities, param_entities, tag_entities)
+
+    def _create_run(self, fs):
+        return fs.create_run(
+            experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, user_id='user',
+            start_time=0, tags=[])
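`_verify_logged` below recovers every logged value through `get_metric_history`, which works because a FileStore-style backend keeps the full per-key history rather than only a latest value. A minimal sketch of that idea, with an assumed one-line-per-value file format (illustrative only, not the actual store code):

```python
import os


def log_metric(run_dir, key, value, timestamp, step):
    metrics_dir = os.path.join(run_dir, "metrics")
    os.makedirs(metrics_dir, exist_ok=True)
    # Append-only: each call adds one line, so logging the same key twice
    # (as the repeated-metric tests below do) yields two history entries.
    with open(os.path.join(metrics_dir, key), "a", encoding="utf-8") as f:
        f.write("%d %s %d\n" % (timestamp, value, step))


def get_metric_history(run_dir, key):
    history = []
    with open(os.path.join(run_dir, "metrics", key), encoding="utf-8") as f:
        for line in f:
            timestamp, value, step = line.split()
            history.append((int(step), int(timestamp), float(value)))
    return history
```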
+    def _verify_logged(self, fs, run_id, metrics, params, tags):
+        run = fs.get_run(run_id)
+        all_metrics = sum([fs.get_metric_history(run_id, key)
+                           for key in run.data.metrics], [])
+        assert len(all_metrics) == len(metrics)
+        logged_metrics = [(m.key, m.value, m.timestamp, m.step) for m in all_metrics]
+        assert set(logged_metrics) == set([(m.key, m.value, m.timestamp, m.step) for m in metrics])
+        logged_tags = set([(tag_key, tag_value) for tag_key, tag_value in run.data.tags.items()])
+        assert set([(tag.key, tag.value) for tag in tags]) <= logged_tags
+        assert len(run.data.params) == len(params)
+        logged_params = [(param_key, param_val)
+                         for param_key, param_val in run.data.params.items()]
+        assert set(logged_params) == set([(param.key, param.value) for param in params])
+
+    def test_log_batch_internal_error(self):
+        # Verify that internal errors during log_batch result in MlflowExceptions
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+
+        def _raise_exception_fn(*args, **kwargs):  # pylint: disable=unused-argument
+            raise Exception("Some internal error")
+        with mock.patch("mlflow.store.file_store.FileStore.log_metric") as log_metric_mock, \
+                mock.patch("mlflow.store.file_store.FileStore.log_param") as log_param_mock, \
+                mock.patch("mlflow.store.file_store.FileStore.set_tag") as set_tag_mock:
+            log_metric_mock.side_effect = _raise_exception_fn
+            log_param_mock.side_effect = _raise_exception_fn
+            set_tag_mock.side_effect = _raise_exception_fn
+            for kwargs in [{"metrics": [Metric("a", 3, 1, 0)]}, {"params": [Param("b", "c")]},
+                           {"tags": [RunTag("c", "d")]}]:
+                log_batch_kwargs = {"metrics": [], "params": [], "tags": []}
+                log_batch_kwargs.update(kwargs)
+                with self.assertRaises(MlflowException) as e:
+                    fs.log_batch(run.info.run_id, **log_batch_kwargs)
+                self.assertIn("Some internal error", e.exception.message)
+                assert e.exception.error_code == ErrorCode.Name(INTERNAL_ERROR)
+
+    def test_log_batch_nonexistent_run(self):
+        fs = FileStore(self.test_root)
+        nonexistent_uuid = uuid.uuid4().hex
+        with self.assertRaises(MlflowException) as e:
+            fs.log_batch(nonexistent_uuid, [], [], [])
+        assert e.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
+        assert ("Run '%s' not found" % nonexistent_uuid) in e.exception.message
+
+    def test_log_batch_params_idempotency(self):
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+        params = [Param("p-key", "p-val")]
+        fs.log_batch(run.info.run_id, metrics=[], params=params, tags=[])
+        fs.log_batch(run.info.run_id, metrics=[], params=params, tags=[])
+        self._verify_logged(fs, run.info.run_id, metrics=[], params=params, tags=[])
+
+    def test_log_batch_tags_idempotency(self):
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+        fs.log_batch(run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "t-val")])
+        fs.log_batch(run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "t-val")])
+        self._verify_logged(fs, run.info.run_id, metrics=[], params=[],
+                            tags=[RunTag("t-key", "t-val")])
+
+    def test_log_batch_allows_tag_overwrite(self):
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+        fs.log_batch(run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "val")])
+        fs.log_batch(run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "newval")])
+        self._verify_logged(fs, run.info.run_id, metrics=[], params=[],
+                            tags=[RunTag("t-key", "newval")])
+
+    def test_log_batch_same_metric_repeated_single_req(self):
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+        metric0 = Metric(key="metric-key", value=1, timestamp=2, step=0)
+        metric1 = Metric(key="metric-key", value=2, timestamp=3, step=0)
+        fs.log_batch(run.info.run_id, params=[], metrics=[metric0, metric1], tags=[])
+        self._verify_logged(fs, run.info.run_id, params=[], metrics=[metric0, metric1], tags=[])
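The batch tests around here pin down two different semantics: params are idempotent (re-logging an identical value is fine), while tags may be overwritten, with the last write winning even within a single batch. A toy sketch of those rules (the error on conflicting params is an assumption for illustration; the diff itself only exercises the identical-value case):

```python
def log_param(params, key, value):
    # Idempotent: identical re-logs are no-ops. Rejecting a conflicting
    # value is one plausible enforcement, assumed here for illustration.
    if key in params and params[key] != value:
        raise ValueError("param %r already logged with a different value" % key)
    params[key] = value


def set_tag(tags, key, value):
    # Tags overwrite freely; the last write wins, even within one batch.
    tags[key] = value
```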
+    def test_log_batch_same_metric_repeated_multiple_reqs(self):
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+        metric0 = Metric(key="metric-key", value=1, timestamp=2, step=0)
+        metric1 = Metric(key="metric-key", value=2, timestamp=3, step=0)
+        fs.log_batch(run.info.run_id, params=[], metrics=[metric0], tags=[])
+        self._verify_logged(fs, run.info.run_id, params=[], metrics=[metric0], tags=[])
+        fs.log_batch(run.info.run_id, params=[], metrics=[metric1], tags=[])
+        self._verify_logged(fs, run.info.run_id, params=[], metrics=[metric0, metric1], tags=[])
+
+    def test_log_batch_allows_tag_overwrite_single_req(self):
+        fs = FileStore(self.test_root)
+        run = self._create_run(fs)
+        tags = [RunTag("t-key", "val"), RunTag("t-key", "newval")]
+        fs.log_batch(run.info.run_id, metrics=[], params=[], tags=tags)
+        self._verify_logged(fs, run.info.run_id, metrics=[], params=[], tags=[tags[-1]])
+
+    def test_log_batch_accepts_empty_payload(self):
         fs = FileStore(self.test_root)
-        assert fs.get_experiment(0).lifecycle_stage == Experiment.DELETED_LIFECYCLE
+        run = self._create_run(fs)
+        fs.log_batch(run.info.run_id, metrics=[], params=[], tags=[])
+        self._verify_logged(fs, run.info.run_id, metrics=[], params=[], tags=[])
diff --git a/tests/store/test_ftp_artifact_repo.py b/tests/store/test_ftp_artifact_repo.py
new file mode 100644
index 0000000000000..6ee7814c817b4
--- /dev/null
+++ b/tests/store/test_ftp_artifact_repo.py
@@ -0,0 +1,285 @@
+# pylint: disable=redefined-outer-name
+from mock import MagicMock
+import pytest
+import posixpath
+import ftplib
+from ftplib import FTP
+
+from mlflow.store.artifact_repository_registry import get_artifact_repository
+from mlflow.store.ftp_artifact_repo import FTPArtifactRepository
+
+
+@pytest.fixture
+def ftp_mock():
+    # spec=FTP constrains the mock to the real FTP API; autospec is not a
+    # MagicMock constructor kwarg and would silently be ignored.
+    return MagicMock(spec=FTP)
+
+
+def test_artifact_uri_factory():
+    repo = get_artifact_repository("ftp://user:pass@test_ftp:123/some/path")
+    assert isinstance(repo, FTPArtifactRepository)
+
+
+def test_list_artifacts_empty(ftp_mock):
+    repo = FTPArtifactRepository("ftp://test_ftp/some/path")
+
+    repo.get_ftp_client = MagicMock()
+    call_mock = MagicMock(return_value=ftp_mock)
+    repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+
+    ftp_mock.nlst = MagicMock(return_value=[])
+    assert repo.list_artifacts() == []
+    ftp_mock.nlst.assert_called_once_with("/some/path")
+
+
+def test_list_artifacts(ftp_mock):
+    artifact_root_path = "/experiment_id/run_id/"
+    repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path)
+
+    repo.get_ftp_client = MagicMock()
+    call_mock = MagicMock(return_value=ftp_mock)
+    repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+
+    # mocked file structure
+    # |- file
+    # |- model
+    #     |- model.pb
+
+    file_path = "file"
+    file_size = 678
+    dir_path = "model"
+    ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
+    ftp_mock.nlst = MagicMock(return_value=[file_path, dir_path])
+
+    ftp_mock.size = MagicMock(return_value=file_size)
+
+    artifacts = repo.list_artifacts(path=None)
+
+    ftp_mock.nlst.assert_called_once_with(artifact_root_path)
+    ftp_mock.size.assert_called_once_with(artifact_root_path + file_path)
+
+    assert len(artifacts) == 2
+    assert artifacts[0].path == file_path
+    assert artifacts[0].is_dir is False
+    assert artifacts[0].file_size == file_size
+    assert artifacts[1].path == dir_path
+    assert artifacts[1].is_dir is True
+    assert artifacts[1].file_size is None
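The `cwd` side effects mocked in these FTP tests encode a common probe: plain FTP has no portable stat call, so a client can try `cwd(path)` and treat `ftplib.error_perm` as "this is a file, not a directory". A sketch of that probe under those assumptions (the real `FTPArtifactRepository` internals may differ):

```python
import ftplib


def ftp_is_dir(ftp, path):
    """Return True if path is a directory on the server, False otherwise."""
    original = ftp.pwd()
    try:
        ftp.cwd(path)  # succeeds only for directories
        return True
    except ftplib.error_perm:
        return False
    finally:
        ftp.cwd(original)  # restore the working directory
```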
+def test_list_artifacts_with_subdir(ftp_mock):
+    artifact_root_path = "/experiment_id/run_id/"
+    repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path)
+
+    repo.get_ftp_client = MagicMock()
+    call_mock = MagicMock(return_value=ftp_mock)
+    repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+
+    # mocked file structure
+    # |- model
+    #     |- model.pb
+    #     |- variables
+    dir_name = 'model'
+
+    # list artifacts at sub directory level
+    file_path = 'model.pb'
+    file_size = 345
+    subdir_name = 'variables'
+
+    ftp_mock.nlst = MagicMock(return_value=[file_path, subdir_name])
+
+    ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
+
+    ftp_mock.size = MagicMock(return_value=file_size)
+
+    artifacts = repo.list_artifacts(path=dir_name)
+
+    ftp_mock.nlst.assert_called_once_with(artifact_root_path + dir_name)
+    ftp_mock.size.assert_called_once_with(artifact_root_path + dir_name + '/' + file_path)
+
+    assert len(artifacts) == 2
+    assert artifacts[0].path == dir_name + '/' + file_path
+    assert artifacts[0].is_dir is False
+    assert artifacts[0].file_size == file_size
+    assert artifacts[1].path == dir_name + '/' + subdir_name
+    assert artifacts[1].is_dir is True
+    assert artifacts[1].file_size is None
+
+
+def test_log_artifact(ftp_mock, tmpdir):
+    repo = FTPArtifactRepository("ftp://test_ftp/some/path")
+
+    repo.get_ftp_client = MagicMock()
+    call_mock = MagicMock(return_value=ftp_mock)
+    repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+
+    d = tmpdir.mkdir("data")
+    f = d.join("test.txt")
+    f.write("hello world!")
+    fpath = f.strpath
+
+    ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None])
+
+    repo.log_artifact(fpath)
+
+    ftp_mock.mkd.assert_called_once_with('/some/path')
+    ftp_mock.cwd.assert_called_with('/some/path')
+    ftp_mock.storbinary.assert_called_once()
+    assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test.txt'
+
+
+def test_log_artifact_multiple_calls(ftp_mock, tmpdir):
+    repo = FTPArtifactRepository("ftp://test_ftp/some/path")
+
+    repo.get_ftp_client = MagicMock()
+    call_mock = MagicMock(return_value=ftp_mock)
+    repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+
+    d = tmpdir.mkdir("data")
+    file1 = d.join("test1.txt")
+    file1.write("hello world!")
+    fpath1 = file1.strpath
+
+    file2 = d.join("test2.txt")
+    file2.write("hello world!")
+    fpath2 = file2.strpath
+
+    ftp_mock.cwd = MagicMock(side_effect=[
+        ftplib.error_perm,
+        None,
+        ftplib.error_perm,
+        None,
+        None,
+        None
+    ])
+
+    repo.log_artifact(fpath1)
+    ftp_mock.mkd.assert_called_once_with('/some/path')
+    ftp_mock.cwd.assert_called_with('/some/path')
+    ftp_mock.storbinary.assert_called()
+    assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt'
+    ftp_mock.reset_mock()
+
+    repo.log_artifact(fpath1, "subdir")
+    ftp_mock.mkd.assert_called_once_with('/some/path/subdir')
+    ftp_mock.cwd.assert_called_with('/some/path/subdir')
+    ftp_mock.storbinary.assert_called()
+    assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test1.txt'
+    ftp_mock.reset_mock()
+
+    repo.log_artifact(fpath2)
+    ftp_mock.mkd.assert_not_called()
+    ftp_mock.cwd.assert_called_with('/some/path')
+    ftp_mock.storbinary.assert_called()
+    assert ftp_mock.storbinary.call_args_list[0][0][0] == 'STOR test2.txt'
+
+
+def test_log_artifacts(ftp_mock, tmpdir):
+    repo = FTPArtifactRepository("ftp://test_ftp/some/path")
+
+    repo.get_ftp_client = MagicMock()
+    call_mock = MagicMock(return_value=ftp_mock)
+    repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
+
+    subd = tmpdir.mkdir("data").mkdir("subdir")
+    subd.join("a.txt").write("A")
+    subd.join("b.txt").write("B")
+    subd.join("c.txt").write("C")
+
+    ftp_mock.cwd = MagicMock(side_effect=[ftplib.error_perm, None, None, None, None, None])
+
+    repo.log_artifacts(subd.strpath)
+
ftp_mock.mkd.assert_any_call('/some/path/subdir') + ftp_mock.cwd.assert_any_call('/some/path/subdir') + assert ftp_mock.storbinary.call_count == 3 + storbinary_call_args = sorted([ftp_mock.storbinary.call_args_list[i][0][0] for i in range(3)]) + assert storbinary_call_args == ['STOR a.txt', 'STOR b.txt', 'STOR c.txt'] + + +def test_download_artifacts_single(ftp_mock): + repo = FTPArtifactRepository("ftp://test_ftp/some/path") + + repo.get_ftp_client = MagicMock() + call_mock = MagicMock(return_value=ftp_mock) + repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) + + ftp_mock.cwd = MagicMock(side_effect=ftplib.error_perm) + + repo.download_artifacts("test.txt") + + ftp_mock.retrbinary.assert_called_once() + assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR /some/path/test.txt' + + +def test_download_artifacts(ftp_mock): + artifact_root_path = "/some/path" + repo = FTPArtifactRepository("ftp://test_ftp" + artifact_root_path) + + repo.get_ftp_client = MagicMock() + call_mock = MagicMock(return_value=ftp_mock) + repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) + + # mocked file structure + # |- model + # |- model.pb + # |- variables + # |- test.txt + dir_path = posixpath.join(artifact_root_path, 'model') + + # list artifacts at sub directory level + model_file_path_sub = 'model.pb' + model_file_path_full = posixpath.join(dir_path, model_file_path_sub) + subdir_name = 'variables' + subdir_path_full = posixpath.join(dir_path, subdir_name) + subfile_name = 'test.txt' + subfile_path_full = posixpath.join(artifact_root_path, subdir_path_full, subfile_name) + + is_dir_mapping = { + dir_path: True, + model_file_path_full: False, + subdir_path_full: True, + subfile_path_full: False, + } + + is_dir_call_args = [ + dir_path, model_file_path_full, subdir_path_full, + model_file_path_full, + subdir_path_full, subfile_path_full, + subfile_path_full + ] + + cwd_side_effect = [ + None if is_dir_mapping[call_arg] else ftplib.error_perm for call_arg in is_dir_call_args + ] + ftp_mock.cwd = MagicMock(side_effect=cwd_side_effect) + ftp_mock.nlst = MagicMock(side_effect=[[model_file_path_sub, subdir_name], [subfile_name]]) + + repo.download_artifacts("model") + + cwd_call_args = [arg_entry[0][0] for arg_entry in ftp_mock.cwd.call_args_list] + assert cwd_call_args == is_dir_call_args + assert ftp_mock.nlst.call_count == 2 + assert ftp_mock.retrbinary.call_args_list[0][0][0] == 'RETR ' + model_file_path_full + assert ftp_mock.retrbinary.call_args_list[1][0][0] == 'RETR ' + subfile_path_full + + +def test_log_artifact_reuse_ftp_client(ftp_mock, tmpdir): + repo = FTPArtifactRepository("ftp://test_ftp/some/path") + + repo.get_ftp_client = MagicMock() + call_mock = MagicMock(return_value=ftp_mock) + repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock) + + d = tmpdir.mkdir("data") + file = d.join("test.txt") + file.write("hello world!") + fpath = file.strpath + + repo.log_artifact(fpath) + repo.log_artifact(fpath, "subdir1/subdir2") + repo.log_artifact(fpath, "subdir3") + + assert repo.get_ftp_client.call_count == 3 diff --git a/tests/store/test_gcs_artifact_repo.py b/tests/store/test_gcs_artifact_repo.py index cf523d2195a4a..01faa8ebe3067 100644 --- a/tests/store/test_gcs_artifact_repo.py +++ b/tests/store/test_gcs_artifact_repo.py @@ -5,7 +5,7 @@ from google.cloud.storage import client as gcs_client -from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.store.artifact_repository_registry import get_artifact_repository from 
mlflow.store.gcs_artifact_repo import GCSArtifactRepository @@ -22,7 +22,7 @@ def gcs_mock(): def test_artifact_uri_factory(): - repo = ArtifactRepository.from_artifact_uri("gs://test_bucket/some/path", mock.Mock()) + repo = get_artifact_repository("gs://test_bucket/some/path") assert isinstance(repo, GCSArtifactRepository) @@ -147,9 +147,9 @@ def test_log_artifacts(gcs_mock, tmpdir): gcs_mock.Client().get_bucket.assert_called_with('test_bucket') gcs_mock.Client().get_bucket().blob().upload_from_filename\ .assert_has_calls([ - mock.call('%s/a.txt' % subd.strpath), - mock.call('%s/b.txt' % subd.strpath), - mock.call('%s/c.txt' % subd.strpath), + mock.call(os.path.normpath('%s/a.txt' % subd.strpath)), + mock.call(os.path.normpath('%s/b.txt' % subd.strpath)), + mock.call(os.path.normpath('%s/c.txt' % subd.strpath)), ], any_order=True) @@ -173,7 +173,7 @@ def mkfile(fname): gcs_mock.Client().get_bucket().get_blob().download_to_filename.call_args_list assert len(download_calls) == 1 download_path_arg = download_calls[0][0][0] - assert "/test.txt" in download_path_arg + assert "test.txt" in download_path_arg def test_download_artifacts_downloads_expected_content(gcs_mock, tmpdir): diff --git a/tests/store/test_hdfs_artifact_repo.py b/tests/store/test_hdfs_artifact_repo.py new file mode 100644 index 0000000000000..a02270670aea4 --- /dev/null +++ b/tests/store/test_hdfs_artifact_repo.py @@ -0,0 +1,151 @@ +import os +import sys +from tempfile import NamedTemporaryFile + +import mock +import pytest +from mock import call +from pyarrow import HadoopFileSystem + +from mlflow.entities import FileInfo +from mlflow.store.hdfs_artifact_repo import HdfsArtifactRepository, _resolve_base_path, \ + _relative_path_remote, _parse_extra_conf +from mlflow.utils.file_utils import TempDir + + +@mock.patch('pyarrow.hdfs.HadoopFileSystem') +def test_log_artifact(hdfs_system_mock): + repo = HdfsArtifactRepository('hdfs://host_name:8020/hdfs/path') + + with TempDir() as tmp_dir: + local_file = tmp_dir.path('sample_file') + with open(local_file, "w") as f: + f.write('PyArrow Works') + + repo.log_artifact(local_file, 'more_path/some') + + hdfs_system_mock.assert_called_once_with(driver='libhdfs', extra_conf=None, + host='host_name', + kerb_ticket=None, port=8020, + user=None) + + open_mock = hdfs_system_mock.return_value.open + open_mock.assert_called_once_with('/hdfs/path/more_path/some/sample_file', 'wb') + + write_mock = open_mock.return_value.__enter__.return_value.write + write_mock.assert_called_once_with(b'PyArrow Works') + + +@mock.patch('pyarrow.hdfs.HadoopFileSystem') +def test_log_artifact_with_kerberos_setup(hdfs_system_mock): + if sys.platform == 'win32': + pytest.skip() + os.environ['MLFLOW_KERBEROS_TICKET_CACHE'] = '/tmp/krb5cc_22222222' + os.environ['MLFLOW_KERBEROS_USER'] = 'some_kerberos_user' + os.environ['MLFLOW_HDFS_DRIVER'] = 'libhdfs3' + + repo = HdfsArtifactRepository('hdfs:/some/maybe/path') + + with NamedTemporaryFile() as tmp_local_file: + tmp_local_file.write(b'PyArrow Works') + tmp_local_file.seek(0) + + repo.log_artifact(tmp_local_file.name, 'test_hdfs/some/path') + + hdfs_system_mock.assert_called_once_with(driver='libhdfs3', extra_conf=None, + host='default', + kerb_ticket='/tmp/krb5cc_22222222', port=0, + user='some_kerberos_user') + + # TODO: refactor this magic ... 
+ write_mock = hdfs_system_mock.return_value.open.return_value.__enter__.return_value.write + write_mock.assert_called_once_with(b'PyArrow Works') + + +@mock.patch('pyarrow.hdfs.HadoopFileSystem') +def test_log_artifact_with_invalid_local_dir(_): + repo = HdfsArtifactRepository('hdfs://host_name:8020/maybe/path') + + with pytest.raises(Exception, + match="No such file or directory: '/not/existing/local/path'"): + repo.log_artifact('/not/existing/local/path', 'test_hdfs/some/path') + + +@mock.patch('pyarrow.hdfs.HadoopFileSystem') +def test_log_artifacts(hdfs_system_mock): + os.environ['MLFLOW_KERBEROS_TICKET_CACHE'] = '/tmp/krb5cc_22222222' + os.environ['MLFLOW_KERBEROS_USER'] = 'some_kerberos_user' + os.environ['MLFLOW_HDFS_DRIVER'] = 'libhdfs3' + + repo = HdfsArtifactRepository('hdfs:/some_path/maybe/path') + + with TempDir() as root_dir: + with open(root_dir.path("file_one.txt"), "w") as f: + f.write('PyArrow Works once') + + os.mkdir(root_dir.path("subdir")) + with open(root_dir.path("subdir/file_two.txt"), "w") as f: + f.write('PyArrow Works two') + + repo.log_artifacts(root_dir._path) + + hdfs_system_mock.assert_called_once_with(driver='libhdfs3', extra_conf=None, + host='default', + kerb_ticket='/tmp/krb5cc_22222222', port=0, + user='some_kerberos_user') + + open_mock = hdfs_system_mock.return_value.open + open_mock.assert_has_calls(calls=[call('/some_path/maybe/path/file_one.txt', 'wb'), + call('/some_path/maybe/path/subdir/file_two.txt', 'wb')], + any_order=True) + write_mock = open_mock.return_value.__enter__.return_value.write + write_mock.assert_has_calls(calls=[call(b'PyArrow Works once'), + call(b'PyArrow Works two')], + any_order=True) + + +@mock.patch('pyarrow.hdfs.HadoopFileSystem') +def test_list_artifacts(hdfs_system_mock): + repo = HdfsArtifactRepository('hdfs:/some/path') + + expected = [FileInfo('conda.yaml', False, 33), + FileInfo('model.pkl', False, 33), + FileInfo('MLmodel', False, 33)] + + hdfs_system_mock.return_value.walk.return_value = [ + ('/some/path', False, ['conda.yaml', 'model.pkl', 'MLmodel'])] + hdfs_system_mock.return_value.info.return_value.get.return_value = 33 + hdfs_system_mock.return_value.isdir.side_effect = [True, False, False, False] + + actual = repo.list_artifacts() + + assert actual == expected + + +@mock.patch('pyarrow.hdfs.HadoopFileSystem', spec=HadoopFileSystem) +def test_list_artifacts_empty_hdfs_dir(hdfs_system_mock): + hdfs_system_mock.return_value.exists.return_value = False + + repo = HdfsArtifactRepository('hdfs:/some_path/maybe/path') + actual = repo.list_artifacts() + assert actual == [] + + +def test_resolve_path(): + assert _resolve_base_path('/dir/some/path', None) == '/dir/some/path' + assert _resolve_base_path('/dir/some/path', 'subdir/path') == '/dir/some/path/subdir/path' + + +def test_relative_path(): + assert _relative_path_remote('/dir/some', '/dir/some/path/file.txt') == 'path/file.txt' + assert _relative_path_remote('/dir/some', '/dir/some') is None + + +def test_parse_extra_conf(): + assert _parse_extra_conf("fs.permissions.umask-mode=022,some_other.extra.conf=abcd") == \ + {'fs.permissions.umask-mode': '022', + 'some_other.extra.conf': 'abcd'} + assert _parse_extra_conf(None) is None + + with pytest.raises(Exception): + _parse_extra_conf("missing_equals_sign") diff --git a/tests/store/test_local_artifact_repo.py b/tests/store/test_local_artifact_repo.py index 4e4af76b255b3..354bdb861cbf8 100644 --- a/tests/store/test_local_artifact_repo.py +++ b/tests/store/test_local_artifact_repo.py @@ -1,102 +1,160 @@ import os 
-import unittest +import pytest +import posixpath -from mock import Mock - -from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.exceptions import MlflowException from mlflow.store.local_artifact_repo import LocalArtifactRepository from mlflow.utils.file_utils import TempDir -class TestLocalArtifactRepo(unittest.TestCase): - def _get_contents(self, repo, dir_name): - return sorted([(f.path, f.is_dir, f.file_size) for f in repo.list_artifacts(dir_name)]) - - def test_basic_functions(self): - with TempDir() as test_root, TempDir() as tmp: - repo = ArtifactRepository.from_artifact_uri(test_root.path(), Mock()) - self.assertIsInstance(repo, LocalArtifactRepository) - self.assertListEqual(repo.list_artifacts(), []) - with self.assertRaises(Exception): - open(repo.download_artifacts("test.txt")).read() - - # Create and log a test.txt file directly - artifact_name = "test.txt" - local_file = tmp.path(artifact_name) - with open(local_file, "w") as f: - f.write("Hello world!") - repo.log_artifact(local_file) - text = open(repo.download_artifacts(artifact_name)).read() - self.assertEqual(text, "Hello world!") - # Check that it actually got written in the expected place - text = open(os.path.join(test_root.path(), artifact_name)).read() - self.assertEqual(text, "Hello world!") - - # log artifact in subdir - repo.log_artifact(local_file, "aaa") - text = open(repo.download_artifacts(os.path.join("aaa", artifact_name))).read() - self.assertEqual(text, "Hello world!") - - # log a hidden artifact - hidden_file = tmp.path(".mystery") - with open(hidden_file, 'w') as f: - f.write("42") - repo.log_artifact(hidden_file, "aaa") - hidden_text = open(repo.download_artifacts(os.path.join("aaa", hidden_file))).read() - self.assertEqual(hidden_text, "42") - - # log artifacts in deep nested subdirs - nested_subdir = "bbb/ccc/ddd/eee/fghi" - repo.log_artifact(local_file, nested_subdir) - text = open(repo.download_artifacts(os.path.join(nested_subdir, artifact_name))).read() - self.assertEqual(text, "Hello world!") - - for bad_path in ["/", "//", "/tmp", "/bad_path", ".", "../terrible_path"]: - with self.assertRaises(Exception): - repo.log_artifact(local_file, bad_path) - - # Create a subdirectory for log_artifacts - os.mkdir(tmp.path("subdir")) - os.mkdir(tmp.path("subdir", "nested")) - with open(tmp.path("subdir", "a.txt"), "w") as f: - f.write("A") - with open(tmp.path("subdir", "b.txt"), "w") as f: - f.write("B") - with open(tmp.path("subdir", "nested", "c.txt"), "w") as f: - f.write("C") - repo.log_artifacts(tmp.path("subdir")) - text = open(repo.download_artifacts("a.txt")).read() - self.assertEqual(text, "A") - text = open(repo.download_artifacts("b.txt")).read() - self.assertEqual(text, "B") - text = open(repo.download_artifacts("nested/c.txt")).read() - self.assertEqual(text, "C") - infos = self._get_contents(repo, None) - self.assertListEqual(infos, [ - ("a.txt", False, 1), - ("aaa", True, None), - ("b.txt", False, 1), - ("bbb", True, None), - ("nested", True, None), - ("test.txt", False, 12), - ]) - - # Verify contents of subdirectories - self.assertListEqual(self._get_contents(repo, "nested"), [("nested/c.txt", False, 1)]) - - infos = self._get_contents(repo, "aaa") - self.assertListEqual(infos, [("aaa/.mystery", False, 2), ("aaa/test.txt", False, 12)]) - self.assertListEqual(self._get_contents(repo, "bbb"), [("bbb/ccc", True, None)]) - self.assertListEqual(self._get_contents(repo, "bbb/ccc"), [("bbb/ccc/ddd", True, None)]) - - infos = self._get_contents(repo, "bbb/ccc/ddd/eee") - 
self.assertListEqual(infos, [("bbb/ccc/ddd/eee/fghi", True, None)]) - - infos = self._get_contents(repo, "bbb/ccc/ddd/eee/fghi") - self.assertListEqual(infos, [("bbb/ccc/ddd/eee/fghi/test.txt", False, 12)]) - - # Download a subdirectory - downloaded_dir = repo.download_artifacts("nested") - self.assertEqual(os.path.basename(downloaded_dir), "nested") - text = open(os.path.join(downloaded_dir, "c.txt")).read() - self.assertEqual(text, "C") +@pytest.fixture +def local_artifact_root(tmpdir): + return str(tmpdir) + + +@pytest.fixture +def local_artifact_repo(local_artifact_root): + from mlflow.utils.file_utils import path_to_local_file_uri + return LocalArtifactRepository(artifact_uri=path_to_local_file_uri(local_artifact_root)) + + +def test_list_artifacts(local_artifact_repo, local_artifact_root): + assert len(local_artifact_repo.list_artifacts()) == 0 + + artifact_rel_path = "artifact" + artifact_path = os.path.join(local_artifact_root, artifact_rel_path) + with open(artifact_path, "w") as f: + f.write("artifact") + artifacts_list = local_artifact_repo.list_artifacts() + assert len(artifacts_list) == 1 + assert artifacts_list[0].path == artifact_rel_path + + +def test_log_artifacts(local_artifact_repo, local_artifact_root): + artifact_rel_path = "test.txt" + artifact_text = "hello world!" + with TempDir() as src_dir: + artifact_src_path = src_dir.path(artifact_rel_path) + with open(artifact_src_path, "w") as f: + f.write(artifact_text) + local_artifact_repo.log_artifact(artifact_src_path) + + artifacts_list = local_artifact_repo.list_artifacts() + assert len(artifacts_list) == 1 + assert artifacts_list[0].path == artifact_rel_path + + artifact_dst_path = os.path.join(local_artifact_root, artifact_rel_path) + assert os.path.exists(artifact_dst_path) + assert artifact_dst_path != artifact_src_path + assert open(artifact_dst_path).read() == artifact_text + + +def test_download_artifacts(local_artifact_repo): + artifact_rel_path = "test.txt" + artifact_text = "hello world!" + with TempDir(chdr=True) as local_dir: + artifact_src_path = local_dir.path(artifact_rel_path) + with open(artifact_src_path, "w") as f: + f.write(artifact_text) + local_artifact_repo.log_artifact(artifact_src_path) + dst_path = local_artifact_repo.download_artifacts(artifact_path=artifact_rel_path) + assert open(dst_path).read() == artifact_text + + +def test_download_artifacts_does_not_copy(local_artifact_repo): + """ + The LocalArtifactRepository.download_artifact function should not copy the artifact if + the ``dst_path`` argument is None. + """ + artifact_rel_path = "test.txt" + artifact_text = "hello world!" + with TempDir(chdr=True) as local_dir: + artifact_src_path = local_dir.path(artifact_rel_path) + with open(artifact_src_path, "w") as f: + f.write(artifact_text) + local_artifact_repo.log_artifact(artifact_src_path) + dst_path = local_artifact_repo.download_artifacts(artifact_path=artifact_rel_path) + assert open(dst_path).read() == artifact_text + assert dst_path.startswith(local_artifact_repo.artifact_dir), \ + 'downloaded artifact is not in local_artifact_repo.artifact_dir root' + + +def test_download_artifacts_returns_absolute_paths(local_artifact_repo): + artifact_rel_path = "test.txt" + artifact_text = "hello world!" 
+ with TempDir(chdr=True) as local_dir: + artifact_src_path = local_dir.path(artifact_rel_path) + with open(artifact_src_path, "w") as f: + f.write(artifact_text) + local_artifact_repo.log_artifact(artifact_src_path) + + for dst_dir in ["dst1", local_dir.path("dst2"), None]: + if dst_dir is not None: + os.makedirs(dst_dir) + dst_path = local_artifact_repo.download_artifacts( + artifact_path=artifact_rel_path, + dst_path=dst_dir) + if dst_dir is not None: + # If dst_dir isn't None, assert we're actually downloading to dst_dir. + assert dst_path.startswith(os.path.abspath(dst_dir)) + assert dst_path == os.path.abspath(dst_path) + + +@pytest.mark.parametrize("repo_subdir_path", [ + "aaa", + "aaa/bbb", + "aaa/bbb/ccc/ddd", +]) +def test_artifacts_are_logged_to_and_downloaded_from_repo_subdirectory_successfully( + local_artifact_repo, repo_subdir_path): + artifact_rel_path = "test.txt" + artifact_text = "hello world!" + with TempDir(chdr=True) as local_dir: + artifact_src_path = local_dir.path(artifact_rel_path) + with open(artifact_src_path, "w") as f: + f.write(artifact_text) + local_artifact_repo.log_artifact(artifact_src_path, artifact_path=repo_subdir_path) + + downloaded_subdir = local_artifact_repo.download_artifacts(repo_subdir_path) + assert os.path.isdir(downloaded_subdir) + subdir_contents = os.listdir(downloaded_subdir) + assert len(subdir_contents) == 1 + assert artifact_rel_path in subdir_contents + assert open(os.path.join(downloaded_subdir, artifact_rel_path)).read() == artifact_text + + downloaded_file = local_artifact_repo.download_artifacts( + posixpath.join(repo_subdir_path, artifact_rel_path)) + assert open(downloaded_file).read() == artifact_text + + +def test_log_artifact_throws_exception_for_invalid_artifact_paths(local_artifact_repo): + with TempDir() as local_dir: + for bad_artifact_path in ["/", "//", "/tmp", "/bad_path", ".", "../terrible_path"]: + with pytest.raises(MlflowException) as exc_info: + local_artifact_repo.log_artifact(local_dir.path(), bad_artifact_path) + assert "Invalid artifact path" in str(exc_info) + + +def test_logging_directory_of_artifacts_produces_expected_repo_contents(local_artifact_repo): + with TempDir() as local_dir: + os.mkdir(local_dir.path("subdir")) + os.mkdir(local_dir.path("subdir", "nested")) + with open(local_dir.path("subdir", "a.txt"), "w") as f: + f.write("A") + with open(local_dir.path("subdir", "b.txt"), "w") as f: + f.write("B") + with open(local_dir.path("subdir", "nested", "c.txt"), "w") as f: + f.write("C") + local_artifact_repo.log_artifacts(local_dir.path("subdir")) + assert open(local_artifact_repo.download_artifacts("a.txt")).read() == "A" + assert open(local_artifact_repo.download_artifacts("b.txt")).read() == "B" + assert open(local_artifact_repo.download_artifacts("nested/c.txt")).read() == "C" + + +def test_hidden_files_are_logged_correctly(local_artifact_repo): + with TempDir() as local_dir: + hidden_file = local_dir.path(".mystery") + with open(hidden_file, "w") as f: + f.write("42") + local_artifact_repo.log_artifact(hidden_file) + assert open(local_artifact_repo.download_artifacts(".mystery")).read() == "42" diff --git a/tests/store/test_rest_store.py b/tests/store/test_rest_store.py index 62425a3750d15..100c56d2dd7c8 100644 --- a/tests/store/test_rest_store.py +++ b/tests/store/test_rest_store.py @@ -4,14 +4,25 @@ import mock import six +import mlflow +from mlflow.entities import Param, Metric, RunTag, SourceType, ViewType from mlflow.exceptions import MlflowException -from mlflow.entities import Param, Metric,
RunTag -from mlflow.protos.service_pb2 import DeleteExperiment, RestoreExperiment, LogParam, LogMetric, \ - SetTag, DeleteRun, RestoreRun +from mlflow.protos.service_pb2 import CreateRun, DeleteExperiment, DeleteRun, LogBatch, \ + LogMetric, LogParam, RestoreExperiment, RestoreRun, RunTag as ProtoRunTag, SearchRuns, \ + SetTag, DeleteTag from mlflow.store.rest_store import RestStore from mlflow.utils.proto_json_utils import message_to_json +from mlflow.utils.rest_utils import MlflowHostCreds, _DEFAULT_HEADERS -from mlflow.utils.rest_utils import MlflowHostCreds + +class MyCoolException(Exception): + pass + + +class CustomErrorHandlingRestStore(RestStore): + def _verify_rest_response(self, response, endpoint): + if response.status_code != 200: + raise MyCoolException() class TestRestStore(unittest.TestCase): @@ -22,14 +33,14 @@ def mock_request(**kwargs): kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v is not None) assert kwargs == { 'method': 'GET', - 'json': {'view_type': 'ACTIVE_ONLY'}, - 'url': 'https://hello/api/2.0/preview/mlflow/experiments/list', - 'headers': {}, + 'params': {'view_type': 'ACTIVE_ONLY'}, + 'url': 'https://hello/api/2.0/mlflow/experiments/list', + 'headers': _DEFAULT_HEADERS, 'verify': True, } response = mock.MagicMock response.status_code = 200 - response.text = '{"experiments": [{"name": "Exp!"}]}' + response.text = '{"experiments": [{"name": "Exp!", "lifecycle_stage": "active"}]}' return response request.side_effect = mock_request @@ -50,12 +61,24 @@ def test_failed_http_request(self, request): store.list_experiments() self.assertIn("RESOURCE_DOES_NOT_EXIST: No experiment", str(cm.exception)) + @mock.patch('requests.request') + def test_failed_http_request_custom_handler(self, request): + response = mock.MagicMock + response.status_code = 404 + response.text = '{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "No experiment"}' + request.return_value = response + + store = CustomErrorHandlingRestStore(lambda: MlflowHostCreds('https://hello')) + with self.assertRaises(MyCoolException): + store.list_experiments() + @mock.patch('requests.request') def test_response_with_unknown_fields(self, request): experiment_json = { - "experiment_id": 1, + "experiment_id": "1", "name": "My experiment", "artifact_location": "foo", + "lifecycle_stage": "deleted", "OMG_WHAT_IS_THIS_FIELD": "Hooly cow", } @@ -70,11 +93,14 @@ def test_response_with_unknown_fields(self, request): assert len(experiments) == 1 assert experiments[0].name == 'My experiment' + def _args(self, host_creds, endpoint, method, json_body): + return {'host_creds': host_creds, + 'endpoint': "/api/2.0/mlflow/%s" % endpoint, + 'method': method, + 'json': json.loads(json_body)} + def _verify_requests(self, http_request, host_creds, endpoint, method, json_body): - http_request.assert_called_with(host_creds=host_creds, - endpoint="/api/2.0/preview/mlflow/%s" % endpoint, - method=method, - json=json.loads(json_body)) + http_request.assert_called_with(**(self._args(host_creds, endpoint, method, json_body))) @mock.patch('requests.request') def test_requestor(self, request): @@ -86,48 +112,125 @@ def test_requestor(self, request): creds = MlflowHostCreds('https://hello') store = RestStore(lambda: creds) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: + user_name = "mock user" + source_name = "rest test" + + source_name_patch = mock.patch( + "mlflow.tracking.context.default_context._get_source_name", return_value=source_name + ) + source_type_patch = mock.patch( + 
"mlflow.tracking.context.default_context._get_source_type", + return_value=SourceType.LOCAL + ) + with mock.patch('mlflow.store.rest_store.http_request') as mock_http, \ + mock.patch('mlflow.tracking.utils._get_store', return_value=store), \ + mock.patch('mlflow.tracking.context.default_context._get_user', + return_value=user_name), \ + mock.patch('time.time', return_value=13579), \ + source_name_patch, source_type_patch: + with mlflow.start_run(experiment_id="43"): + cr_body = message_to_json(CreateRun(experiment_id="43", + user_id=user_name, start_time=13579000, + tags=[ProtoRunTag(key='mlflow.source.name', + value=source_name), + ProtoRunTag(key='mlflow.source.type', + value='LOCAL'), + ProtoRunTag(key='mlflow.user', + value=user_name)])) + expected_kwargs = self._args(creds, "runs/create", "POST", cr_body) + + assert mock_http.call_count == 1 + actual_kwargs = mock_http.call_args[1] + + # Test the passed tag values separately from the rest of the request + # Tag order is inconsistent on Python 2 and 3, but the order does not matter + expected_tags = expected_kwargs['json'].pop('tags') + actual_tags = actual_kwargs['json'].pop('tags') + assert ( + sorted(expected_tags, key=lambda t: t['key']) == + sorted(actual_tags, key=lambda t: t['key']) + ) + assert expected_kwargs == actual_kwargs + + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: store.log_param("some_uuid", Param("k1", "v1")) - body = message_to_json(LogParam(run_uuid="some_uuid", key="k1", value="v1")) + body = message_to_json(LogParam( + run_uuid="some_uuid", run_id="some_uuid", key="k1", value="v1")) self._verify_requests(mock_http, creds, "runs/log-parameter", "POST", body) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: store.set_tag("some_uuid", RunTag("t1", "abcd"*1000)) - body = message_to_json(SetTag(run_uuid="some_uuid", key="t1", value="abcd"*1000)) + body = message_to_json(SetTag( + run_uuid="some_uuid", run_id="some_uuid", key="t1", value="abcd"*1000)) self._verify_requests(mock_http, creds, "runs/set-tag", "POST", body) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: - store.log_metric("u2", Metric("m1", 0.87, 12345)) - body = message_to_json(LogMetric(run_uuid="u2", key="m1", value=0.87, timestamp=12345)) + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: + store.delete_tag("some_uuid", "t1") + body = message_to_json(DeleteTag(run_id="some_uuid", key="t1")) + self._verify_requests(mock_http, creds, + "runs/delete-tag", "POST", body) + + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: + store.log_metric("u2", Metric("m1", 0.87, 12345, 3)) + body = message_to_json(LogMetric( + run_uuid="u2", run_id="u2", key="m1", value=0.87, timestamp=12345, step=3)) self._verify_requests(mock_http, creds, "runs/log-metric", "POST", body) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: + metrics = [Metric("m1", 0.87, 12345, 0), Metric("m2", 0.49, 12345, -1), + Metric("m3", 0.58, 12345, 2)] + params = [Param("p1", "p1val"), Param("p2", "p2val")] + tags = [RunTag("t1", "t1val"), RunTag("t2", "t2val")] + store.log_batch(run_id="u2", metrics=metrics, params=params, tags=tags) + metric_protos = [metric.to_proto() for metric in metrics] + param_protos = [param.to_proto() for param in params] + tag_protos = [tag.to_proto() for tag in tags] + body = 
message_to_json(LogBatch(run_id="u2", metrics=metric_protos, + params=param_protos, tags=tag_protos)) + self._verify_requests(mock_http, creds, + "runs/log-batch", "POST", body) + + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: store.delete_run("u25") self._verify_requests(mock_http, creds, "runs/delete", "POST", message_to_json(DeleteRun(run_id="u25"))) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: store.restore_run("u76") self._verify_requests(mock_http, creds, "runs/restore", "POST", message_to_json(RestoreRun(run_id="u76"))) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: - store.delete_experiment(0) + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: + store.delete_experiment("0") self._verify_requests(mock_http, creds, "experiments/delete", "POST", - message_to_json(DeleteExperiment(experiment_id=0))) + message_to_json(DeleteExperiment(experiment_id="0"))) - with mock.patch('mlflow.store.rest_store.http_request_safe') as mock_http: - store.restore_experiment(0) + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: + store.restore_experiment("0") self._verify_requests(mock_http, creds, "experiments/restore", "POST", - message_to_json(RestoreExperiment(experiment_id=0))) + message_to_json(RestoreExperiment(experiment_id="0"))) + with mock.patch('mlflow.store.rest_store.http_request') as mock_http: + response = mock.MagicMock + response.text = '{"runs": ["1a", "2b", "3c"], "next_page_token": "67890fghij"}' + mock_http.return_value = response + result = store.search_runs(["0", "1"], "params.p1 = 'a'", ViewType.ACTIVE_ONLY, + max_results=10, order_by=["a"], page_token="12345abcde") + + expected_message = SearchRuns(experiment_ids=["0", "1"], filter="params.p1 = 'a'", + run_view_type=ViewType.to_proto(ViewType.ACTIVE_ONLY), + max_results=10, order_by=["a"], page_token="12345abcde") + self._verify_requests(mock_http, creds, + "runs/search", "POST", + message_to_json(expected_message)) + assert result.token == "67890fghij" if __name__ == '__main__': unittest.main() diff --git a/tests/store/test_runs_artifact_repo.py b/tests/store/test_runs_artifact_repo.py new file mode 100644 index 0000000000000..cd6acf807648c --- /dev/null +++ b/tests/store/test_runs_artifact_repo.py @@ -0,0 +1,63 @@ +import pytest +from mock import Mock + +import mlflow +from mlflow.exceptions import MlflowException +from mlflow.store.runs_artifact_repo import RunsArtifactRepository +from mlflow.store.s3_artifact_repo import S3ArtifactRepository + +from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import + + +@pytest.mark.parametrize("uri, expected_run_id, expected_artifact_path", [ + ('runs:/1234abcdf1394asdfwer33/path/to/model', '1234abcdf1394asdfwer33', 'path/to/model'), + ('runs:/1234abcdf1394asdfwer33/path/to/model/', '1234abcdf1394asdfwer33', 'path/to/model/'), + ('runs:/1234abcdf1394asdfwer33', '1234abcdf1394asdfwer33', None), + ('runs:/1234abcdf1394asdfwer33/', '1234abcdf1394asdfwer33', None), + ('runs:///1234abcdf1394asdfwer33/', '1234abcdf1394asdfwer33', None), +]) +def test_parse_runs_uri_valid_input(uri, expected_run_id, expected_artifact_path): + (run_id, artifact_path) = RunsArtifactRepository.parse_runs_uri(uri) + assert run_id == expected_run_id + assert artifact_path == expected_artifact_path + + +@pytest.mark.parametrize("uri", [ + 'notruns:/1234abcdf1394asdfwer33/', # wrong scheme + 
'runs:/', # no run id + 'runs:1234abcdf1394asdfwer33/', # missing slash + 'runs://1234abcdf1394asdfwer33/', # hostnames are not yet supported +]) +def test_parse_runs_uri_invalid_input(uri): + with pytest.raises(MlflowException): + RunsArtifactRepository.parse_runs_uri(uri) + + +@pytest.mark.usefixtures("tracking_uri_mock") +def test_runs_artifact_repo_init(): + artifact_location = "s3://blah_bucket/" + experiment_id = mlflow.create_experiment("expr_abc", artifact_location) + with mlflow.start_run(experiment_id=experiment_id): + run_id = mlflow.active_run().info.run_id + runs_uri = "runs:/%s/path/to/model" % run_id + runs_repo = RunsArtifactRepository(runs_uri) + + assert runs_repo.artifact_uri == runs_uri + assert isinstance(runs_repo.repo, S3ArtifactRepository) + expected_absolute_uri = "%s%s/artifacts/path/to/model" % (artifact_location, run_id) + assert runs_repo.repo.artifact_uri == expected_absolute_uri + + +def test_runs_artifact_repo_uses_repo_download_artifacts(): + """ + The RunsArtifactRepository should delegate `download_artifacts` to its self.repo.download_artifacts + function + """ + artifact_location = "s3://blah_bucket/" + experiment_id = mlflow.create_experiment("expr_abcd", artifact_location) + with mlflow.start_run(experiment_id=experiment_id): + run_id = mlflow.active_run().info.run_id + runs_repo = RunsArtifactRepository('runs:/{}'.format(run_id)) + runs_repo.repo = Mock() + runs_repo.download_artifacts('artifact_path', 'dst_path') + runs_repo.repo.download_artifacts.assert_called_once() diff --git a/tests/store/test_s3_artifact_repo.py b/tests/store/test_s3_artifact_repo.py index 32546e324c0c2..258b264614700 100644 --- a/tests/store/test_s3_artifact_repo.py +++ b/tests/store/test_s3_artifact_repo.py @@ -1,81 +1,129 @@ import os -import unittest - -import boto3 -from mock import Mock -from moto import mock_s3 - -from mlflow.store.artifact_repo import ArtifactRepository -from mlflow.store.s3_artifact_repo import S3ArtifactRepository -from mlflow.utils.file_utils import TempDir - - -class TestS3ArtifactRepo(unittest.TestCase): - @mock_s3 - def test_basic_functions(self): - with TempDir() as tmp: - # Create a mock S3 bucket in moto - # Note that we must set these as environment variables in case users - # so that boto does not attempt to assume credentials from the ~/.aws/config - # or IAM role. moto does not correctly pass the arguments to boto3.client().
- os.environ["AWS_ACCESS_KEY_ID"] = "a" - os.environ["AWS_SECRET_ACCESS_KEY"] = "b" - s3 = boto3.client("s3") - s3.create_bucket(Bucket="test_bucket") - - repo = ArtifactRepository.from_artifact_uri("s3://test_bucket/some/path", Mock()) - self.assertIsInstance(repo, S3ArtifactRepository) - self.assertListEqual(repo.list_artifacts(), []) - with self.assertRaises(Exception): - open(repo.download_artifacts("test.txt")).read() - - # Create and log a test.txt file directly - with open(tmp.path("test.txt"), "w") as f: - f.write("Hello world!") - repo.log_artifact(tmp.path("test.txt")) - text = open(repo.download_artifacts("test.txt")).read() - self.assertEqual(text, "Hello world!") - # Check that it actually made it to S3 - obj = s3.get_object(Bucket="test_bucket", Key="some/path/test.txt") - text = obj["Body"].read().decode('utf-8') - self.assertEqual(text, "Hello world!") - - # Create a subdirectory for log_artifacts - os.mkdir(tmp.path("subdir")) - os.mkdir(tmp.path("subdir", "nested")) - with open(tmp.path("subdir", "a.txt"), "w") as f: - f.write("A") - with open(tmp.path("subdir", "b.txt"), "w") as f: - f.write("B") - with open(tmp.path("subdir", "nested", "c.txt"), "w") as f: - f.write("C") - repo.log_artifacts(tmp.path("subdir")) - text = open(repo.download_artifacts("a.txt")).read() - self.assertEqual(text, "A") - text = open(repo.download_artifacts("b.txt")).read() - self.assertEqual(text, "B") - text = open(repo.download_artifacts("nested/c.txt")).read() - self.assertEqual(text, "C") - infos = sorted([(f.path, f.is_dir, f.file_size) for f in repo.list_artifacts()]) - self.assertListEqual(infos, [ - ("a.txt", False, 1), - ("b.txt", False, 1), - ("nested", True, None), - ("test.txt", False, 12) - ]) - infos = sorted([(f.path, f.is_dir, f.file_size) for f in repo.list_artifacts("nested")]) - self.assertListEqual(infos, [("nested/c.txt", False, 1)]) - - # Download a subdirectory - downloaded_dir = repo.download_artifacts("nested") - self.assertEqual(os.path.basename(downloaded_dir), "nested") - text = open(os.path.join(downloaded_dir, "c.txt")).read() - self.assertEqual(text, "C") - - # Download the root directory - downloaded_dir = repo.download_artifacts("") - dir_contents = os.listdir(downloaded_dir) - assert "nested" in dir_contents - assert os.path.isdir(os.path.join(downloaded_dir, "nested")) - assert "a.txt" in dir_contents - assert "b.txt" in dir_contents +import posixpath + +import pytest + +from mlflow.store.artifact_repository_registry import get_artifact_repository + +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import + + +@pytest.fixture +def s3_artifact_root(mock_s3_bucket): + return "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + + +def test_file_artifact_is_logged_and_downloaded_successfully(s3_artifact_root, tmpdir): + file_name = "test.txt" + file_path = os.path.join(str(tmpdir), file_name) + file_text = "Hello world!" 
+ + with open(file_path, "w") as f: + f.write(file_text) + + repo = get_artifact_repository(posixpath.join(s3_artifact_root, "some/path")) + repo.log_artifact(file_path) + downloaded_text = open(repo.download_artifacts(file_name)).read() + assert downloaded_text == file_text + + +def test_file_and_directories_artifacts_are_logged_and_downloaded_successfully_in_batch( + s3_artifact_root, tmpdir): + subdir_path = str(tmpdir.mkdir("subdir")) + nested_path = os.path.join(subdir_path, "nested") + os.makedirs(nested_path) + with open(os.path.join(subdir_path, "a.txt"), "w") as f: + f.write("A") + with open(os.path.join(subdir_path, "b.txt"), "w") as f: + f.write("B") + with open(os.path.join(nested_path, "c.txt"), "w") as f: + f.write("C") + + repo = get_artifact_repository(posixpath.join(s3_artifact_root, "some/path")) + repo.log_artifacts(subdir_path) + + # Download individual files and verify correctness of their contents + downloaded_file_a_text = open(repo.download_artifacts("a.txt")).read() + assert downloaded_file_a_text == "A" + downloaded_file_b_text = open(repo.download_artifacts("b.txt")).read() + assert downloaded_file_b_text == "B" + downloaded_file_c_text = open(repo.download_artifacts("nested/c.txt")).read() + assert downloaded_file_c_text == "C" + + # Download the nested directory and verify correctness of its contents + downloaded_dir = repo.download_artifacts("nested") + assert os.path.basename(downloaded_dir) == "nested" + text = open(os.path.join(downloaded_dir, "c.txt")).read() + assert text == "C" + + # Download the root directory and verify correctness of its contents + downloaded_dir = repo.download_artifacts("") + dir_contents = os.listdir(downloaded_dir) + assert "nested" in dir_contents + assert os.path.isdir(os.path.join(downloaded_dir, "nested")) + assert "a.txt" in dir_contents + assert "b.txt" in dir_contents + + +def test_file_and_directories_artifacts_are_logged_and_listed_successfully_in_batch( + s3_artifact_root, tmpdir): + subdir_path = str(tmpdir.mkdir("subdir")) + nested_path = os.path.join(subdir_path, "nested") + os.makedirs(nested_path) + with open(os.path.join(subdir_path, "a.txt"), "w") as f: + f.write("A") + with open(os.path.join(subdir_path, "b.txt"), "w") as f: + f.write("B") + with open(os.path.join(nested_path, "c.txt"), "w") as f: + f.write("C") + + repo = get_artifact_repository(posixpath.join(s3_artifact_root, "some/path")) + repo.log_artifacts(subdir_path) + + root_artifacts_listing = sorted( + [(f.path, f.is_dir, f.file_size) for f in repo.list_artifacts()]) + assert root_artifacts_listing == [ + ("a.txt", False, 1), + ("b.txt", False, 1), + ("nested", True, None), + ] + + nested_artifacts_listing = sorted( + [(f.path, f.is_dir, f.file_size) for f in repo.list_artifacts("nested")]) + assert nested_artifacts_listing == [("nested/c.txt", False, 1)] + + +def test_download_directory_artifact_succeeds_when_artifact_root_is_s3_bucket_root( + s3_artifact_root, tmpdir): + file_a_name = "a.txt" + file_a_text = "A" + subdir_path = str(tmpdir.mkdir("subdir")) + nested_path = os.path.join(subdir_path, "nested") + os.makedirs(nested_path) + with open(os.path.join(nested_path, file_a_name), "w") as f: + f.write(file_a_text) + + repo = get_artifact_repository(s3_artifact_root) + repo.log_artifacts(subdir_path) + + downloaded_dir_path = repo.download_artifacts("nested") + assert file_a_name in os.listdir(downloaded_dir_path) + with open(os.path.join(downloaded_dir_path, file_a_name), "r") as f: + assert f.read() == file_a_text + + +def 
test_download_file_artifact_succeeds_when_artifact_root_is_s3_bucket_root( + s3_artifact_root, tmpdir): + file_a_name = "a.txt" + file_a_text = "A" + file_a_path = os.path.join(str(tmpdir), file_a_name) + with open(file_a_path, "w") as f: + f.write(file_a_text) + + repo = get_artifact_repository(s3_artifact_root) + repo.log_artifact(file_a_path) + + downloaded_file_path = repo.download_artifacts(file_a_name) + with open(downloaded_file_path, "r") as f: + assert f.read() == file_a_text diff --git a/tests/store/test_sftp_artifact_repo.py b/tests/store/test_sftp_artifact_repo.py index 4308572cba96d..8d8dcad75d728 100644 --- a/tests/store/test_sftp_artifact_repo.py +++ b/tests/store/test_sftp_artifact_repo.py @@ -1,11 +1,12 @@ -from mock import Mock, MagicMock +from mock import MagicMock import pytest from tempfile import NamedTemporaryFile import pysftp -from mlflow.store.artifact_repo import ArtifactRepository +from mlflow.store.artifact_repository_registry import get_artifact_repository from mlflow.store.sftp_artifact_repo import SFTPArtifactRepository from mlflow.utils.file_utils import TempDir import os +import posixpath @pytest.fixture @@ -17,11 +18,10 @@ def sftp_mock(): def test_artifact_uri_factory(): from paramiko.ssh_exception import SSHException with pytest.raises(SSHException): - ArtifactRepository.from_artifact_uri( - "sftp://user:pass@test_sftp:123/some/path", - Mock()) + get_artifact_repository("sftp://user:pass@test_sftp:123/some/path") +@pytest.mark.large def test_list_artifacts_empty(sftp_mock): repo = SFTPArtifactRepository("sftp://test_sftp/some/path", sftp_mock) sftp_mock.listdir = MagicMock(return_value=[]) @@ -29,6 +29,7 @@ def test_list_artifacts_empty(sftp_mock): sftp_mock.listdir.assert_called_once_with("/some/path") +@pytest.mark.large def test_list_artifacts(sftp_mock): artifact_root_path = "/experiment_id/run_id/" repo = SFTPArtifactRepository("sftp://test_sftp"+artifact_root_path, sftp_mock) @@ -42,8 +43,9 @@ def test_list_artifacts(sftp_mock): file_size = 678 dir_path = "model" sftp_mock.isdir = MagicMock(side_effect=lambda path: { - (artifact_root_path+file_path): False, - (artifact_root_path+dir_path): True + artifact_root_path: True, + os.path.join(artifact_root_path, file_path): False, + os.path.join(artifact_root_path, dir_path): True, }[path]) sftp_mock.listdir = MagicMock(return_value=[file_path, dir_path]) @@ -65,6 +67,7 @@ def test_list_artifacts(sftp_mock): assert artifacts[1].file_size is None +@pytest.mark.large def test_list_artifacts_with_subdir(sftp_mock): artifact_root_path = "/experiment_id/run_id/" repo = SFTPArtifactRepository("sftp://test_sftp"+artifact_root_path, sftp_mock) @@ -83,8 +86,9 @@ def test_list_artifacts_with_subdir(sftp_mock): sftp_mock.listdir = MagicMock(return_value=[file_path, subdir_name]) sftp_mock.isdir = MagicMock(side_effect=lambda path: { - (artifact_root_path+dir_name+'/'+file_path): False, - (artifact_root_path+dir_name+'/'+subdir_name): True + posixpath.join(artifact_root_path, dir_name): True, + posixpath.join(artifact_root_path, dir_name, file_path): False, + posixpath.join(artifact_root_path, dir_name, subdir_name): True, }[path]) file_stat = MagicMock() @@ -97,10 +101,10 @@ def test_list_artifacts_with_subdir(sftp_mock): sftp_mock.stat.assert_called_once_with(artifact_root_path + dir_name + '/' + file_path) assert len(artifacts) == 2 - assert artifacts[0].path == dir_name + '/' + file_path + assert artifacts[0].path == posixpath.join(dir_name, file_path) assert artifacts[0].is_dir is False assert 
artifacts[0].file_size == file_size - assert artifacts[1].path == dir_name + '/' + subdir_name + assert artifacts[1].path == posixpath.join(dir_name, subdir_name) assert artifacts[1].is_dir is True assert artifacts[1].file_size is None @@ -117,11 +121,11 @@ def test_log_artifact(): store = SFTPArtifactRepository(sftp_path) store.log_artifact(local.name, artifact_path) - remote_file = os.path.join( + remote_file = posixpath.join( remote.path(), '.' if artifact_path is None else artifact_path, os.path.basename(local.name)) - assert os.path.isfile(remote_file) + assert posixpath.isfile(remote_file) with open(remote_file, 'r') as remote_content: assert remote_content.read() == file_content @@ -147,16 +151,16 @@ def test_log_artifacts(): store = SFTPArtifactRepository(sftp_path) store.log_artifacts(local.path(), artifact_path) - remote_dir = os.path.join( + remote_dir = posixpath.join( remote.path(), '.' if artifact_path is None else artifact_path) - assert os.path.isdir(remote_dir) - assert os.path.isdir(os.path.join(remote_dir, directory)) - assert os.path.isfile(os.path.join(remote_dir, file1)) - assert os.path.isfile(os.path.join(remote_dir, directory, file2)) + assert posixpath.isdir(remote_dir) + assert posixpath.isdir(posixpath.join(remote_dir, directory)) + assert posixpath.isfile(posixpath.join(remote_dir, file1)) + assert posixpath.isfile(posixpath.join(remote_dir, directory, file2)) - with open(os.path.join(remote_dir, file1), 'r') as remote_content: + with open(posixpath.join(remote_dir, file1), 'r') as remote_content: assert remote_content.read() == file_content_1 - with open(os.path.join(remote_dir, directory, file2), 'rb') as remote_content: + with open(posixpath.join(remote_dir, directory, file2), 'rb') as remote_content: assert remote_content.read() == file_content_2 diff --git a/tests/store/test_sqlalchemy_store.py b/tests/store/test_sqlalchemy_store.py new file mode 100644 index 0000000000000..2646b34d2525b --- /dev/null +++ b/tests/store/test_sqlalchemy_store.py @@ -0,0 +1,1251 @@ +import os +import shutil +import six +import tempfile +import unittest +import warnings + +import mock +import pytest +import sqlalchemy +import time +import mlflow +import uuid + +import mlflow.db +from mlflow.entities import ViewType, RunTag, SourceType, RunStatus, Experiment, Metric, Param +from mlflow.protos.databricks_pb2 import ErrorCode, RESOURCE_DOES_NOT_EXIST,\ + INVALID_PARAMETER_VALUE, INTERNAL_ERROR +from mlflow.store import SEARCH_MAX_RESULTS_DEFAULT +from mlflow.store.db.utils import _get_schema_version +from mlflow.store.dbmodels import models +from mlflow import entities +from mlflow.exceptions import MlflowException +from mlflow.store.sqlalchemy_store import SqlAlchemyStore +from mlflow.utils import extract_db_type_from_uri +from tests.resources.db.initial_models import Base as InitialBase +from tests.integration.utils import invoke_cli_runner + + +DB_URI = 'sqlite:///' +ARTIFACT_URI = 'artifact_folder' + + +class TestParseDbUri(unittest.TestCase): + + def test_correct_db_type_from_uri(self): + # try each of the main drivers per supported database type + target_db_type_uris = { + 'sqlite': ('pysqlite', 'pysqlcipher'), + 'postgresql': ('psycopg2', 'pg8000', 'psycopg2cffi', + 'pypostgresql', 'pygresql', 'zxjdbc'), + 'mysql': ('mysqldb', 'pymysql', 'mysqlconnector', 'cymysql', + 'oursql', 'gaerdbms', 'pyodbc', 'zxjdbc'), + 'mssql': ('pyodbc', 'mxodbc', 'pymssql', 'zxjdbc', 'adodbapi') + } + for target_db_type, drivers in target_db_type_uris.items(): + # try the driver-less
version, which will revert SQLAlchemy to the default driver + uri = "%s://..." % target_db_type + parsed_db_type = extract_db_type_from_uri(uri) + self.assertEqual(target_db_type, parsed_db_type) + # try each of the popular drivers (per SQLAlchemy's dialect pages) + for driver in drivers: + uri = "%s+%s://..." % (target_db_type, driver) + parsed_db_type = extract_db_type_from_uri(uri) + self.assertEqual(target_db_type, parsed_db_type) + + def _db_uri_error(self, db_uris, expected_message_part): + for db_uri in db_uris: + with self.assertRaises(MlflowException) as e: + extract_db_type_from_uri(db_uri) + self.assertIn(expected_message_part, e.exception.message) + + def test_fail_on_unsupported_db_type(self): + bad_db_uri_strings = ['oracle://...', 'oracle+cx_oracle://...', + 'snowflake://...', '://...', 'abcdefg'] + self._db_uri_error(bad_db_uri_strings, "Supported database engines are ") + + def test_fail_on_multiple_drivers(self): + bad_db_uri_strings = ['mysql+pymsql+pyodbc://...'] + self._db_uri_error(bad_db_uri_strings, + "mlflow.org/docs/latest/tracking.html#storage for format specifications") + + +class TestSqlAlchemyStoreSqlite(unittest.TestCase): + + def _get_store(self, db_uri=''): + return SqlAlchemyStore(db_uri, ARTIFACT_URI) + + def setUp(self): + self.maxDiff = None # print all differences on assert failures + fd, self.temp_dbfile = tempfile.mkstemp() + # Close handle immediately so that we can remove the file later on in Windows + os.close(fd) + self.db_url = "%s%s" % (DB_URI, self.temp_dbfile) + self.store = self._get_store(self.db_url) + + def tearDown(self): + models.Base.metadata.drop_all(self.store.engine) + os.remove(self.temp_dbfile) + shutil.rmtree(ARTIFACT_URI) + + def _experiment_factory(self, names): + if type(names) is list: + return [self.store.create_experiment(name=name) for name in names] + + return self.store.create_experiment(name=names) + + def _verify_logged(self, run_id, metrics, params, tags): + run = self.store.get_run(run_id) + all_metrics = sum([self.store.get_metric_history(run_id, key) + for key in run.data.metrics], []) + assert len(all_metrics) == len(metrics) + logged_metrics = [(m.key, m.value, m.timestamp, m.step) for m in all_metrics] + assert set(logged_metrics) == set([(m.key, m.value, m.timestamp, m.step) for m in metrics]) + logged_tags = set([(tag_key, tag_value) for tag_key, tag_value in run.data.tags.items()]) + assert set([(tag.key, tag.value) for tag in tags]) <= logged_tags + assert len(run.data.params) == len(params) + logged_params = [(param_key, param_val) for param_key, param_val in run.data.params.items()] + assert set(logged_params) == set([(param.key, param.value) for param in params]) + + def test_default_experiment(self): + experiments = self.store.list_experiments() + self.assertEqual(len(experiments), 1) + + first = experiments[0] + self.assertEqual(first.experiment_id, "0") + self.assertEqual(first.name, "Default") + + def test_default_experiment_lifecycle(self): + default_experiment = self.store.get_experiment(experiment_id=0) + self.assertEqual(default_experiment.name, Experiment.DEFAULT_EXPERIMENT_NAME) + self.assertEqual(default_experiment.lifecycle_stage, entities.LifecycleStage.ACTIVE) + + self._experiment_factory('aNothEr') + all_experiments = [e.name for e in self.store.list_experiments()] + six.assertCountEqual(self, set(['aNothEr', 'Default']), set(all_experiments)) + + self.store.delete_experiment(0) + + six.assertCountEqual(self, ['aNothEr'], [e.name for e in self.store.list_experiments()]) + another = 
self.store.get_experiment(1) + self.assertEqual('aNothEr', another.name) + + default_experiment = self.store.get_experiment(experiment_id=0) + self.assertEqual(default_experiment.name, Experiment.DEFAULT_EXPERIMENT_NAME) + self.assertEqual(default_experiment.lifecycle_stage, entities.LifecycleStage.DELETED) + + # destroy SqlStore and make a new one + del self.store + self.store = self._get_store(self.db_url) + + # test that default experiment is not reactivated + default_experiment = self.store.get_experiment(experiment_id=0) + self.assertEqual(default_experiment.name, Experiment.DEFAULT_EXPERIMENT_NAME) + self.assertEqual(default_experiment.lifecycle_stage, entities.LifecycleStage.DELETED) + + six.assertCountEqual(self, ['aNothEr'], [e.name for e in self.store.list_experiments()]) + all_experiments = [e.name for e in self.store.list_experiments(ViewType.ALL)] + six.assertCountEqual(self, set(['aNothEr', 'Default']), set(all_experiments)) + + # ensure that experiment ID for active experiment is unchanged + another = self.store.get_experiment(1) + self.assertEqual('aNothEr', another.name) + + def test_raise_duplicate_experiments(self): + with self.assertRaises(Exception): + self._experiment_factory(['test', 'test']) + + def test_raise_experiment_dont_exist(self): + with self.assertRaises(Exception): + self.store.get_experiment(experiment_id=100) + + def test_delete_experiment(self): + experiments = self._experiment_factory(['morty', 'rick', 'rick and morty']) + + all_experiments = self.store.list_experiments() + self.assertEqual(len(all_experiments), len(experiments) + 1) # default + + exp_id = experiments[0] + self.store.delete_experiment(exp_id) + + updated_exp = self.store.get_experiment(exp_id) + self.assertEqual(updated_exp.lifecycle_stage, entities.LifecycleStage.DELETED) + + self.assertEqual(len(self.store.list_experiments()), len(all_experiments) - 1) + + def test_get_experiment(self): + name = 'goku' + experiment_id = self._experiment_factory(name) + actual = self.store.get_experiment(experiment_id) + self.assertEqual(actual.name, name) + self.assertEqual(actual.experiment_id, experiment_id) + + actual_by_name = self.store.get_experiment_by_name(name) + self.assertEqual(actual_by_name.name, name) + self.assertEqual(actual_by_name.experiment_id, experiment_id) + + def test_list_experiments(self): + testnames = ['blue', 'red', 'green'] + + experiments = self._experiment_factory(testnames) + actual = self.store.list_experiments() + + self.assertEqual(len(experiments) + 1, len(actual)) # default + + with self.store.ManagedSessionMaker() as session: + for experiment_id in experiments: + res = session.query(models.SqlExperiment).filter_by( + experiment_id=experiment_id).first() + self.assertIn(res.name, testnames) + self.assertEqual(str(res.experiment_id), experiment_id) + + def test_create_experiments(self): + with self.store.ManagedSessionMaker() as session: + result = session.query(models.SqlExperiment).all() + self.assertEqual(len(result), 1) + + experiment_id = self.store.create_experiment(name='test exp') + self.assertEqual(experiment_id, "1") + with self.store.ManagedSessionMaker() as session: + result = session.query(models.SqlExperiment).all() + self.assertEqual(len(result), 2) + + test_exp = session.query(models.SqlExperiment).filter_by(name='test exp').first() + self.assertEqual(str(test_exp.experiment_id), experiment_id) + self.assertEqual(test_exp.name, 'test exp') + + actual = self.store.get_experiment(experiment_id) + self.assertEqual(actual.experiment_id, experiment_id) +
self.assertEqual(actual.name, 'test exp') + + def test_run_tag_model(self): + # Create a run whose UUID we can reference when creating tag models. + # `run_id` is a foreign key in the tags table; therefore, in order + # to insert a tag with a given run UUID, the UUID must be present in + # the runs table + run = self._run_factory() + with self.store.ManagedSessionMaker() as session: + new_tag = models.SqlTag(run_uuid=run.info.run_id, key='test', value='val') + session.add(new_tag) + session.commit() + added_tags = [ + tag for tag in session.query(models.SqlTag).all() + if tag.key == new_tag.key + ] + self.assertEqual(len(added_tags), 1) + added_tag = added_tags[0].to_mlflow_entity() + self.assertEqual(added_tag.value, new_tag.value) + + def test_metric_model(self): + # Create a run whose UUID we can reference when creating metric models. + # `run_id` is a foreign key in the metrics table; therefore, in order + # to insert a metric with a given run UUID, the UUID must be present in + # the runs table + run = self._run_factory() + with self.store.ManagedSessionMaker() as session: + new_metric = models.SqlMetric(run_uuid=run.info.run_id, key='accuracy', value=0.89) + session.add(new_metric) + session.commit() + metrics = session.query(models.SqlMetric).all() + self.assertEqual(len(metrics), 1) + + added_metric = metrics[0].to_mlflow_entity() + self.assertEqual(added_metric.value, new_metric.value) + self.assertEqual(added_metric.key, new_metric.key) + + def test_param_model(self): + # Create a run whose UUID we can reference when creating parameter models. + # `run_id` is a foreign key in the params table; therefore, in order + # to insert a parameter with a given run UUID, the UUID must be present in + # the runs table + run = self._run_factory() + with self.store.ManagedSessionMaker() as session: + new_param = models.SqlParam( + run_uuid=run.info.run_id, key='accuracy', value='test param') + session.add(new_param) + session.commit() + params = session.query(models.SqlParam).all() + self.assertEqual(len(params), 1) + + added_param = params[0].to_mlflow_entity() + self.assertEqual(added_param.value, new_param.value) + self.assertEqual(added_param.key, new_param.key) + + def test_run_needs_uuid(self): + # Depending on the implementation, a NULL identity key may result in different + # exceptions, including IntegrityError (sqlite) and FlushError (MySQL).
+ # Therefore, we check for the more generic 'SQLAlchemyError' + with self.assertRaises(MlflowException) as exception_context: + warnings.simplefilter("ignore") + with self.store.ManagedSessionMaker() as session, warnings.catch_warnings(): + run = models.SqlRun() + session.add(run) + warnings.resetwarnings() + assert exception_context.exception.error_code == ErrorCode.Name(INTERNAL_ERROR) + + def test_run_data_model(self): + with self.store.ManagedSessionMaker() as session: + m1 = models.SqlMetric(key='accuracy', value=0.89) + m2 = models.SqlMetric(key='recal', value=0.89) + p1 = models.SqlParam(key='loss', value='test param') + p2 = models.SqlParam(key='blue', value='test param') + + session.add_all([m1, m2, p1, p2]) + + run_data = models.SqlRun(run_uuid=uuid.uuid4().hex) + run_data.params.append(p1) + run_data.params.append(p2) + run_data.metrics.append(m1) + run_data.metrics.append(m2) + + session.add(run_data) + session.commit() + + run_datums = session.query(models.SqlRun).all() + actual = run_datums[0] + self.assertEqual(len(run_datums), 1) + self.assertEqual(len(actual.params), 2) + self.assertEqual(len(actual.metrics), 2) + + def test_run_info(self): + experiment_id = self._experiment_factory('test exp') + config = { + 'experiment_id': experiment_id, + 'name': 'test run', + 'user_id': 'Anderson', + 'run_uuid': 'test', + 'status': RunStatus.to_string(RunStatus.SCHEDULED), + 'source_type': SourceType.to_string(SourceType.LOCAL), + 'source_name': 'Python application', + 'entry_point_name': 'main.py', + 'start_time': int(time.time()), + 'end_time': int(time.time()), + 'source_version': mlflow.__version__, + 'lifecycle_stage': entities.LifecycleStage.ACTIVE, + 'artifact_uri': '//' + } + run = models.SqlRun(**config).to_mlflow_entity() + + for k, v in config.items(): + # These keys were removed from RunInfo. 
+ if k in ['source_name', 'source_type', 'source_version', 'name', 'entry_point_name']: + continue + + v2 = getattr(run.info, k) + if k == 'source_type': + self.assertEqual(v, SourceType.to_string(v2)) + else: + self.assertEqual(v, v2) + + def _get_run_configs(self, experiment_id=None, tags=(), start_time=None): + return { + 'experiment_id': experiment_id, + 'user_id': 'Anderson', + 'start_time': start_time if start_time is not None else int(time.time()), + 'tags': tags + } + + def _run_factory(self, config=None): + if not config: + config = self._get_run_configs() + + experiment_id = config.get("experiment_id", None) + if not experiment_id: + experiment_id = self._experiment_factory('test exp') + config["experiment_id"] = experiment_id + + return self.store.create_run(**config) + + def test_create_run_with_tags(self): + experiment_id = self._experiment_factory('test_create_run') + tags = [RunTag('3', '4'), RunTag('1', '2')] + expected = self._get_run_configs(experiment_id=experiment_id, tags=tags) + + actual = self.store.create_run(**expected) + + self.assertEqual(actual.info.experiment_id, experiment_id) + self.assertEqual(actual.info.user_id, expected["user_id"]) + self.assertEqual(actual.info.start_time, expected["start_time"]) + + self.assertEqual(len(actual.data.tags), len(tags)) + expected_tags = {tag.key: tag.value for tag in tags} + self.assertEqual(actual.data.tags, expected_tags) + + def test_to_mlflow_entity_and_proto(self): + # Create a run and log metrics, params, tags to the run + created_run = self._run_factory() + run_id = created_run.info.run_id + self.store.log_metric( + run_id=run_id, + metric=entities.Metric(key='my-metric', value=3.4, timestamp=0, step=0)) + self.store.log_param(run_id=run_id, param=Param(key='my-param', value='param-val')) + self.store.set_tag(run_id=run_id, tag=RunTag(key='my-tag', value='tag-val')) + + # Verify that we can fetch the run & convert it to proto - Python protobuf bindings + # will perform type-checking to ensure all values have the right types + run = self.store.get_run(run_id) + run.to_proto() + + # Verify attributes of the Python run entity + self.assertIsInstance(run.info, entities.RunInfo) + self.assertIsInstance(run.data, entities.RunData) + + self.assertEqual(run.data.metrics, {"my-metric": 3.4}) + self.assertEqual(run.data.params, {"my-param": "param-val"}) + self.assertEqual(run.data.tags["my-tag"], "tag-val") + + # Get the parent experiment of the run, verify it can be converted to protobuf + exp = self.store.get_experiment(run.info.experiment_id) + exp.to_proto() + + def test_delete_run(self): + run = self._run_factory() + + self.store.delete_run(run.info.run_id) + + with self.store.ManagedSessionMaker() as session: + actual = session.query(models.SqlRun).filter_by(run_uuid=run.info.run_id).first() + self.assertEqual(actual.lifecycle_stage, entities.LifecycleStage.DELETED) + + deleted_run = self.store.get_run(run.info.run_id) + self.assertEqual(actual.run_uuid, deleted_run.info.run_id) + + def test_log_metric(self): + run = self._run_factory() + + tkey = 'blahmetric' + tval = 100.0 + metric = entities.Metric(tkey, tval, int(time.time()), 0) + metric2 = entities.Metric(tkey, tval, int(time.time()) + 2, 0) + self.store.log_metric(run.info.run_id, metric) + self.store.log_metric(run.info.run_id, metric2) + + run = self.store.get_run(run.info.run_id) + self.assertTrue(tkey in run.data.metrics and run.data.metrics[tkey] == tval) + + # SQL store _get_run method returns full history of recorded metrics. 
+ # Should return duplicates as well + # MLflow RunData contains only the last reported values for metrics. + with self.store.ManagedSessionMaker() as session: + sql_run_metrics = self.store._get_run(session, run.info.run_id).metrics + self.assertEqual(2, len(sql_run_metrics)) + self.assertEqual(1, len(run.data.metrics)) + + def test_log_metric_allows_multiple_values_at_same_ts_and_run_data_uses_max_ts_value(self): + run = self._run_factory() + run_id = run.info.run_id + metric_name = "test-metric-1" + # Check that we get the max of (step, timestamp, value) in that order + tuples_to_log = [ + (0, 100, 1000), + (3, 40, 100), # larger step wins even though it has smaller value + (3, 50, 10), # larger timestamp wins even though it has smaller value + (3, 50, 20), # tiebreak by max value + (3, 50, 20), # duplicate metrics with same (step, timestamp, value) are ok + # verify that we can log steps out of order / negative steps + (-3, 900, 900), + (-1, 800, 800), + ] + for step, timestamp, value in reversed(tuples_to_log): + self.store.log_metric(run_id, Metric(metric_name, value, timestamp, step)) + + metric_history = self.store.get_metric_history(run_id, metric_name) + logged_tuples = [(m.step, m.timestamp, m.value) for m in metric_history] + assert set(logged_tuples) == set(tuples_to_log) + + run_data = self.store.get_run(run_id).data + run_metrics = run_data.metrics + assert len(run_metrics) == 1 + assert run_metrics[metric_name] == 20 + metric_obj = run_data._metric_objs[0] + assert metric_obj.key == metric_name + assert metric_obj.step == 3 + assert metric_obj.timestamp == 50 + assert metric_obj.value == 20 + + def test_log_null_metric(self): + run = self._run_factory() + + tkey = 'blahmetric' + tval = None + metric = entities.Metric(tkey, tval, int(time.time()), 0) + + warnings.simplefilter("ignore") + with self.assertRaises(MlflowException) as exception_context, warnings.catch_warnings(): + self.store.log_metric(run.info.run_id, metric) + warnings.resetwarnings() + assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) + + def test_log_param(self): + run = self._run_factory() + + tkey = 'blahmetric' + tval = '100.0' + param = entities.Param(tkey, tval) + param2 = entities.Param('new param', 'new key') + self.store.log_param(run.info.run_id, param) + self.store.log_param(run.info.run_id, param2) + self.store.log_param(run.info.run_id, param2) + + run = self.store.get_run(run.info.run_id) + self.assertEqual(2, len(run.data.params)) + self.assertTrue(tkey in run.data.params and run.data.params[tkey] == tval) + + def test_log_param_uniqueness(self): + run = self._run_factory() + + tkey = 'blahmetric' + tval = '100.0' + param = entities.Param(tkey, tval) + param2 = entities.Param(tkey, 'newval') + self.store.log_param(run.info.run_id, param) + + with self.assertRaises(MlflowException) as e: + self.store.log_param(run.info.run_id, param2) + self.assertIn("Changing param value is not allowed. 
Param with key=", e.exception.message) + + def test_log_empty_str(self): + run = self._run_factory() + + tkey = 'blahmetric' + tval = '' + param = entities.Param(tkey, tval) + param2 = entities.Param('new param', 'new key') + self.store.log_param(run.info.run_id, param) + self.store.log_param(run.info.run_id, param2) + + run = self.store.get_run(run.info.run_id) + self.assertEqual(2, len(run.data.params)) + self.assertTrue(tkey in run.data.params and run.data.params[tkey] == tval) + + def test_log_null_param(self): + run = self._run_factory() + + tkey = 'blahmetric' + tval = None + param = entities.Param(tkey, tval) + + with self.assertRaises(MlflowException) as exception_context: + self.store.log_param(run.info.run_id, param) + assert exception_context.exception.error_code == ErrorCode.Name(INTERNAL_ERROR) + + def test_set_tag(self): + run = self._run_factory() + + tkey = 'test tag' + tval = 'a boogie' + new_val = "new val" + tag = entities.RunTag(tkey, tval) + new_tag = entities.RunTag(tkey, new_val) + self.store.set_tag(run.info.run_id, tag) + # Overwriting tags is allowed + self.store.set_tag(run.info.run_id, new_tag) + + run = self.store.get_run(run.info.run_id) + self.assertTrue(tkey in run.data.tags and run.data.tags[tkey] == new_val) + + def test_delete_tag(self): + run = self._run_factory() + k0, v0 = 'tag0', 'val0' + k1, v1 = 'tag1', 'val1' + tag0 = entities.RunTag(k0, v0) + tag1 = entities.RunTag(k1, v1) + self.store.set_tag(run.info.run_id, tag0) + self.store.set_tag(run.info.run_id, tag1) + # delete a tag and check whether it is correctly deleted. + self.store.delete_tag(run.info.run_id, k0) + run = self.store.get_run(run.info.run_id) + self.assertTrue(k0 not in run.data.tags) + self.assertTrue(k1 in run.data.tags and run.data.tags[k1] == v1) + + # test that deleting a tag works correctly with multiple runs having the same tag. + run2 = self._run_factory(config=self._get_run_configs(run.info.experiment_id)) + self.store.set_tag(run.info.run_id, tag0) + self.store.set_tag(run2.info.run_id, tag0) + self.store.delete_tag(run.info.run_id, k0) + run = self.store.get_run(run.info.run_id) + run2 = self.store.get_run(run2.info.run_id) + self.assertTrue(k0 not in run.data.tags) + self.assertTrue(k0 in run2.data.tags) + # test that you cannot delete tags that don't exist. + with pytest.raises(MlflowException): + self.store.delete_tag(run.info.run_id, "fakeTag") + # test that you cannot delete tags for nonexistent runs + with pytest.raises(MlflowException): + self.store.delete_tag("randomRunId", k0) + # test that you cannot delete tags for deleted runs. 
+ self.store.delete_run(run.info.run_id) + with pytest.raises(MlflowException): + self.store.delete_tag(run.info.run_id, k1) + + def test_get_metric_history(self): + run = self._run_factory() + + key = 'test' + expected = [ + models.SqlMetric(key=key, value=0.6, timestamp=1, step=0).to_mlflow_entity(), + models.SqlMetric(key=key, value=0.7, timestamp=2, step=0).to_mlflow_entity() + ] + + for metric in expected: + self.store.log_metric(run.info.run_id, metric) + + actual = self.store.get_metric_history(run.info.run_id, key) + + six.assertCountEqual(self, + [(m.key, m.value, m.timestamp) for m in expected], + [(m.key, m.value, m.timestamp) for m in actual]) + + def test_list_run_infos(self): + experiment_id = self._experiment_factory('test_exp') + r1 = self._run_factory(config=self._get_run_configs(experiment_id)).info.run_id + r2 = self._run_factory(config=self._get_run_configs(experiment_id)).info.run_id + + def _runs(experiment_id, view_type): + return [r.run_id for r in self.store.list_run_infos(experiment_id, view_type)] + + six.assertCountEqual(self, [r1, r2], _runs(experiment_id, ViewType.ALL)) + six.assertCountEqual(self, [r1, r2], _runs(experiment_id, ViewType.ACTIVE_ONLY)) + self.assertEqual(0, len(_runs(experiment_id, ViewType.DELETED_ONLY))) + + self.store.delete_run(r1) + six.assertCountEqual(self, [r1, r2], _runs(experiment_id, ViewType.ALL)) + six.assertCountEqual(self, [r2], _runs(experiment_id, ViewType.ACTIVE_ONLY)) + six.assertCountEqual(self, [r1], _runs(experiment_id, ViewType.DELETED_ONLY)) + + def test_rename_experiment(self): + new_name = 'new name' + experiment_id = self._experiment_factory('test name') + self.store.rename_experiment(experiment_id, new_name) + + renamed_experiment = self.store.get_experiment(experiment_id) + + self.assertEqual(renamed_experiment.name, new_name) + + def test_update_run_info(self): + run = self._run_factory() + + new_status = entities.RunStatus.FINISHED + endtime = int(time.time()) + + actual = self.store.update_run_info(run.info.run_id, new_status, endtime) + + self.assertEqual(actual.status, RunStatus.to_string(new_status)) + self.assertEqual(actual.end_time, endtime) + + def test_restore_experiment(self): + experiment_id = self._experiment_factory('helloexp') + exp = self.store.get_experiment(experiment_id) + self.assertEqual(exp.lifecycle_stage, entities.LifecycleStage.ACTIVE) + + experiment_id = exp.experiment_id + self.store.delete_experiment(experiment_id) + + deleted = self.store.get_experiment(experiment_id) + self.assertEqual(deleted.experiment_id, experiment_id) + self.assertEqual(deleted.lifecycle_stage, entities.LifecycleStage.DELETED) + + self.store.restore_experiment(exp.experiment_id) + restored = self.store.get_experiment(exp.experiment_id) + self.assertEqual(restored.experiment_id, experiment_id) + self.assertEqual(restored.lifecycle_stage, entities.LifecycleStage.ACTIVE) + + def test_delete_restore_run(self): + run = self._run_factory() + self.assertEqual(run.info.lifecycle_stage, entities.LifecycleStage.ACTIVE) + + with self.assertRaises(MlflowException) as e: + self.store.restore_run(run.info.run_id) + self.assertIn("must be in 'deleted' state", e.exception.message) + + self.store.delete_run(run.info.run_id) + with self.assertRaises(MlflowException) as e: + self.store.delete_run(run.info.run_id) + self.assertIn("must be in 'active' state", e.exception.message) + + deleted = self.store.get_run(run.info.run_id) + self.assertEqual(deleted.info.run_id, run.info.run_id) + self.assertEqual(deleted.info.lifecycle_stage, 
entities.LifecycleStage.DELETED) + + self.store.restore_run(run.info.run_id) + with self.assertRaises(MlflowException) as e: + self.store.restore_run(run.info.run_id) + self.assertIn("must be in 'deleted' state", e.exception.message) + restored = self.store.get_run(run.info.run_id) + self.assertEqual(restored.info.run_id, run.info.run_id) + self.assertEqual(restored.info.lifecycle_stage, entities.LifecycleStage.ACTIVE) + + def test_error_logging_to_deleted_run(self): + exp = self._experiment_factory('error_logging') + run_id = self._run_factory(self._get_run_configs(experiment_id=exp)).info.run_id + + self.store.delete_run(run_id) + self.assertEqual(self.store.get_run(run_id).info.lifecycle_stage, + entities.LifecycleStage.DELETED) + with self.assertRaises(MlflowException) as e: + self.store.log_param(run_id, entities.Param("p1345", "v1")) + self.assertIn("must be in 'active' state", e.exception.message) + + with self.assertRaises(MlflowException) as e: + self.store.log_metric(run_id, entities.Metric("m1345", 1.0, 123, 0)) + self.assertIn("must be in 'active' state", e.exception.message) + + with self.assertRaises(MlflowException) as e: + self.store.set_tag(run_id, entities.RunTag("t1345", "tv1")) + self.assertIn("must be in 'active' state", e.exception.message) + + # restore this run and try again + self.store.restore_run(run_id) + self.assertEqual(self.store.get_run(run_id).info.lifecycle_stage, + entities.LifecycleStage.ACTIVE) + self.store.log_param(run_id, entities.Param("p1345", "v22")) + self.store.log_metric(run_id, entities.Metric("m1345", 34.0, 85, 1)) # earlier timestamp + self.store.set_tag(run_id, entities.RunTag("t1345", "tv44")) + + run = self.store.get_run(run_id) + self.assertEqual(run.data.params, {"p1345": "v22"}) + self.assertEqual(run.data.metrics, {"m1345": 34.0}) + metric_history = self.store.get_metric_history(run_id, "m1345") + self.assertEqual(len(metric_history), 1) + metric_obj = metric_history[0] + self.assertEqual(metric_obj.key, "m1345") + self.assertEqual(metric_obj.value, 34.0) + self.assertEqual(metric_obj.timestamp, 85) + self.assertEqual(metric_obj.step, 1) + self.assertTrue(set([("t1345", "tv44")]) <= set(run.data.tags.items())) + + # Tests for Search API + def _search(self, experiment_id, filter_string=None, + run_view_type=ViewType.ALL, max_results=SEARCH_MAX_RESULTS_DEFAULT): + exps = [experiment_id] if isinstance(experiment_id, int) else experiment_id + return [r.info.run_id + for r in self.store.search_runs(exps, filter_string, run_view_type, max_results)] + + def test_search_vanilla(self): + exp = self._experiment_factory('search_vanilla') + runs = [self._run_factory(self._get_run_configs(exp)).info.run_id + for r in range(3)] + + six.assertCountEqual(self, runs, self._search(exp, run_view_type=ViewType.ALL)) + six.assertCountEqual(self, runs, self._search(exp, run_view_type=ViewType.ACTIVE_ONLY)) + six.assertCountEqual(self, [], self._search(exp, run_view_type=ViewType.DELETED_ONLY)) + + first = runs[0] + + self.store.delete_run(first) + six.assertCountEqual(self, runs, self._search(exp, run_view_type=ViewType.ALL)) + six.assertCountEqual(self, runs[1:], self._search(exp, run_view_type=ViewType.ACTIVE_ONLY)) + six.assertCountEqual(self, [first], self._search(exp, run_view_type=ViewType.DELETED_ONLY)) + + self.store.restore_run(first) + six.assertCountEqual(self, runs, self._search(exp, run_view_type=ViewType.ALL)) + six.assertCountEqual(self, runs, self._search(exp, run_view_type=ViewType.ACTIVE_ONLY)) + six.assertCountEqual(self, [], 
self._search(exp, run_view_type=ViewType.DELETED_ONLY)) + + def test_search_params(self): + experiment_id = self._experiment_factory('search_params') + r1 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + r2 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + + self.store.log_param(r1, entities.Param('generic_param', 'p_val')) + self.store.log_param(r2, entities.Param('generic_param', 'p_val')) + + self.store.log_param(r1, entities.Param('generic_2', 'some value')) + self.store.log_param(r2, entities.Param('generic_2', 'another value')) + + self.store.log_param(r1, entities.Param('p_a', 'abc')) + self.store.log_param(r2, entities.Param('p_b', 'ABC')) + + # test search returns both runs + filter_string = "params.generic_param = 'p_val'" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + # test search returns appropriate run (same key different values per run) + filter_string = "params.generic_2 = 'some value'" + six.assertCountEqual(self, [r1], self._search(experiment_id, filter_string)) + filter_string = "params.generic_2 = 'another value'" + six.assertCountEqual(self, [r2], self._search(experiment_id, filter_string)) + + filter_string = "params.generic_param = 'wrong_val'" + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + filter_string = "params.generic_param != 'p_val'" + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + filter_string = "params.generic_param != 'wrong_val'" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + filter_string = "params.generic_2 != 'wrong_val'" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "params.p_a = 'abc'" + six.assertCountEqual(self, [r1], self._search(experiment_id, filter_string)) + + filter_string = "params.p_b = 'ABC'" + six.assertCountEqual(self, [r2], self._search(experiment_id, filter_string)) + + def test_search_tags(self): + experiment_id = self._experiment_factory('search_tags') + r1 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + r2 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + + self.store.set_tag(r1, entities.RunTag('generic_tag', 'p_val')) + self.store.set_tag(r2, entities.RunTag('generic_tag', 'p_val')) + + self.store.set_tag(r1, entities.RunTag('generic_2', 'some value')) + self.store.set_tag(r2, entities.RunTag('generic_2', 'another value')) + + self.store.set_tag(r1, entities.RunTag('p_a', 'abc')) + self.store.set_tag(r2, entities.RunTag('p_b', 'ABC')) + + # test search returns both runs + six.assertCountEqual(self, [r1, r2], + self._search(experiment_id, + filter_string="tags.generic_tag = 'p_val'")) + # test search returns appropriate run (same key different values per run) + six.assertCountEqual(self, [r1], + self._search(experiment_id, + filter_string="tags.generic_2 = 'some value'")) + six.assertCountEqual(self, [r2], + self._search(experiment_id, + filter_string="tags.generic_2 = 'another value'")) + six.assertCountEqual(self, [], + self._search(experiment_id, + filter_string="tags.generic_tag = 'wrong_val'")) + six.assertCountEqual(self, [], + self._search(experiment_id, + filter_string="tags.generic_tag != 'p_val'")) + six.assertCountEqual(self, [r1, r2], + self._search(experiment_id, + filter_string="tags.generic_tag != 'wrong_val'")) + six.assertCountEqual(self, [r1, r2], + self._search(experiment_id, + filter_string="tags.generic_2 != 'wrong_val'")) + 
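The assertions above and below walk the filter grammar that search_runs accepts: a qualified key (params., tags., metrics., or attribute.), a comparator, and a quoted constant, with clauses joined by "and". A hedged sketch of issuing such a query directly against the store, mirroring the positional call shape of the _search helper defined earlier; store and experiment_id stand in for the suite's fixtures:

```python
from mlflow.entities import ViewType

# Same positional call shape as the `_search` helper above.
matching = store.search_runs(
    [experiment_id],
    "tags.generic_2 = 'some value' and params.p_a = 'abc'",
    ViewType.ACTIVE_ONLY,
    max_results=1000)
print([r.info.run_id for r in matching])
```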
six.assertCountEqual(self, [r1], self._search(experiment_id, + filter_string="tags.p_a = 'abc'")) + six.assertCountEqual(self, [r2], self._search(experiment_id, + filter_string="tags.p_b = 'ABC'")) + + def test_search_metrics(self): + experiment_id = self._experiment_factory('search_metric') + r1 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + r2 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + + self.store.log_metric(r1, entities.Metric("common", 1.0, 1, 0)) + self.store.log_metric(r2, entities.Metric("common", 1.0, 1, 0)) + + self.store.log_metric(r1, entities.Metric("measure_a", 1.0, 1, 0)) + self.store.log_metric(r2, entities.Metric("measure_a", 200.0, 2, 0)) + self.store.log_metric(r2, entities.Metric("measure_a", 400.0, 3, 0)) + + self.store.log_metric(r1, entities.Metric("m_a", 2.0, 2, 0)) + self.store.log_metric(r2, entities.Metric("m_b", 3.0, 2, 0)) + self.store.log_metric(r2, entities.Metric("m_b", 4.0, 8, 0)) # this is the last timestamp + self.store.log_metric(r2, entities.Metric("m_b", 8.0, 3, 0)) + + filter_string = "metrics.common = 1.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common > 0.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common >= 0.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common < 4.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common <= 4.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common != 1.0" + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common >= 3.0" + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + filter_string = "metrics.common <= 0.75" + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + # tests for the same metric name across runs with different values and timestamps + filter_string = "metrics.measure_a > 0.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.measure_a < 50.0" + six.assertCountEqual(self, [r1], self._search(experiment_id, filter_string)) + + filter_string = "metrics.measure_a < 1000.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.measure_a != -12.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.measure_a > 50.0" + six.assertCountEqual(self, [r2], self._search(experiment_id, filter_string)) + + filter_string = "metrics.measure_a = 1.0" + six.assertCountEqual(self, [r1], self._search(experiment_id, filter_string)) + + filter_string = "metrics.measure_a = 400.0" + six.assertCountEqual(self, [r2], self._search(experiment_id, filter_string)) + + # test search with unique metric keys + filter_string = "metrics.m_a > 1.0" + six.assertCountEqual(self, [r1], self._search(experiment_id, filter_string)) + + filter_string = "metrics.m_b > 1.0" + six.assertCountEqual(self, [r2], self._search(experiment_id, filter_string)) + + # there is a recorded metric above this threshold, but not at the last timestamp + filter_string = "metrics.m_b > 5.0" + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + # the metric matches the last reported timestamp
for 'm_b' + filter_string = "metrics.m_b = 4.0" + six.assertCountEqual(self, [r2], self._search(experiment_id, filter_string)) + + def test_search_attrs(self): + e1 = self._experiment_factory('search_attributes_1') + r1 = self._run_factory(self._get_run_configs(experiment_id=e1)).info.run_id + + e2 = self._experiment_factory('search_attrs_2') + r2 = self._run_factory(self._get_run_configs(experiment_id=e2)).info.run_id + + filter_string = "" + six.assertCountEqual(self, [r1, r2], self._search([e1, e2], filter_string)) + + filter_string = "attribute.status != 'blah'" + six.assertCountEqual(self, [r1, r2], self._search([e1, e2], filter_string)) + + filter_string = "attribute.status = '{}'".format(RunStatus.to_string(RunStatus.RUNNING)) + six.assertCountEqual(self, [r1, r2], self._search([e1, e2], filter_string)) + + # change status for one of the runs + self.store.update_run_info(r2, RunStatus.FAILED, 300) + + filter_string = "attribute.status = 'RUNNING'" + six.assertCountEqual(self, [r1], self._search([e1, e2], filter_string)) + + filter_string = "attribute.status = 'FAILED'" + six.assertCountEqual(self, [r2], self._search([e1, e2], filter_string)) + + filter_string = "attribute.status != 'SCHEDULED'" + six.assertCountEqual(self, [r1, r2], self._search([e1, e2], filter_string)) + + filter_string = "attribute.status = 'SCHEDULED'" + six.assertCountEqual(self, [], self._search([e1, e2], filter_string)) + + filter_string = "attribute.status = 'KILLED'" + six.assertCountEqual(self, [], self._search([e1, e2], filter_string)) + + filter_string = "attr.artifact_uri = '{}/{}/{}/artifacts'".format(ARTIFACT_URI, e1, r1) + six.assertCountEqual(self, [r1], self._search([e1, e2], filter_string)) + + filter_string = "attr.artifact_uri = '{}/{}/{}/artifacts'".format(ARTIFACT_URI, e2, r1) + six.assertCountEqual(self, [], self._search([e1, e2], filter_string)) + + filter_string = "attribute.artifact_uri = 'random_artifact_path'" + six.assertCountEqual(self, [], self._search([e1, e2], filter_string)) + + filter_string = "attribute.artifact_uri != 'random_artifact_path'" + six.assertCountEqual(self, [r1, r2], self._search([e1, e2], filter_string)) + + for (k, v) in {"experiment_id": e1, + "lifecycle_stage": "ACTIVE", + "run_id": r1, + "run_uuid": r2}.items(): + with self.assertRaises(MlflowException) as e: + self._search([e1, e2], "attribute.{} = '{}'".format(k, v)) + self.assertIn("Invalid attribute key", e.exception.message) + + def test_search_full(self): + experiment_id = self._experiment_factory('search_params') + r1 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + r2 = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + + self.store.log_param(r1, entities.Param('generic_param', 'p_val')) + self.store.log_param(r2, entities.Param('generic_param', 'p_val')) + + self.store.log_param(r1, entities.Param('p_a', 'abc')) + self.store.log_param(r2, entities.Param('p_b', 'ABC')) + + self.store.log_metric(r1, entities.Metric("common", 1.0, 1, 0)) + self.store.log_metric(r2, entities.Metric("common", 1.0, 1, 0)) + + self.store.log_metric(r1, entities.Metric("m_a", 2.0, 2, 0)) + self.store.log_metric(r2, entities.Metric("m_b", 3.0, 2, 0)) + self.store.log_metric(r2, entities.Metric("m_b", 4.0, 8, 0)) + self.store.log_metric(r2, entities.Metric("m_b", 8.0, 3, 0)) + + filter_string = "params.generic_param = 'p_val' and metrics.common = 1.0" + six.assertCountEqual(self, [r1, r2], self._search(experiment_id, filter_string)) + + # all params and metrics match + filter_string = 
("params.generic_param = 'p_val' and metrics.common = 1.0 " + "and metrics.m_a > 1.0") + six.assertCountEqual(self, [r1], self._search(experiment_id, filter_string)) + + # test with mismatched param + filter_string = ("params.random_bad_name = 'p_val' and metrics.common = 1.0 " + "and metrics.m_a > 1.0") + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + # test with mismatched metric + filter_string = ("params.generic_param = 'p_val' and metrics.common = 1.0 " + "and metrics.m_a > 100.0") + six.assertCountEqual(self, [], self._search(experiment_id, filter_string)) + + def test_search_with_max_results(self): + exp = self._experiment_factory('search_with_max_results') + runs = [self._run_factory(self._get_run_configs(exp, start_time=r)).info.run_id + for r in range(1200)] + # reverse the ordering, since we created the runs in increasing order of start_time + runs.reverse() + + assert(runs[:1000] == self._search(exp)) + for n in [0, 1, 2, 4, 8, 10, 20, 50, 100, 500, 1000, 1200, 2000]: + assert(runs[:min(1200, n)] == self._search(exp, max_results=n)) + + with self.assertRaises(MlflowException) as e: + self._search(exp, max_results=int(1e10)) + self.assertIn("Invalid value for request parameter max_results. It ", e.exception.message) + + def test_search_with_deterministic_max_results(self): + exp = self._experiment_factory('test_search_with_deterministic_max_results') + # Create 10 runs with the same start_time. + # Sort based on run_id + runs = sorted([self._run_factory(self._get_run_configs(exp, start_time=10)).info.run_id + for r in range(10)]) + for n in [0, 1, 2, 4, 8, 10, 20]: + assert(runs[:min(10, n)] == self._search(exp, max_results=n)) + + def test_search_runs_pagination(self): + exp = self._experiment_factory('test_search_runs_pagination') + # test returned token behavior + runs = sorted([self._run_factory(self._get_run_configs(exp, start_time=10)).info.run_id + for r in range(10)]) + result = self.store.search_runs([exp], None, ViewType.ALL, max_results=4) + assert [r.info.run_id for r in result] == runs[0:4] + assert result.token is not None + result = self.store.search_runs([exp], None, ViewType.ALL, max_results=4, + page_token=result.token) + assert [r.info.run_id for r in result] == runs[4:8] + assert result.token is not None + result = self.store.search_runs([exp], None, ViewType.ALL, max_results=4, + page_token=result.token) + assert [r.info.run_id for r in result] == runs[8:] + assert result.token is None + + def test_log_batch(self): + experiment_id = self._experiment_factory('log_batch') + run_id = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + metric_entities = [Metric("m1", 0.87, 12345, 0), Metric("m2", 0.49, 12345, 1)] + param_entities = [Param("p1", "p1val"), Param("p2", "p2val")] + tag_entities = [RunTag("t1", "t1val"), RunTag("t2", "t2val")] + self.store.log_batch( + run_id=run_id, metrics=metric_entities, params=param_entities, tags=tag_entities) + run = self.store.get_run(run_id) + assert run.data.tags == {"t1": "t1val", "t2": "t2val"} + assert run.data.params == {"p1": "p1val", "p2": "p2val"} + metric_histories = sum( + [self.store.get_metric_history(run_id, key) for key in run.data.metrics], []) + metrics = [(m.key, m.value, m.timestamp, m.step) for m in metric_histories] + assert set(metrics) == set([("m1", 0.87, 12345, 0), ("m2", 0.49, 12345, 1)]) + + def test_log_batch_limits(self): + # Test that log_batch at the maximum allowed request size succeeds (i.e. doesn't hit + # SQL limitations, etc.) + experiment_id =
self._experiment_factory('log_batch_limits') + run_id = self._run_factory(self._get_run_configs(experiment_id)).info.run_id + metric_tuples = [("m%s" % i, i, 12345, i * 2) for i in range(1000)] + metric_entities = [Metric(*metric_tuple) for metric_tuple in metric_tuples] + self.store.log_batch(run_id=run_id, metrics=metric_entities, params=[], tags=[]) + run = self.store.get_run(run_id) + metric_histories = sum( + [self.store.get_metric_history(run_id, key) for key in run.data.metrics], []) + metrics = [(m.key, m.value, m.timestamp, m.step) for m in metric_histories] + assert set(metrics) == set(metric_tuples) + + def test_log_batch_param_overwrite_disallowed(self): + # Test that attempting to overwrite a param via log_batch results in an exception and that + # no partial data is logged + run = self._run_factory() + tkey = 'my-param' + param = entities.Param(tkey, 'orig-val') + self.store.log_param(run.info.run_id, param) + + overwrite_param = entities.Param(tkey, 'newval') + tag = entities.RunTag("tag-key", "tag-val") + metric = entities.Metric("metric-key", 3.0, 12345, 0) + with self.assertRaises(MlflowException) as e: + self.store.log_batch(run.info.run_id, metrics=[metric], params=[overwrite_param], + tags=[tag]) + self.assertIn("Changing param value is not allowed. Param with key=", e.exception.message) + assert e.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) + self._verify_logged(run.info.run_id, metrics=[], params=[param], tags=[]) + + def test_log_batch_param_overwrite_disallowed_single_req(self): + # Test that attempting to overwrite a param via log_batch results in an exception + run = self._run_factory() + pkey = "common-key" + param0 = entities.Param(pkey, "orig-val") + param1 = entities.Param(pkey, 'newval') + tag = entities.RunTag("tag-key", "tag-val") + metric = entities.Metric("metric-key", 3.0, 12345, 0) + with self.assertRaises(MlflowException) as e: + self.store.log_batch(run.info.run_id, metrics=[metric], params=[param0, param1], + tags=[tag]) + self.assertIn("Changing param value is not allowed. 
Param with key=", e.exception.message) + assert e.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) + self._verify_logged(run.info.run_id, metrics=[], params=[param0], tags=[]) + + def test_log_batch_accepts_empty_payload(self): + run = self._run_factory() + self.store.log_batch(run.info.run_id, metrics=[], params=[], tags=[]) + self._verify_logged(run.info.run_id, metrics=[], params=[], tags=[]) + + def test_log_batch_internal_error(self): + # Verify that internal errors during the DB save step for log_batch result in + # MlflowExceptions + run = self._run_factory() + + def _raise_exception_fn(*args, **kwargs): # pylint: disable=unused-argument + raise Exception("Some internal error") + with mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore.log_metric") as metric_mock,\ + mock.patch( + "mlflow.store.sqlalchemy_store.SqlAlchemyStore.log_param") as param_mock,\ + mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore.set_tag") as tags_mock: + metric_mock.side_effect = _raise_exception_fn + param_mock.side_effect = _raise_exception_fn + tags_mock.side_effect = _raise_exception_fn + for kwargs in [{"metrics": [Metric("a", 3, 1, 0)]}, {"params": [Param("b", "c")]}, + {"tags": [RunTag("c", "d")]}]: + log_batch_kwargs = {"metrics": [], "params": [], "tags": []} + log_batch_kwargs.update(kwargs) + with self.assertRaises(MlflowException) as e: + self.store.log_batch(run.info.run_id, **log_batch_kwargs) + self.assertIn("Some internal error", str(e.exception.message)) + + def test_log_batch_nonexistent_run(self): + nonexistent_run_id = uuid.uuid4().hex + with self.assertRaises(MlflowException) as e: + self.store.log_batch(nonexistent_run_id, [], [], []) + assert e.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) + assert "Run with id=%s not found" % nonexistent_run_id in e.exception.message + + def test_log_batch_params_idempotency(self): + run = self._run_factory() + params = [Param("p-key", "p-val")] + self.store.log_batch(run.info.run_id, metrics=[], params=params, tags=[]) + self.store.log_batch(run.info.run_id, metrics=[], params=params, tags=[]) + self._verify_logged(run.info.run_id, metrics=[], params=params, tags=[]) + + def test_log_batch_tags_idempotency(self): + run = self._run_factory() + self.store.log_batch( + run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "t-val")]) + self.store.log_batch( + run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "t-val")]) + self._verify_logged( + run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "t-val")]) + + def test_log_batch_allows_tag_overwrite(self): + run = self._run_factory() + self.store.log_batch( + run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "val")]) + self.store.log_batch( + run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "newval")]) + self._verify_logged( + run.info.run_id, metrics=[], params=[], tags=[RunTag("t-key", "newval")]) + + def test_log_batch_allows_tag_overwrite_single_req(self): + run = self._run_factory() + tags = [RunTag("t-key", "val"), RunTag("t-key", "newval")] + self.store.log_batch(run.info.run_id, metrics=[], params=[], tags=tags) + self._verify_logged(run.info.run_id, metrics=[], params=[], tags=[tags[-1]]) + + def test_log_batch_same_metric_repeated_single_req(self): + run = self._run_factory() + metric0 = Metric(key="metric-key", value=1, timestamp=2, step=0) + metric1 = Metric(key="metric-key", value=2, timestamp=3, step=0) + self.store.log_batch(run.info.run_id, params=[], metrics=[metric0, metric1], tags=[])
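The batch logged just above records the same key twice; the suite's expectation is that both points land in the metric history while run.data.metrics keeps a single scalar per key (the value at the last reported timestamp, as the search tests earlier note). A short sketch of that distinction, reusing metric0/metric1 and assuming the suite's store and run fixtures:

```python
history = store.get_metric_history(run.info.run_id, "metric-key")
assert sorted((m.value, m.timestamp) for m in history) == [(1, 2), (2, 3)]

# run.data.metrics collapses the history to one value per key
assert store.get_run(run.info.run_id).data.metrics["metric-key"] == 2
```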
+ self._verify_logged(run.info.run_id, params=[], metrics=[metric0, metric1], tags=[]) + + def test_log_batch_same_metric_repeated_multiple_reqs(self): + run = self._run_factory() + metric0 = Metric(key="metric-key", value=1, timestamp=2, step=0) + metric1 = Metric(key="metric-key", value=2, timestamp=3, step=0) + self.store.log_batch(run.info.run_id, params=[], metrics=[metric0], tags=[]) + self._verify_logged(run.info.run_id, params=[], metrics=[metric0], tags=[]) + self.store.log_batch(run.info.run_id, params=[], metrics=[metric1], tags=[]) + self._verify_logged(run.info.run_id, params=[], metrics=[metric0, metric1], tags=[]) + + def test_upgrade_cli_idempotence(self): + # Repeatedly run `mlflow db upgrade` against our database, verifying that the command + # succeeds and that the DB has the latest schema + engine = sqlalchemy.create_engine(self.db_url) + assert _get_schema_version(engine) == SqlAlchemyStore._get_latest_schema_revision() + for _ in range(3): + invoke_cli_runner(mlflow.db.commands, ['upgrade', self.db_url]) + assert _get_schema_version(engine) == SqlAlchemyStore._get_latest_schema_revision() + + +class TestSqlAlchemyStoreSqliteMigratedDB(TestSqlAlchemyStoreSqlite): + """ + Test case where the user has an existing DB with a schema generated before MLflow 1.0, + then migrates their DB. TODO: update this test in MLflow 1.1 to use InitialBase from + mlflow.store.db.initial_models. + """ + def setUp(self): + fd, self.temp_dbfile = tempfile.mkstemp() + os.close(fd) + self.db_url = "%s%s" % (DB_URI, self.temp_dbfile) + engine = sqlalchemy.create_engine(self.db_url) + InitialBase.metadata.create_all(engine) + invoke_cli_runner(mlflow.db.commands, ['upgrade', self.db_url]) + self.store = SqlAlchemyStore(self.db_url, ARTIFACT_URI) + + def tearDown(self): + os.remove(self.temp_dbfile) + + +@pytest.mark.release +class TestSqlAlchemyStoreMysqlDb(TestSqlAlchemyStoreSqlite): + """ + Run tests against a MySQL database + """ + DEFAULT_MYSQL_PORT = 3306 + + def setUp(self): + db_username = os.environ.get("MYSQL_TEST_USERNAME") + db_password = os.environ.get("MYSQL_TEST_PASSWORD") + db_port = int(os.environ["MYSQL_TEST_PORT"]) if "MYSQL_TEST_PORT" in os.environ \ + else TestSqlAlchemyStoreMysqlDb.DEFAULT_MYSQL_PORT + if db_username is None or db_password is None: + raise Exception( + "Username and password for database tests must be specified via the " + "MYSQL_TEST_USERNAME and MYSQL_TEST_PASSWORD environment variables. " + "In posix shells, you can rerun your test command " + "with the environment variables set, e.g.: MYSQL_TEST_USERNAME=your_username " + "MYSQL_TEST_PASSWORD=your_password. You may optionally " + "specify a database port via MYSQL_TEST_PORT (default is 3306).") + self._db_name = "test_sqlalchemy_store_%s" % uuid.uuid4().hex[:5] + db_server_url = "mysql://%s:%s@localhost:%s" % (db_username, db_password, db_port) + self._engine = sqlalchemy.create_engine(db_server_url) + self._engine.execute("CREATE DATABASE %s" % self._db_name) + self.db_url = "%s/%s" % (db_server_url, self._db_name) + self.store = self._get_store(self.db_url) + + def tearDown(self): + self._engine.execute("DROP DATABASE %s" % self._db_name) + + def test_log_many_entities(self): + """ + Sanity check: verify that we can log a reasonable number of entities without failures due + to connection leaks etc.
+ """ + run = self._run_factory() + for i in range(100): + self.store.log_metric(run.info.run_id, entities.Metric("key", i, i * 2, i * 3)) + self.store.log_param(run.info.run_id, entities.Param("pkey-%s" % i, "pval-%s" % i)) + self.store.set_tag(run.info.run_id, entities.RunTag("tkey-%s" % i, "tval-%s" % i)) diff --git a/tests/store/test_sqlalchemy_store_schema.py b/tests/store/test_sqlalchemy_store_schema.py new file mode 100644 index 0000000000000..44e96d1099082 --- /dev/null +++ b/tests/store/test_sqlalchemy_store_schema.py @@ -0,0 +1,115 @@ +"""Tests verifying that the SQLAlchemyStore generates the expected database schema""" +import os + +import pytest +from alembic import command +from alembic.script import ScriptDirectory +from alembic.migration import MigrationContext # pylint: disable=import-error +from alembic.autogenerate import compare_metadata +import sqlalchemy + +import mlflow.db +from mlflow.exceptions import MlflowException +from mlflow.store.db.utils import _get_alembic_config +from mlflow.store.dbmodels.models import Base +from mlflow.store.sqlalchemy_store import SqlAlchemyStore +from tests.resources.db.initial_models import Base as InitialBase +from tests.store.dump_schema import dump_db_schema +from tests.integration.utils import invoke_cli_runner + + +def _assert_schema_files_equal(generated_schema_file, expected_schema_file): + """ + Assert equivalence of two SQL schema dump files consisting of CREATE TABLE statements delimited + by double-newlines, allowing for the reordering of individual lines within each CREATE TABLE + statement to account for differences in schema-dumping across platforms & Python versions. + """ + # Extract "CREATE TABLE" statement chunks from both files, assuming tables are listed in the + # same order across files + with open(generated_schema_file, "r") as generated_schema_handle: + generated_schema_table_chunks = generated_schema_handle.read().split("\n\n") + with open(expected_schema_file, "r") as expected_schema_handle: + expected_schema_table_chunks = expected_schema_handle.read().split("\n\n") + # Compare the two files table-by-table. We assume each CREATE TABLE statement is valid and + # so sort the lines within the statements before comparing them. + for generated_schema_table, expected_schema_table \ + in zip(generated_schema_table_chunks, expected_schema_table_chunks): + generated_lines = sorted(generated_schema_table.split("\n")) + expected_lines = sorted(expected_schema_table.split("\n")) + assert generated_lines == expected_lines,\ + "Generated schema did not match expected schema. 
Generated schema had table " \ "definition:\n{generated_table}\nExpected schema had table definition:" \ "\n{expected_table}\nIf you intended to make schema changes, run " \ "'python tests/store/dump_schema.py {expected_file}' from your checkout of MLflow to " \ "update the schema snapshot.".format( generated_table=generated_schema_table, expected_table=expected_schema_table, expected_file=expected_schema_file) + + +@pytest.fixture() +def expected_schema_file(): + current_dir = os.path.dirname(os.path.abspath(__file__)) + yield os.path.normpath( + os.path.join(current_dir, os.pardir, "resources", "db", "latest_schema.sql")) + + +@pytest.fixture() +def db_url(tmpdir): + return "sqlite:///%s" % tmpdir.join("db_file").strpath + + +def test_sqlalchemystore_idempotently_generates_up_to_date_schema( + tmpdir, db_url, expected_schema_file): + generated_schema_file = tmpdir.join("generated-schema.sql").strpath + # Repeatedly initialize a SQLAlchemyStore against the same DB URL. Initialization should + # succeed and the schema should be the same. + for _ in range(3): + SqlAlchemyStore(db_url, tmpdir.join("ARTIFACTS").strpath) + dump_db_schema(db_url, dst_file=generated_schema_file) + _assert_schema_files_equal(generated_schema_file, expected_schema_file) + + +def test_running_migrations_generates_expected_schema(tmpdir, expected_schema_file, db_url): + """Test that migrating an existing database generates the desired schema.""" + engine = sqlalchemy.create_engine(db_url) + InitialBase.metadata.create_all(engine) + invoke_cli_runner(mlflow.db.commands, ['upgrade', db_url]) + generated_schema_file = tmpdir.join("generated-schema.sql").strpath + dump_db_schema(db_url, generated_schema_file) + _assert_schema_files_equal(generated_schema_file, expected_schema_file) + + +def test_sqlalchemy_store_detects_schema_mismatch( + tmpdir, db_url): # pylint: disable=unused-argument + def _assert_invalid_schema(engine): + with pytest.raises(MlflowException) as ex: + SqlAlchemyStore._verify_schema(engine) + assert "Detected out-of-date database schema." in str(ex.value) + + # Initialize an empty database & verify that we detect a schema mismatch + engine = sqlalchemy.create_engine(db_url) + _assert_invalid_schema(engine) + # Create legacy tables, verify schema is still out of date + InitialBase.metadata.create_all(engine) + _assert_invalid_schema(engine) + # Run each migration.
Until the last one, schema should be out of date + config = _get_alembic_config(db_url) + script = ScriptDirectory.from_config(config) + revisions = list(script.walk_revisions()) + revisions.reverse() + for rev in revisions[:-1]: + command.upgrade(config, rev.revision) + _assert_invalid_schema(engine) + # Run migrations, schema verification should now pass + invoke_cli_runner(mlflow.db.commands, ['upgrade', db_url]) + SqlAlchemyStore._verify_schema(engine) + + +def test_store_generated_schema_matches_base(tmpdir, db_url): + # Create a SQLAlchemyStore against tmpfile, directly verify that tmpfile contains a + # database with a valid schema + SqlAlchemyStore(db_url, tmpdir.join("ARTIFACTS").strpath) + engine = sqlalchemy.create_engine(db_url) + mc = MigrationContext.configure(engine.connect()) + diff = compare_metadata(mc, Base.metadata) + assert len(diff) == 0 diff --git a/tests/tensorflow/test_tensorflow_model_export.py b/tests/tensorflow/test_tensorflow_model_export.py index 0e2e38bb13b67..b9b6d2b8a5690 100644 --- a/tests/tensorflow/test_tensorflow_model_export.py +++ b/tests/tensorflow/test_tensorflow_model_export.py @@ -4,182 +4,511 @@ import collections import os -import pandas import shutil -import unittest +import pytest +import yaml +import json +import numpy as np import pandas as pd +import pandas.testing import sklearn.datasets as datasets import tensorflow as tf import mlflow -from mlflow import tensorflow, pyfunc -from mlflow.utils.file_utils import TempDir - - -class TestModelExport(unittest.TestCase): - - def helper(self, feature_spec, tmp, estimator, df): - """ - This functions handles exporting, logging, loading back, and predicting on an estimator for - testing purposes. - """ - receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec) - saved_estimator_path = tmp.path("model") - os.makedirs(saved_estimator_path) - # Saving TensorFlow model. - saved_estimator_path = estimator.export_savedmodel(saved_estimator_path, - receiver_fn).decode("utf-8") - # Logging the TensorFlow model just saved. - tensorflow.log_saved_model(saved_model_dir=saved_estimator_path, - signature_def_key="predict", - artifact_path="hello") - # Loading the saved TensorFlow model as a pyfunc. - x = pyfunc.load_pyfunc(saved_estimator_path) - # Predicting on the dataset using the pyfunc. - return x.predict(df) - - def test_log_saved_model(self): - # This tests model logging capabilities on the sklearn.iris dataset. - iris = datasets.load_iris() - X = iris.data[:, :2] # we only take the first two features. - y = iris.target - trainingFeatures = {} - for i in range(0, 2): - # TensorFlow is fickle about feature names, so we remove offending characters - iris.feature_names[i] = iris.feature_names[i].replace(" ", "") - iris.feature_names[i] = iris.feature_names[i].replace("(", "") - iris.feature_names[i] = iris.feature_names[i].replace(")", "") - trainingFeatures[iris.feature_names[i]] = iris.data[:, i:i+1] - tf_feat_cols = [] - feature_names = iris.feature_names[:2] - # Creating TensorFlow-specific numeric columns for input. - for col in iris.feature_names[:2]: - tf_feat_cols.append(tf.feature_column.numeric_column(col)) - # Creating input training function. - input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures, - y, - shuffle=False, - batch_size=1) - # Creating Deep Neural Network Regressor. - estimator = tf.estimator.DNNRegressor(feature_columns=tf_feat_cols, - hidden_units=[1]) - # Training and creating expected predictions on training dataset. 
- estimator.train(input_train, steps=10) - # Saving the estimator's prediction on the training data; assume the DNNRegressor - # produces a single output column named 'predictions' - pred_col = "predictions" - estimator_preds = [s[pred_col] for s in estimator.predict(input_train)] - estimator_preds_df = pd.DataFrame({pred_col: estimator_preds}) - - old_tracking_uri = mlflow.get_tracking_uri() - # should_start_run tests whether or not calling log_model() automatically starts a run. - for should_start_run in [False, True]: - with TempDir(chdr=True, remove_on_exit=True) as tmp: - try: - # Creating dict of features names (str) to placeholders (tensors) - feature_spec = {} - for name in feature_names: - feature_spec[name] = tf.placeholder("float", name=name, shape=[150]) - mlflow.set_tracking_uri("test") - if should_start_run: - mlflow.start_run() - pyfunc_preds_df = self.helper(feature_spec, tmp, estimator, - pandas.DataFrame(data=X, columns=feature_names)) - - # Asserting that the loaded model predictions are as expected. - assert estimator_preds_df.equals(pyfunc_preds_df) - finally: - # Restoring the old logging location. - mlflow.end_run() - mlflow.set_tracking_uri(old_tracking_uri) - - def test_categorical_columns(self): - """ - This tests logging capabilities on datasets with categorical columns. - See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/get_started/\ - regression/imports85.py - for reference code. - """ - with TempDir(chdr=False, remove_on_exit=True) as tmp: - path = os.path.abspath("tests/data/uci-autos-imports-85.data") - # Order is important for the csv-readers, so we use an OrderedDict here. - defaults = collections.OrderedDict([ - ("body-style", [""]), - ("curb-weight", [0.0]), - ("highway-mpg", [0.0]), - ("price", [0.0]) - ]) - - types = collections.OrderedDict((key, type(value[0])) - for key, value in defaults.items()) - df = pandas.read_csv(path, names=types.keys(), dtype=types, na_values="?") - df = df.dropna() - - # Extract the label from the features dataframe. - y_train = df.pop("price") - - # Creating the input training function required. - trainingFeatures = {} - - for i in df: - trainingFeatures[i] = df[i].values - - input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures, - y_train.values, - shuffle=False, - batch_size=1) - - # Creating the feature columns required for the DNNRegressor. - body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"] - body_style = tf.feature_column.categorical_column_with_vocabulary_list( - key="body-style", vocabulary_list=body_style_vocab) - feature_columns = [ - tf.feature_column.numeric_column(key="curb-weight"), - tf.feature_column.numeric_column(key="highway-mpg"), - # Since this is a DNN model, convert categorical columns from sparse - # to dense. - # Wrap them in an `indicator_column` to create a - # one-hot vector from the input. - tf.feature_column.indicator_column(body_style) - ] - - # Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns - # defined above as input. - estimator = tf.estimator.DNNRegressor( - hidden_units=[20, 20], feature_columns=feature_columns) - - # Training the estimator. 
- estimator.train(input_fn=input_train, steps=10) - # Saving the estimator's prediction on the training data; assume the DNNRegressor - # produces a single output column named 'predictions' - pred_col = "predictions" - estimator_preds = [s[pred_col] for s in estimator.predict(input_train)] - estimator_preds_df = pd.DataFrame({pred_col: estimator_preds}) - # Setting the logging such that it is in the temp folder and deleted after the test. - old_tracking_dir = mlflow.get_tracking_uri() - tracking_dir = os.path.abspath(tmp.path("mlruns")) - mlflow.set_tracking_uri("file://%s" % tracking_dir) - mlflow.start_run() - try: - # Creating dict of features names (str) to placeholders (tensors) - feature_spec = {} - feature_spec["body-style"] = tf.placeholder("string", - name="body-style", - shape=[None]) - feature_spec["curb-weight"] = tf.placeholder("float", - name="curb-weight", - shape=[None]) - feature_spec["highway-mpg"] = tf.placeholder("float", - name="highway-mpg", - shape=[None]) - - pyfunc_preds_df = self.helper(feature_spec, tmp, estimator, df) - # Asserting that the loaded model predictions are as expected. Allow for some - # imprecision as this is expected with TensorFlow. - pandas.testing.assert_frame_equal( - pyfunc_preds_df, estimator_preds_df, check_less_precise=6) - finally: - # Restoring the old logging location. - mlflow.end_run() - mlflow.set_tracking_uri(old_tracking_dir) +import mlflow.tensorflow +import mlflow.pyfunc.scoring_server as pyfunc_scoring_server +from mlflow.exceptions import MlflowException +from mlflow import pyfunc +from mlflow.store.s3_artifact_repo import S3ArtifactRepository +from mlflow.tracking.artifact_utils import _download_artifact_from_uri +from mlflow.utils.environment import _mlflow_conda_env +from mlflow.utils.model_utils import _get_flavor_configuration + +from tests.helper_functions import score_model_in_sagemaker_docker_container +from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import +from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import + +SavedModelInfo = collections.namedtuple( + "SavedModelInfo", + ["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"]) + + +@pytest.fixture +def saved_tf_iris_model(tmpdir): + iris = datasets.load_iris() + X = iris.data[:, :2] # we only take the first two features + y = iris.target + trainingFeatures = {} + for i in range(0, 2): + # TensorFlow is fickle about feature names, so we remove offending characters + iris.feature_names[i] = iris.feature_names[i].replace(" ", "") + iris.feature_names[i] = iris.feature_names[i].replace("(", "") + iris.feature_names[i] = iris.feature_names[i].replace(")", "") + trainingFeatures[iris.feature_names[i]] = iris.data[:, i:i+1] + tf_feat_cols = [] + feature_names = iris.feature_names[:2] + # Create TensorFlow-specific numeric columns for input. 
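Both SavedModel fixtures in this file follow the same export recipe, which the sketch below compresses: build a raw serving-input receiver from a feature spec, then export the trained estimator. Here `estimator` and `feature_spec` are assumed to be built as in the fixture that continues below; the export directory name is a placeholder:

```python
import tensorflow as tf

# Build a receiver that maps each named input tensor to its placeholder.
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
export_dir = estimator.export_savedmodel("saved_model", receiver_fn).decode("utf-8")
# `export_dir` now holds a SavedModel exposing the "predict" signature used below.
```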
+ for col in iris.feature_names[:2]: + tf_feat_cols.append(tf.feature_column.numeric_column(col)) + # Create a training function for the estimator + input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures, + y, + shuffle=False, + batch_size=1) + estimator = tf.estimator.DNNRegressor(feature_columns=tf_feat_cols, + hidden_units=[1]) + # Train the estimator and obtain expected predictions on the training dataset + estimator.train(input_train, steps=10) + estimator_preds = np.array([s["predictions"] for s in estimator.predict(input_train)]).ravel() + estimator_preds_df = pd.DataFrame({"predictions": estimator_preds}) + + # Define a function for estimator inference + feature_spec = {} + for name in feature_names: + feature_spec[name] = tf.placeholder("float", name=name, shape=[150]) + receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec) + + # Save the estimator and its inference function + saved_estimator_path = str(tmpdir.mkdir("saved_model")) + saved_estimator_path = estimator.export_savedmodel(saved_estimator_path, + receiver_fn).decode("utf-8") + return SavedModelInfo(path=saved_estimator_path, + meta_graph_tags=[tf.saved_model.tag_constants.SERVING], + signature_def_key="predict", + inference_df=pd.DataFrame(data=X, columns=feature_names), + expected_results_df=estimator_preds_df) + + +@pytest.fixture +def saved_tf_categorical_model(tmpdir): + path = os.path.abspath("tests/data/uci-autos-imports-85.data") + # Order is important for the csv-readers, so we use an OrderedDict here + defaults = collections.OrderedDict([ + ("body-style", [""]), + ("curb-weight", [0.0]), + ("highway-mpg", [0.0]), + ("price", [0.0]) + ]) + types = collections.OrderedDict((key, type(value[0])) + for key, value in defaults.items()) + df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?") + df = df.dropna() + + # Extract the label from the features dataframe + y_train = df.pop("price") + + # Create the required input training function + trainingFeatures = {} + for i in df: + trainingFeatures[i] = df[i].values + input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures, + y_train.values, + shuffle=False, + batch_size=1) + + # Create the feature columns required for the DNNRegressor + body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"] + body_style = tf.feature_column.categorical_column_with_vocabulary_list( + key="body-style", vocabulary_list=body_style_vocab) + feature_columns = [ + tf.feature_column.numeric_column(key="curb-weight"), + tf.feature_column.numeric_column(key="highway-mpg"), + # Since this is a DNN model, convert categorical columns from sparse to dense. 
+ # Then, wrap them in an `indicator_column` to create a one-hot vector from the input + tf.feature_column.indicator_column(body_style) + ] + + # Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns + # defined above as input + estimator = tf.estimator.DNNRegressor( + hidden_units=[20, 20], feature_columns=feature_columns) + + # Train the estimator and obtain expected predictions on the training dataset + estimator.train(input_fn=input_train, steps=10) + estimator_preds = np.array([s["predictions"] for s in estimator.predict(input_train)]).ravel() + estimator_preds_df = pd.DataFrame({"predictions": estimator_preds}) + + # Define a function for estimator inference + feature_spec = { + "body-style": tf.placeholder("string", name="body-style", shape=[None]), + "curb-weight": tf.placeholder("float", name="curb-weight", shape=[None]), + "highway-mpg": tf.placeholder("float", name="highway-mpg", shape=[None]) + } + receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec) + + # Save the estimator and its inference function + saved_estimator_path = str(tmpdir.mkdir("saved_model")) + saved_estimator_path = estimator.export_savedmodel(saved_estimator_path, + receiver_fn).decode("utf-8") + return SavedModelInfo(path=saved_estimator_path, + meta_graph_tags=[tf.saved_model.tag_constants.SERVING], + signature_def_key="predict", + inference_df=df, + expected_results_df=estimator_preds_df) + + +@pytest.fixture +def tf_custom_env(tmpdir): + conda_env = os.path.join(str(tmpdir), "conda_env.yml") + _mlflow_conda_env( + conda_env, + additional_conda_deps=["tensorflow", "pytest"]) + return conda_env + + +@pytest.fixture +def model_path(tmpdir): + return os.path.join(str(tmpdir), "model") + + +@pytest.mark.large +def test_save_and_load_model_persists_and_restores_model_in_default_graph_context_successfully( + saved_tf_iris_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + tf_graph = tf.Graph() + tf_sess = tf.Session(graph=tf_graph) + with tf_graph.as_default(): + signature_def = mlflow.tensorflow.load_model(model_uri=model_path, tf_sess=tf_sess) + + for _, input_signature in signature_def.inputs.items(): + t_input = tf_graph.get_tensor_by_name(input_signature.name) + assert t_input is not None + + for _, output_signature in signature_def.outputs.items(): + t_output = tf_graph.get_tensor_by_name(output_signature.name) + assert t_output is not None + + +@pytest.mark.large +def test_load_model_from_remote_uri_succeeds(saved_tf_iris_model, model_path, mock_s3_bucket): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) + artifact_path = "model" + artifact_repo = S3ArtifactRepository(artifact_root) + artifact_repo.log_artifacts(model_path, artifact_path=artifact_path) + + model_uri = artifact_root + "/" + artifact_path + tf_graph = tf.Graph() + tf_sess = tf.Session(graph=tf_graph) + with tf_graph.as_default(): + signature_def = mlflow.tensorflow.load_model(model_uri=model_uri, tf_sess=tf_sess) + + for _, input_signature in signature_def.inputs.items(): + t_input = tf_graph.get_tensor_by_name(input_signature.name) + assert t_input is not None + + 
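These load_model tests lean on the returned SignatureDef to locate concrete tensors in the restored graph; a sketch of driving an actual prediction through it, a simplification of test_iris_model_can_be_loaded_and_evaluated_successfully further down. `tf_graph`, `tf_sess`, and `signature_def` are the surrounding test's variables, and `values_by_key` is a placeholder mapping input keys to numpy arrays:

```python
feed_dict = {tf_graph.get_tensor_by_name(sig.name): values_by_key[key]
             for key, sig in signature_def.inputs.items()}
fetches = [tf_graph.get_tensor_by_name(sig.name)
           for sig in signature_def.outputs.values()]
predictions = tf_sess.run(fetches, feed_dict=feed_dict)
```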
for _, output_signature in signature_def.outputs.items(): + t_output = tf_graph.get_tensor_by_name(output_signature.name) + assert t_output is not None + + +@pytest.mark.large +def test_save_and_load_model_persists_and_restores_model_in_custom_graph_context_successfully( + saved_tf_iris_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + tf_graph = tf.Graph() + tf_sess = tf.Session(graph=tf_graph) + custom_tf_context = tf_graph.device("/cpu:0") + with custom_tf_context: + signature_def = mlflow.tensorflow.load_model(model_uri=model_path, tf_sess=tf_sess) + + for _, input_signature in signature_def.inputs.items(): + t_input = tf_graph.get_tensor_by_name(input_signature.name) + assert t_input is not None + + for _, output_signature in signature_def.outputs.items(): + t_output = tf_graph.get_tensor_by_name(output_signature.name) + assert t_output is not None + + +@pytest.mark.large +def test_iris_model_can_be_loaded_and_evaluated_successfully(saved_tf_iris_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + expected_input_keys = ["sepallengthcm", "sepalwidthcm"] + expected_output_keys = ["predictions"] + input_length = 10 + + def load_and_evaluate(tf_sess, tf_graph, tf_context): + with tf_context: + signature_def = mlflow.tensorflow.load_model(model_uri=model_path, tf_sess=tf_sess) + + input_signature = signature_def.inputs.items() + assert len(input_signature) == len(expected_input_keys) + feed_dict = {} + for input_key, input_signature in signature_def.inputs.items(): + assert input_key in expected_input_keys + t_input = tf_graph.get_tensor_by_name(input_signature.name) + feed_dict[t_input] = np.array(range(input_length), dtype=np.float32) + + output_signature = signature_def.outputs.items() + assert len(output_signature) == len(expected_output_keys) + output_tensors = [] + for output_key, output_signature in signature_def.outputs.items(): + assert output_key in expected_output_keys + t_output = tf_graph.get_tensor_by_name(output_signature.name) + output_tensors.append(t_output) + + outputs_list = tf_sess.run(output_tensors, feed_dict=feed_dict) + assert len(outputs_list) == 1 + outputs = outputs_list[0] + assert len(outputs.ravel()) == input_length + + tf_graph_1 = tf.Graph() + tf_sess_1 = tf.Session(graph=tf_graph_1) + load_and_evaluate(tf_sess=tf_sess_1, tf_graph=tf_graph_1, tf_context=tf_graph_1.as_default()) + + tf_graph_2 = tf.Graph() + tf_sess_2 = tf.Session(graph=tf_graph_2) + load_and_evaluate(tf_sess=tf_sess_2, + tf_graph=tf_graph_2, + tf_context=tf_graph_1.device("/cpu:0")) + + +@pytest.mark.large +def test_save_model_with_invalid_path_signature_def_or_metagraph_tags_throws_exception( + saved_tf_iris_model, model_path): + with pytest.raises(IOError): + mlflow.tensorflow.save_model(tf_saved_model_dir="not_a_valid_tf_model_dir", + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + with pytest.raises(RuntimeError): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=["bad tags"], + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + with 
pytest.raises(MlflowException): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key="bad signature", + path=model_path) + + with pytest.raises(IOError): + mlflow.tensorflow.save_model(tf_saved_model_dir="bad path", + tf_meta_graph_tags="bad tags", + tf_signature_def_key="bad signature", + path=model_path) + + +@pytest.mark.large +def test_load_model_loads_artifacts_from_specified_model_directory(saved_tf_iris_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + # Verify that the MLflow model can be loaded even after deleting the TensorFlow `SavedModel` + # directory that was used to create it, implying that the artifacts were copied to and are + # loaded from the specified MLflow model path + shutil.rmtree(saved_tf_iris_model.path) + with tf.Session(graph=tf.Graph()) as tf_sess: + signature_def = mlflow.tensorflow.load_model(model_uri=model_path, tf_sess=tf_sess) + + +def test_log_model_with_non_keyword_args_fails(saved_tf_iris_model): + artifact_path = "model" + with mlflow.start_run(): + with pytest.raises(TypeError): + mlflow.tensorflow.log_model(saved_tf_iris_model.path, + saved_tf_iris_model.meta_graph_tags, + saved_tf_iris_model.signature_def_key, + artifact_path) + + +@pytest.mark.large +def test_log_and_load_model_persists_and_restores_model_successfully(saved_tf_iris_model): + artifact_path = "model" + with mlflow.start_run(): + mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + artifact_path=artifact_path) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + tf_graph = tf.Graph() + tf_sess = tf.Session(graph=tf_graph) + with tf_graph.as_default(): + signature_def = mlflow.tensorflow.load_model(model_uri=model_uri, tf_sess=tf_sess) + + for _, input_signature in signature_def.inputs.items(): + t_input = tf_graph.get_tensor_by_name(input_signature.name) + assert t_input is not None + + for _, output_signature in signature_def.outputs.items(): + t_output = tf_graph.get_tensor_by_name(output_signature.name) + assert t_output is not None + + +@pytest.mark.large +def test_save_model_persists_specified_conda_env_in_mlflow_model_directory( + saved_tf_iris_model, model_path, tf_custom_env): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path, + conda_env=tf_custom_env) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != tf_custom_env + + with open(tf_custom_env, "r") as f: + tf_custom_env_text = f.read() + with open(saved_conda_env_path, "r") as f: + saved_conda_env_text = f.read() + assert saved_conda_env_text == tf_custom_env_text + + +@pytest.mark.large +def test_save_model_accepts_conda_env_as_dict(saved_tf_iris_model, model_path): + conda_env = dict(mlflow.tensorflow.get_default_conda_env()) + 
conda_env["dependencies"].append("pytest") + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path, + conda_env=conda_env) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + + with open(saved_conda_env_path, "r") as f: + saved_conda_env_parsed = yaml.safe_load(f) + assert saved_conda_env_parsed == conda_env + + +@pytest.mark.large +def test_log_model_persists_specified_conda_env_in_mlflow_model_directory( + saved_tf_iris_model, tf_custom_env): + artifact_path = "model" + with mlflow.start_run(): + mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + artifact_path=artifact_path, + conda_env=tf_custom_env) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + assert os.path.exists(saved_conda_env_path) + assert saved_conda_env_path != tf_custom_env + + with open(tf_custom_env, "r") as f: + tf_custom_env_text = f.read() + with open(saved_conda_env_path, "r") as f: + saved_conda_env_text = f.read() + assert saved_conda_env_text == tf_custom_env_text + + +@pytest.mark.large +def test_save_model_without_specified_conda_env_uses_default_env_with_expected_dependencies( + saved_tf_iris_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path, + conda_env=None) + + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.tensorflow.get_default_conda_env() + + +@pytest.mark.large +def test_log_model_without_specified_conda_env_uses_default_env_with_expected_dependencies( + saved_tf_iris_model, model_path): + artifact_path = "model" + with mlflow.start_run(): + mlflow.tensorflow.log_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + artifact_path=artifact_path, + conda_env=None) + model_uri = "runs:/{run_id}/{artifact_path}".format( + run_id=mlflow.active_run().info.run_id, + artifact_path=artifact_path) + + model_path = _download_artifact_from_uri(artifact_uri=model_uri) + pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) + conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) + with open(conda_env_path, "r") as f: + conda_env = yaml.safe_load(f) + + assert conda_env == mlflow.tensorflow.get_default_conda_env() + + +@pytest.mark.large +def test_iris_data_model_can_be_loaded_and_evaluated_as_pyfunc(saved_tf_iris_model, model_path): + 
mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path) + + pyfunc_wrapper = pyfunc.load_pyfunc(model_path) + results_df = pyfunc_wrapper.predict(saved_tf_iris_model.inference_df) + assert results_df.equals(saved_tf_iris_model.expected_results_df) + + +@pytest.mark.large +def test_categorical_model_can_be_loaded_and_evaluated_as_pyfunc( + saved_tf_categorical_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_categorical_model.path, + tf_meta_graph_tags=saved_tf_categorical_model.meta_graph_tags, + tf_signature_def_key=saved_tf_categorical_model.signature_def_key, + path=model_path) + + pyfunc_wrapper = pyfunc.load_pyfunc(model_path) + results_df = pyfunc_wrapper.predict(saved_tf_categorical_model.inference_df) + pandas.testing.assert_frame_equal( + results_df, saved_tf_categorical_model.expected_results_df, check_less_precise=6) + + +@pytest.mark.release +def test_model_deployment_with_default_conda_env(saved_tf_iris_model, model_path): + mlflow.tensorflow.save_model(tf_saved_model_dir=saved_tf_iris_model.path, + tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags, + tf_signature_def_key=saved_tf_iris_model.signature_def_key, + path=model_path, + conda_env=None) + + scoring_response = score_model_in_sagemaker_docker_container( + model_uri=model_path, + data=saved_tf_iris_model.inference_df, + content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, + flavor=mlflow.pyfunc.FLAVOR_NAME) + deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) + + pandas.testing.assert_frame_equal( + deployed_model_preds, + saved_tf_iris_model.expected_results_df, + check_dtype=False, + check_less_precise=6) diff --git a/tests/test_cli.py b/tests/test_cli.py index 50a8c73717d40..cef3854a364a9 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,7 +1,9 @@ from click.testing import CliRunner from mock import mock +import pytest -from mlflow.cli import server +from mlflow.cli import run, server, ui +from mlflow.server import handlers def test_server_static_prefix_validation(): @@ -19,3 +21,58 @@ def test_server_static_prefix_validation(): result = CliRunner().invoke(server, ["--static-prefix", "/mlflow/"]) assert "--static-prefix should not end with a '/'." 
in result.output run_server_mock.assert_not_called() + + +def test_server_default_artifact_root_validation(): + with mock.patch("mlflow.cli._run_server") as run_server_mock: + result = CliRunner().invoke(server, ["--backend-store-uri", "sqlite:///my.db"]) + assert result.output.startswith("Option 'default-artifact-root' is required") + run_server_mock.assert_not_called() + + +@pytest.mark.parametrize("command", [server, ui]) +def test_tracking_uri_validation_failure(command): + handlers._store = None + with mock.patch("mlflow.cli._run_server") as run_server_mock: + # SQLAlchemy expects postgresql:// not postgres:// + CliRunner().invoke(command, + ["--backend-store-uri", "postgres://user:pwd@host:5432/mydb", + "--default-artifact-root", "./mlruns"]) + run_server_mock.assert_not_called() + + +@pytest.mark.parametrize("command", [server, ui]) +def test_tracking_uri_validation_sql_driver_uris(command): + handlers._store = None + with mock.patch("mlflow.cli._run_server") as run_server_mock,\ + mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore") as sql_store: + CliRunner().invoke(command, + ["--backend-store-uri", "mysql+pymysql://user:pwd@host:5432/mydb", + "--default-artifact-root", "./mlruns"]) + sql_store.assert_called_once_with("mysql+pymysql://user:pwd@host:5432/mydb", "./mlruns") + run_server_mock.assert_called() + + +def test_mlflow_run(): + with mock.patch("mlflow.cli.projects") as mock_projects: + result = CliRunner().invoke(run) + mock_projects.run.assert_not_called() + assert 'Missing argument "URI"' in result.output + + with mock.patch("mlflow.cli.projects") as mock_projects: + CliRunner().invoke(run, ["project_uri"]) + mock_projects.run.assert_called_once() + + with mock.patch("mlflow.cli.projects") as mock_projects: + CliRunner().invoke(run, ["--experiment-id", "5", "project_uri"]) + mock_projects.run.assert_called_once() + + with mock.patch("mlflow.cli.projects") as mock_projects: + CliRunner().invoke(run, ["--experiment-name", "random name", "project_uri"]) + mock_projects.run.assert_called_once() + + with mock.patch("mlflow.cli.projects") as mock_projects: + result = CliRunner().invoke(run, ["--experiment-id", "51", + "--experiment-name", "name blah", "uri"]) + mock_projects.run.assert_not_called() + assert "Specify only one of 'experiment-name' or 'experiment-id' options." 
in result.output diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py index 75bdb1d6d9106..8f62e2248896f 100644 --- a/tests/test_exceptions.py +++ b/tests/test_exceptions.py @@ -1,7 +1,8 @@ import json from mlflow.exceptions import MlflowException -from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, INVALID_STATE, \ + ENDPOINT_NOT_FOUND, INTERNAL_ERROR, RESOURCE_ALREADY_EXISTS, IO_ERROR class TestMlflowException(object): @@ -17,3 +18,18 @@ def test_serialize_to_json(self): deserialized = json.loads(mlflow_exception.serialize_as_json()) assert deserialized['message'] == 'test' assert deserialized['error_code'] == 'INTERNAL_ERROR' + + def test_get_http_status_code(self): + assert MlflowException('test default').get_http_status_code() == 500 + assert MlflowException('code not in map', error_code=IO_ERROR).get_http_status_code() \ + == 500 + assert MlflowException('test', error_code=INVALID_STATE).get_http_status_code() \ + == 500 + assert MlflowException('test', error_code=ENDPOINT_NOT_FOUND).get_http_status_code() \ + == 404 + assert MlflowException('test', error_code=INVALID_PARAMETER_VALUE).get_http_status_code() \ + == 400 + assert MlflowException('test', error_code=INTERNAL_ERROR).get_http_status_code() \ + == 500 + assert MlflowException('test', error_code=RESOURCE_ALREADY_EXISTS).get_http_status_code() \ + == 400 diff --git a/tests/test_runs.py b/tests/test_runs.py new file mode 100644 index 0000000000000..b460931d5a619 --- /dev/null +++ b/tests/test_runs.py @@ -0,0 +1,15 @@ +from click.testing import CliRunner +from mlflow.runs import list_run +import mlflow + + +def test_list_run(): + with mlflow.start_run(run_name='apple'): + pass + result = CliRunner().invoke(list_run, ["--experiment-id", "0"]) + assert 'apple' in result.output + + +def test_list_run_experiment_id_required(): + result = CliRunner().invoke(list_run, []) + assert 'Missing option "--experiment-id"' in result.output diff --git a/tests/tracking/conftest.py b/tests/tracking/conftest.py new file mode 100644 index 0000000000000..8b1117d0f7edb --- /dev/null +++ b/tests/tracking/conftest.py @@ -0,0 +1,19 @@ +import os + +import pytest + +import mlflow + + +@pytest.fixture +def tmp_wkdir(tmpdir): + initial_wkdir = os.getcwd() + os.chdir(str(tmpdir)) + yield + os.chdir(initial_wkdir) + + +@pytest.fixture +def reset_active_experiment(): + yield + mlflow.tracking.fluent._active_experiment_id = None diff --git a/tests/tracking/context/__init__.py b/tests/tracking/context/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tracking/context/test_databricks_notebook_context.py b/tests/tracking/context/test_databricks_notebook_context.py new file mode 100644 index 0000000000000..7463415ff4699 --- /dev/null +++ b/tests/tracking/context/test_databricks_notebook_context.py @@ -0,0 +1,51 @@ +import mock +import pytest + +from mlflow.entities import SourceType +from mlflow.utils.mlflow_tags import MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE, \ + MLFLOW_DATABRICKS_NOTEBOOK_ID, MLFLOW_DATABRICKS_NOTEBOOK_PATH, MLFLOW_DATABRICKS_WEBAPP_URL +from mlflow.tracking.context.databricks_notebook_context import DatabricksNotebookRunContext + + +MOCK_SCRIPT_NAME = "/path/to/script.py" + + +@pytest.fixture +def patch_script_name(): + patch_sys_argv = mock.patch("sys.argv", [MOCK_SCRIPT_NAME]) + patch_os_path_isfile = mock.patch("os.path.isfile", return_value=False) + with patch_sys_argv, patch_os_path_isfile: + yield + + +def test_databricks_notebook_run_context_in_context(): 
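+ # in_context() simply defers to databricks_utils.is_in_databricks_notebook.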
+ with mock.patch("mlflow.utils.databricks_utils.is_in_databricks_notebook") as in_notebook_mock: + assert DatabricksNotebookRunContext().in_context() == in_notebook_mock.return_value + + +def test_databricks_notebook_run_context_tags(): + patch_notebook_id = mock.patch("mlflow.utils.databricks_utils.get_notebook_id") + patch_notebook_path = mock.patch("mlflow.utils.databricks_utils.get_notebook_path") + patch_webapp_url = mock.patch("mlflow.utils.databricks_utils.get_webapp_url") + + with patch_notebook_id as notebook_id_mock, patch_notebook_path as notebook_path_mock, \ + patch_webapp_url as webapp_url_mock: + assert DatabricksNotebookRunContext().tags() == { + MLFLOW_SOURCE_NAME: notebook_path_mock.return_value, + MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK), + MLFLOW_DATABRICKS_NOTEBOOK_ID: notebook_id_mock.return_value, + MLFLOW_DATABRICKS_NOTEBOOK_PATH: notebook_path_mock.return_value, + MLFLOW_DATABRICKS_WEBAPP_URL: webapp_url_mock.return_value + } + + +def test_databricks_notebook_run_context_tags_nones(): + patch_notebook_id = mock.patch("mlflow.utils.databricks_utils.get_notebook_id", + return_value=None) + patch_notebook_path = mock.patch("mlflow.utils.databricks_utils.get_notebook_path", + return_value=None) + patch_webapp_url = mock.patch("mlflow.utils.databricks_utils.get_webapp_url", + return_value=None) + + with patch_notebook_id, patch_notebook_path, patch_webapp_url: + assert DatabricksNotebookRunContext().tags() == { + MLFLOW_SOURCE_NAME: None, + MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK), + } diff --git a/tests/tracking/context/test_default_context.py b/tests/tracking/context/test_default_context.py new file mode 100644 index 0000000000000..30a016fc06925 --- /dev/null +++ b/tests/tracking/context/test_default_context.py @@ -0,0 +1,31 @@ +import mock +import pytest + +from mlflow.entities import SourceType +from mlflow.utils.mlflow_tags import MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE +from mlflow.tracking.context.default_context import DefaultRunContext + + +MOCK_SCRIPT_NAME = "/path/to/script.py" + + +@pytest.fixture +def patch_script_name(): + patch_sys_argv = mock.patch("sys.argv", [MOCK_SCRIPT_NAME]) + patch_os_path_isfile = mock.patch("os.path.isfile", return_value=False) + with patch_sys_argv, patch_os_path_isfile: + yield + + +def test_default_run_context_in_context(): + assert DefaultRunContext().in_context() is True + + +def test_default_run_context_tags(patch_script_name): + mock_user = mock.Mock() + with mock.patch("getpass.getuser", return_value=mock_user): + assert DefaultRunContext().tags() == { + MLFLOW_USER: mock_user, + MLFLOW_SOURCE_NAME: MOCK_SCRIPT_NAME, + MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.LOCAL) + } diff --git a/tests/tracking/context/test_git_context.py b/tests/tracking/context/test_git_context.py new file mode 100644 index 0000000000000..1115ff77660ad --- /dev/null +++ b/tests/tracking/context/test_git_context.py @@ -0,0 +1,56 @@ +import mock +import pytest +import git + +from mlflow.utils.mlflow_tags import MLFLOW_GIT_COMMIT +from mlflow.tracking.context.git_context import GitRunContext + + +MOCK_SCRIPT_NAME = "/path/to/script.py" +MOCK_COMMIT_HASH = "commit-hash" + + +@pytest.fixture +def patch_script_name(): + patch_sys_argv = mock.patch("sys.argv", [MOCK_SCRIPT_NAME]) + patch_os_path_isfile = mock.patch("os.path.isfile", return_value=False) + with patch_sys_argv, patch_os_path_isfile: + yield + + +@pytest.fixture +def patch_git_repo(): + mock_repo = mock.Mock() + 
mock_repo.head.commit.hexsha = MOCK_COMMIT_HASH + with mock.patch("git.Repo", return_value=mock_repo): + yield mock_repo + + +def test_git_run_context_in_context_true(patch_script_name, patch_git_repo): + assert GitRunContext().in_context() + + +def test_git_run_context_in_context_false(patch_script_name): + with mock.patch("git.Repo", side_effect=git.InvalidGitRepositoryError): + assert not GitRunContext().in_context() + + +def test_git_run_context_tags(patch_script_name, patch_git_repo): + assert GitRunContext().tags() == { + MLFLOW_GIT_COMMIT: MOCK_COMMIT_HASH + } + + +def test_git_run_context_caching(patch_script_name): + """Check that the git commit hash is only looked up once.""" + + mock_repo = mock.Mock() + mock_hexsha = mock.PropertyMock(return_value=MOCK_COMMIT_HASH) + type(mock_repo.head.commit).hexsha = mock_hexsha + + with mock.patch("git.Repo", return_value=mock_repo): + context = GitRunContext() + context.in_context() + context.tags() + + assert mock_hexsha.call_count == 1 diff --git a/tests/tracking/context/test_registry.py b/tests/tracking/context/test_registry.py new file mode 100644 index 0000000000000..526b05fdf8602 --- /dev/null +++ b/tests/tracking/context/test_registry.py @@ -0,0 +1,140 @@ +import mock +import pytest +from six.moves import reload_module as reload + +import mlflow.tracking.context.registry +from mlflow.tracking.context.default_context import DefaultRunContext +from mlflow.tracking.context.git_context import GitRunContext +from mlflow.tracking.context.databricks_notebook_context import DatabricksNotebookRunContext +from mlflow.tracking.context.registry import RunContextProviderRegistry, resolve_tags + + +def test_run_context_provider_registry_register(): + provider_class = mock.Mock() + + registry = RunContextProviderRegistry() + registry.register(provider_class) + + assert set(registry) == {provider_class.return_value} + + +def test_run_context_provider_registry_register_entrypoints(): + provider_class = mock.Mock() + mock_entrypoint = mock.Mock() + mock_entrypoint.load.return_value = provider_class + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + registry = RunContextProviderRegistry() + registry.register_entrypoints() + + assert set(registry) == {provider_class.return_value} + mock_entrypoint.load.assert_called_once_with() + mock_get_group_all.assert_called_once_with("mlflow.run_context_provider") + + +@pytest.mark.parametrize("exception", + [AttributeError("test exception"), ImportError("test exception")]) +def test_run_context_provider_registry_register_entrypoints_handles_exception(exception): + mock_entrypoint = mock.Mock() + mock_entrypoint.load.side_effect = exception + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + registry = RunContextProviderRegistry() + # Check that the raised warning contains the message from the original exception + with pytest.warns(UserWarning, match="test exception"): + registry.register_entrypoints() + + mock_entrypoint.load.assert_called_once_with() + mock_get_group_all.assert_called_once_with("mlflow.run_context_provider") + + +def _currently_registered_run_context_provider_classes(): + return { + provider.__class__ + for provider in mlflow.tracking.context.registry._run_context_provider_registry + } + + +def test_registry_instance_defaults(): + expected_classes = {DefaultRunContext, GitRunContext, DatabricksNotebookRunContext} + assert 
expected_classes.issubset(_currently_registered_run_context_provider_classes()) + + +def test_registry_instance_loads_entrypoints(): + + class MockRunContext(object): + pass + + mock_entrypoint = mock.Mock() + mock_entrypoint.load.return_value = MockRunContext + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + # Entrypoints are registered at import time, so we need to reload the module to register the + # entrypoint given by the mocked entrypoints.get_group_all + reload(mlflow.tracking.context.registry) + + assert MockRunContext in _currently_registered_run_context_provider_classes() + mock_get_group_all.assert_called_once_with("mlflow.run_context_provider") + + +@pytest.mark.large +def test_run_context_provider_registry_with_installed_plugin(tmp_wkdir): + """This test requires the package in tests/resources/mlflow-test-plugin to be installed""" + + reload(mlflow.tracking.context.registry) + + from mlflow_test_plugin import PluginRunContextProvider + assert PluginRunContextProvider in _currently_registered_run_context_provider_classes() + + # The test plugin's context provider always returns False from in_context + # to avoid polluting tags in developers' environments. The following mock overrides this to + # perform the integration test. + with mock.patch.object(PluginRunContextProvider, "in_context", return_value=True): + assert resolve_tags()["test"] == "tag" + + +@pytest.fixture +def mock_run_context_providers(): + base_provider = mock.Mock() + base_provider.in_context.return_value = True + base_provider.tags.return_value = {"one": "one-val", "two": "two-val", "three": "three-val"} + + skipped_provider = mock.Mock() + skipped_provider.in_context.return_value = False + + override_provider = mock.Mock() + override_provider.in_context.return_value = True + override_provider.tags.return_value = {"one": "override", "new": "new-val"} + + providers = [base_provider, skipped_provider, override_provider] + + with mock.patch("mlflow.tracking.context.registry._run_context_provider_registry", providers): + yield + + skipped_provider.tags.assert_not_called() + + +def test_resolve_tags(mock_run_context_providers): + tags_arg = {"two": "arg-override", "arg": "arg-val"} + assert resolve_tags(tags_arg) == { + "one": "override", + "two": "arg-override", + "three": "three-val", + "new": "new-val", + "arg": "arg-val" + } + + +def test_resolve_tags_no_arg(mock_run_context_providers): + assert resolve_tags() == { + "one": "override", + "two": "two-val", + "three": "three-val", + "new": "new-val" + } diff --git a/tests/tracking/test_artifact_utils.py b/tests/tracking/test_artifact_utils.py new file mode 100644 index 0000000000000..fe74d3429d972 --- /dev/null +++ b/tests/tracking/test_artifact_utils.py @@ -0,0 +1,47 @@ +import os + +import mlflow +from mlflow.tracking.artifact_utils import _download_artifact_from_uri + + +def test_artifact_can_be_downloaded_from_absolute_uri_successfully(tmpdir): + artifact_file_name = "artifact.txt" + artifact_text = "Sample artifact text" + local_artifact_path = tmpdir.join(artifact_file_name).strpath + with open(local_artifact_path, "w") as out: + out.write(artifact_text) + + logged_artifact_path = "artifact" + with mlflow.start_run(): + mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_path) + artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_path) + + downloaded_artifact_path = os.path.join( + _download_artifact_from_uri(artifact_uri), artifact_file_name) + 
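# The download should yield a fresh local copy, distinct from both the original + # file and the run-relative artifact path. + 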
assert downloaded_artifact_path != local_artifact_path + assert downloaded_artifact_path != logged_artifact_path + with open(downloaded_artifact_path, "r") as f: + assert f.read() == artifact_text + + +def test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory(tmpdir): + artifact_file_name = "artifact.txt" + artifact_text = "Sample artifact text" + local_artifact_path = tmpdir.join(artifact_file_name).strpath + with open(local_artifact_path, "w") as out: + out.write(artifact_text) + + logged_artifact_subdir = "logged_artifact" + with mlflow.start_run(): + mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_subdir) + artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_subdir) + + artifact_output_path = tmpdir.join("artifact_output").strpath + os.makedirs(artifact_output_path) + _download_artifact_from_uri(artifact_uri=artifact_uri, output_path=artifact_output_path) + assert logged_artifact_subdir in os.listdir(artifact_output_path) + assert artifact_file_name in os.listdir( + os.path.join(artifact_output_path, logged_artifact_subdir)) + with open(os.path.join( + artifact_output_path, logged_artifact_subdir, artifact_file_name), "r") as f: + assert f.read() == artifact_text diff --git a/tests/tracking/test_client.py b/tests/tracking/test_client.py new file mode 100644 index 0000000000000..50e8e931cc8a6 --- /dev/null +++ b/tests/tracking/test_client.py @@ -0,0 +1,149 @@ +import pytest +import mock + +from mlflow.entities import SourceType, ViewType, RunTag +from mlflow.store import SEARCH_MAX_RESULTS_DEFAULT +from mlflow.tracking import MlflowClient +from mlflow.utils.mlflow_tags import MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE, \ + MLFLOW_PARENT_RUN_ID, MLFLOW_GIT_COMMIT, MLFLOW_PROJECT_ENTRY_POINT + + +@pytest.fixture +def mock_store(): + with mock.patch("mlflow.tracking.utils._get_store") as mock_get_store: + yield mock_get_store.return_value + + +@pytest.fixture +def mock_time(): + time = 1552319350.244724 + with mock.patch("time.time", return_value=time): + yield time + + +def test_client_create_run(mock_store, mock_time): + + experiment_id = mock.Mock() + + MlflowClient().create_run(experiment_id) + + mock_store.create_run.assert_called_once_with( + experiment_id=experiment_id, + user_id="unknown", + start_time=int(mock_time * 1000), + tags=[] + ) + + +def test_client_create_run_overrides(mock_store): + + experiment_id = mock.Mock() + user = mock.Mock() + start_time = mock.Mock() + tags = { + MLFLOW_USER: user, + MLFLOW_PARENT_RUN_ID: mock.Mock(), + MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.JOB), + MLFLOW_SOURCE_NAME: mock.Mock(), + MLFLOW_PROJECT_ENTRY_POINT: mock.Mock(), + MLFLOW_GIT_COMMIT: mock.Mock(), + "other-key": "other-value" + } + + MlflowClient().create_run(experiment_id, start_time, tags) + + mock_store.create_run.assert_called_once_with( + experiment_id=experiment_id, + user_id=user, + start_time=start_time, + tags=[RunTag(key, value) for key, value in tags.items()], + ) + mock_store.reset_mock() + parent_run_id = "mock-parent-run-id" + MlflowClient().create_run(experiment_id, start_time, tags) + mock_store.create_run.assert_called_once_with( + experiment_id=experiment_id, + user_id=user, + start_time=start_time, + tags=[RunTag(key, value) for key, value in tags.items()] + ) + + +def test_client_search_runs_defaults(mock_store): + MlflowClient().search_runs([1, 2, 3]) + mock_store.search_runs.assert_called_once_with(experiment_ids=[1, 2, 3], + filter_string="", + 
run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=None, + page_token=None) + + +def test_client_search_runs_filter(mock_store): + MlflowClient().search_runs(["a", "b", "c"], "my filter") + mock_store.search_runs.assert_called_once_with(experiment_ids=["a", "b", "c"], + filter_string="my filter", + run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=None, + page_token=None) + + +def test_client_search_runs_view_type(mock_store): + MlflowClient().search_runs(["a", "b", "c"], "my filter", ViewType.DELETED_ONLY) + mock_store.search_runs.assert_called_once_with(experiment_ids=["a", "b", "c"], + filter_string="my filter", + run_view_type=ViewType.DELETED_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=None, + page_token=None) + + +def test_client_search_runs_max_results(mock_store): + MlflowClient().search_runs([5], "my filter", ViewType.ALL, 2876) + mock_store.search_runs.assert_called_once_with(experiment_ids=[5], + filter_string="my filter", + run_view_type=ViewType.ALL, + max_results=2876, + order_by=None, + page_token=None) + + +def test_client_search_runs_int_experiment_id(mock_store): + MlflowClient().search_runs(123) + mock_store.search_runs.assert_called_once_with(experiment_ids=[123], + filter_string="", + run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=None, + page_token=None) + + +def test_client_search_runs_string_experiment_id(mock_store): + MlflowClient().search_runs("abc") + mock_store.search_runs.assert_called_once_with(experiment_ids=["abc"], + filter_string="", + run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=None, + page_token=None) + + +def test_client_search_runs_order_by(mock_store): + MlflowClient().search_runs([5], order_by=["a", "b"]) + mock_store.search_runs.assert_called_once_with(experiment_ids=[5], + filter_string="", + run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=["a", "b"], + page_token=None) + + +def test_client_search_runs_page_token(mock_store): + MlflowClient().search_runs([5], page_token="blah") + mock_store.search_runs.assert_called_once_with(experiment_ids=[5], + filter_string="", + run_view_type=ViewType.ACTIVE_ONLY, + max_results=SEARCH_MAX_RESULTS_DEFAULT, + order_by=None, + page_token="blah") diff --git a/tests/tracking/test_fluent.py b/tests/tracking/test_fluent.py new file mode 100644 index 0000000000000..5bdad5a882860 --- /dev/null +++ b/tests/tracking/test_fluent.py @@ -0,0 +1,540 @@ +import os +import random +import uuid + +import pytest +import mock +import numpy as np +import pandas as pd +from six.moves import reload_module as reload + +import mlflow +from mlflow.entities import LifecycleStage, SourceType, Run, RunInfo, RunData, RunStatus, Metric, \ + Param, RunTag, ViewType +from mlflow.exceptions import MlflowException +from mlflow.store.abstract_store import PagedList +from mlflow.tracking.client import MlflowClient +import mlflow.tracking.fluent +import mlflow.tracking.context.registry +from mlflow.tracking.fluent import start_run, _get_experiment_id, _get_experiment_id_from_env, \ + search_runs, _EXPERIMENT_NAME_ENV_VAR, _EXPERIMENT_ID_ENV_VAR, _RUN_ID_ENV_VAR, \ + _get_paginated_runs, NUM_RUNS_PER_PAGE_PANDAS, SEARCH_MAX_RESULTS_PANDAS +from mlflow.utils.file_utils import TempDir +from mlflow.utils import mlflow_tags + + +class HelperEnv: + @classmethod + def assert_values(cls, exp_id, name): + assert 
os.environ.get(_EXPERIMENT_NAME_ENV_VAR) == name + assert os.environ.get(_EXPERIMENT_ID_ENV_VAR) == exp_id + + @classmethod + def set_values(cls, id=None, name=None): + if id: + os.environ[_EXPERIMENT_ID_ENV_VAR] = str(id) + elif os.environ.get(_EXPERIMENT_ID_ENV_VAR): + del os.environ[_EXPERIMENT_ID_ENV_VAR] + + if name: + os.environ[_EXPERIMENT_NAME_ENV_VAR] = str(name) + elif os.environ.get(_EXPERIMENT_NAME_ENV_VAR): + del os.environ[_EXPERIMENT_NAME_ENV_VAR] + + +def create_run(run_id="", exp_id="", uid="", start=0, metrics=None, params=None, tags=None, + status=RunStatus.FINISHED, a_uri=None): + return Run( + RunInfo( + run_uuid=run_id, + run_id=run_id, + experiment_id=exp_id, + user_id=uid, + status=status, + start_time=start, + end_time=0, + lifecycle_stage=LifecycleStage.ACTIVE, + artifact_uri=a_uri + ), RunData( + metrics=metrics, + params=params, + tags=tags + )) + + +@pytest.fixture(autouse=True) +def reset_experiment_id(): + """ + This fixture resets the active experiment id *after* the execution of the test case in which + its included + """ + yield + HelperEnv.set_values() + mlflow.tracking.fluent._active_experiment_id = None + + +@pytest.fixture(autouse=True) +def reload_context_registry(): + """Reload the context registry module to clear caches.""" + reload(mlflow.tracking.context.registry) + + +def test_get_experiment_id_from_env(): + # When no env variables are set + HelperEnv.assert_values(None, None) + assert _get_experiment_id_from_env() is None + + # set only ID + random_id = random.randint(1, 1e6) + HelperEnv.set_values(id=random_id) + HelperEnv.assert_values(str(random_id), None) + assert _get_experiment_id_from_env() == str(random_id) + + # set only name + with TempDir(chdr=True): + name = "random experiment %d" % random.randint(1, 1e6) + exp_id = mlflow.create_experiment(name) + assert exp_id is not None + HelperEnv.set_values(name=name) + HelperEnv.assert_values(None, name) + assert _get_experiment_id_from_env() == exp_id + + # set both: assert that name variable takes precedence + with TempDir(chdr=True): + name = "random experiment %d" % random.randint(1, 1e6) + exp_id = mlflow.create_experiment(name) + assert exp_id is not None + random_id = random.randint(1, 1e6) + HelperEnv.set_values(name=name, id=random_id) + HelperEnv.assert_values(str(random_id), name) + assert _get_experiment_id_from_env() == exp_id + + +def test_get_experiment_id_with_active_experiment_returns_active_experiment_id(): + # Create a new experiment and set that as active experiment + with TempDir(chdr=True): + name = "Random experiment %d" % random.randint(1, 1e6) + exp_id = mlflow.create_experiment(name) + assert exp_id is not None + mlflow.set_experiment(name) + assert _get_experiment_id() == exp_id + + +def test_get_experiment_id_with_no_active_experiments_returns_zero(): + assert _get_experiment_id() == "0" + + +def test_get_experiment_id_in_databricks_detects_notebook_id_by_default(): + notebook_id = 768 + + with mock.patch("mlflow.tracking.fluent.is_in_databricks_notebook") as notebook_detection_mock,\ + mock.patch("mlflow.tracking.fluent.get_notebook_id") as notebook_id_mock: + notebook_detection_mock.return_value = True + notebook_id_mock.return_value = notebook_id + assert _get_experiment_id() == notebook_id + + +def test_get_experiment_id_in_databricks_with_active_experiment_returns_active_experiment_id(): + with TempDir(chdr=True): + exp_name = "random experiment %d" % random.randint(1, 1e6) + exp_id = mlflow.create_experiment(exp_name) + mlflow.set_experiment(exp_name) + 
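# The offset is arbitrary; it just guarantees the mocked notebook id differs from + # the active experiment id. + 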
notebook_id = str(int(exp_id) + 73) + + with mock.patch("mlflow.tracking.fluent.is_in_databricks_notebook") as notebook_detection_mock,\ + mock.patch("mlflow.tracking.fluent.get_notebook_id") as notebook_id_mock: + notebook_detection_mock.return_value = True + notebook_id_mock.return_value = notebook_id + + assert _get_experiment_id() != notebook_id + assert _get_experiment_id() == exp_id + + +def test_get_experiment_id_in_databricks_with_experiment_defined_in_env_returns_env_experiment_id(): + with TempDir(chdr=True): + exp_name = "random experiment %d" % random.randint(1, 1e6) + exp_id = mlflow.create_experiment(exp_name) + notebook_id = str(int(exp_id) + 73) + HelperEnv.set_values(id=exp_id) + + with mock.patch("mlflow.tracking.fluent.is_in_databricks_notebook") as notebook_detection_mock,\ + mock.patch("mlflow.tracking.fluent.get_notebook_id") as notebook_id_mock: + notebook_detection_mock.side_effect = lambda *args, **kwargs: True + notebook_id_mock.side_effect = lambda *args, **kwargs: notebook_id + + assert _get_experiment_id() != notebook_id + assert _get_experiment_id() == exp_id + + +@pytest.fixture +def empty_active_run_stack(): + with mock.patch("mlflow.tracking.fluent._active_run_stack", []): + yield + + +def is_from_run(active_run, run): + return active_run.info == run.info and active_run.data == run.data + + +def test_start_run_defaults(empty_active_run_stack): + + mock_experiment_id = mock.Mock() + experiment_id_patch = mock.patch( + "mlflow.tracking.fluent._get_experiment_id", return_value=mock_experiment_id + ) + databricks_notebook_patch = mock.patch( + "mlflow.tracking.fluent.is_in_databricks_notebook", return_value=False + ) + mock_user = mock.Mock() + user_patch = mock.patch( + "mlflow.tracking.context.default_context._get_user", return_value=mock_user + ) + mock_source_name = mock.Mock() + source_name_patch = mock.patch( + "mlflow.tracking.context.default_context._get_source_name", return_value=mock_source_name + ) + source_type_patch = mock.patch( + "mlflow.tracking.context.default_context._get_source_type", return_value=SourceType.NOTEBOOK + ) + mock_source_version = mock.Mock() + source_version_patch = mock.patch( + "mlflow.tracking.context.git_context._get_source_version", return_value=mock_source_version + ) + + expected_tags = { + mlflow_tags.MLFLOW_USER: mock_user, + mlflow_tags.MLFLOW_SOURCE_NAME: mock_source_name, + mlflow_tags.MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK), + mlflow_tags.MLFLOW_GIT_COMMIT: mock_source_version + } + + create_run_patch = mock.patch.object(MlflowClient, "create_run") + + with experiment_id_patch, databricks_notebook_patch, user_patch, source_name_patch, \ + source_type_patch, source_version_patch, create_run_patch: + active_run = start_run() + MlflowClient.create_run.assert_called_once_with( + experiment_id=mock_experiment_id, + tags=expected_tags + ) + assert is_from_run(active_run, MlflowClient.create_run.return_value) + + +def test_start_run_defaults_databricks_notebook(empty_active_run_stack): + + mock_experiment_id = mock.Mock() + experiment_id_patch = mock.patch( + "mlflow.tracking.fluent._get_experiment_id", return_value=mock_experiment_id + ) + databricks_notebook_patch = mock.patch( + "mlflow.utils.databricks_utils.is_in_databricks_notebook", return_value=True + ) + mock_user = mock.Mock() + user_patch = mock.patch( + "mlflow.tracking.context.default_context._get_user", return_value=mock_user + ) + mock_source_version = mock.Mock() + source_version_patch = mock.patch( + 
"mlflow.tracking.context.git_context._get_source_version", return_value=mock_source_version + ) + mock_notebook_id = mock.Mock() + notebook_id_patch = mock.patch( + "mlflow.utils.databricks_utils.get_notebook_id", return_value=mock_notebook_id + ) + mock_notebook_path = mock.Mock() + notebook_path_patch = mock.patch( + "mlflow.utils.databricks_utils.get_notebook_path", return_value=mock_notebook_path + ) + mock_webapp_url = mock.Mock() + webapp_url_patch = mock.patch( + "mlflow.utils.databricks_utils.get_webapp_url", return_value=mock_webapp_url + ) + + expected_tags = { + mlflow_tags.MLFLOW_USER: mock_user, + mlflow_tags.MLFLOW_SOURCE_NAME: mock_notebook_path, + mlflow_tags.MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK), + mlflow_tags.MLFLOW_GIT_COMMIT: mock_source_version, + mlflow_tags.MLFLOW_DATABRICKS_NOTEBOOK_ID: mock_notebook_id, + mlflow_tags.MLFLOW_DATABRICKS_NOTEBOOK_PATH: mock_notebook_path, + mlflow_tags.MLFLOW_DATABRICKS_WEBAPP_URL: mock_webapp_url + } + + create_run_patch = mock.patch.object(MlflowClient, "create_run") + + with experiment_id_patch, databricks_notebook_patch, user_patch, source_version_patch, \ + notebook_id_patch, notebook_path_patch, webapp_url_patch, create_run_patch: + active_run = start_run() + MlflowClient.create_run.assert_called_once_with( + experiment_id=mock_experiment_id, + tags=expected_tags + ) + assert is_from_run(active_run, MlflowClient.create_run.return_value) + + +def test_start_run_with_parent(): + + parent_run = mock.Mock() + mock_experiment_id = mock.Mock() + mock_source_name = mock.Mock() + mock_run_name = mock.Mock() + + active_run_stack_patch = mock.patch("mlflow.tracking.fluent._active_run_stack", [parent_run]) + + databricks_notebook_patch = mock.patch( + "mlflow.tracking.fluent.is_in_databricks_notebook", return_value=False + ) + mock_user = mock.Mock() + user_patch = mock.patch( + "mlflow.tracking.context.default_context._get_user", return_value=mock_user + ) + source_name_patch = mock.patch( + "mlflow.tracking.context.default_context._get_source_name", return_value=mock_source_name + ) + + expected_tags = { + mlflow_tags.MLFLOW_USER: mock_user, + mlflow_tags.MLFLOW_SOURCE_NAME: mock_source_name, + mlflow_tags.MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.LOCAL), + mlflow_tags.MLFLOW_PARENT_RUN_ID: parent_run.info.run_id + } + + create_run_patch = mock.patch.object(MlflowClient, "create_run") + + with databricks_notebook_patch, active_run_stack_patch, create_run_patch, user_patch, \ + source_name_patch: + active_run = start_run( + experiment_id=mock_experiment_id, nested=True + ) + MlflowClient.create_run.assert_called_once_with( + experiment_id=mock_experiment_id, + tags=expected_tags + ) + assert is_from_run(active_run, MlflowClient.create_run.return_value) + + +def test_start_run_with_parent_non_nested(): + with mock.patch("mlflow.tracking.fluent._active_run_stack", [mock.Mock()]): + with pytest.raises(Exception): + start_run() + + +def test_start_run_existing_run(empty_active_run_stack): + mock_run = mock.Mock() + mock_run.info.lifecycle_stage = LifecycleStage.ACTIVE + + run_id = uuid.uuid4().hex + + with mock.patch.object(MlflowClient, "get_run", return_value=mock_run): + active_run = start_run(run_id) + + assert is_from_run(active_run, mock_run) + MlflowClient.get_run.assert_called_once_with(run_id) + + +def test_start_run_existing_run_from_environment(empty_active_run_stack): + mock_run = mock.Mock() + mock_run.info.lifecycle_stage = LifecycleStage.ACTIVE + + run_id = uuid.uuid4().hex + env_patch = 
mock.patch.dict("os.environ", {_RUN_ID_ENV_VAR: run_id}) + + with env_patch, mock.patch.object(MlflowClient, "get_run", return_value=mock_run): + active_run = start_run() + + assert is_from_run(active_run, mock_run) + MlflowClient.get_run.assert_called_once_with(run_id) + + +def test_start_run_existing_run_deleted(empty_active_run_stack): + mock_run = mock.Mock() + mock_run.info.lifecycle_stage = LifecycleStage.DELETED + + run_id = uuid.uuid4().hex + + with mock.patch.object(MlflowClient, "get_run", return_value=mock_run): + with pytest.raises(MlflowException): + start_run(run_id) + + +def test_search_runs_attributes(): + runs = [create_run(status=RunStatus.FINISHED, a_uri="dbfs:/test", run_id='abc', exp_id="123"), + create_run(status=RunStatus.SCHEDULED, a_uri="dbfs:/test2", run_id='def', exp_id="321")] + with mock.patch('mlflow.tracking.fluent._get_paginated_runs', return_value=runs): + pdf = search_runs() + data = {'status': [RunStatus.FINISHED, RunStatus.SCHEDULED], + 'artifact_uri': ["dbfs:/test", "dbfs:/test2"], + 'run_id': ['abc', 'def'], + 'experiment_id': ["123", "321"]} + expected_df = pd.DataFrame(data) + pd.testing.assert_frame_equal(pdf, expected_df, check_like=True, check_frame_type=False) + + +def test_search_runs_data(): + runs = [ + create_run( + metrics=[Metric("mse", 0.2, 0, 0)], + params=[Param("param", "value")], + tags=[RunTag("tag", "value")]), + create_run( + metrics=[Metric("mse", 0.6, 0, 0), Metric("loss", 1.2, 0, 5)], + params=[Param("param2", "val"), Param("k", "v")], + tags=[RunTag("tag2", "v2")])] + with mock.patch('mlflow.tracking.fluent._get_paginated_runs', return_value=runs): + pdf = search_runs() + data = { + 'status': [RunStatus.FINISHED]*2, + 'artifact_uri': [None]*2, + 'run_id': ['']*2, + 'experiment_id': [""]*2, + 'metrics.mse': [0.2, 0.6], + 'metrics.loss': [np.nan, 1.2], + 'params.param': ["value", None], + 'params.param2': [None, "val"], + 'params.k': [None, "v"], + 'tags.tag': ["value", None], + 'tags.tag2': [None, "v2"]} + expected_df = pd.DataFrame(data) + pd.testing.assert_frame_equal(pdf, expected_df, check_like=True, check_frame_type=False) + + +def test_search_runs_no_arguments(): + """" + When no experiment ID is specified, it should try to get the implicit one or + create a new experiment + """ + mock_experiment_id = mock.Mock() + experiment_id_patch = mock.patch("mlflow.tracking.fluent._get_experiment_id", + return_value=mock_experiment_id) + get_paginated_runs_patch = mock.patch('mlflow.tracking.fluent._get_paginated_runs', + return_value=[]) + with experiment_id_patch, get_paginated_runs_patch: + pdf = search_runs() + mlflow.tracking.fluent._get_paginated_runs.assert_called_once_with( + mock_experiment_id, '', ViewType.ACTIVE_ONLY, SEARCH_MAX_RESULTS_PANDAS, None + ) + + +def test_get_paginated_runs_lt_maxresults_onepage(): + """" + Number of runs is less than max_results and fits on one page, + so we only need to fetch one page. 
+ """ + runs = [create_run() for i in range(5)] + tokenized_runs = PagedList(runs, "") + max_results = 50 + with mock.patch("mlflow.tracking.fluent.NUM_RUNS_PER_PAGE_PANDAS", 10): + with mock.patch.object(MlflowClient, "search_runs", return_value=tokenized_runs): + paginated_runs = _get_paginated_runs([], "", ViewType.ACTIVE_ONLY, max_results, None) + MlflowClient.search_runs.assert_called_once() + assert len(paginated_runs) == 5 + + +def test_get_paginated_runs_lt_maxresults_multipage(): + """" + Number of runs is less than max_results, but multiple pages are necessary to get all runs + """ + tokenized_runs = PagedList([create_run() for i in range(10)], "token") + no_token_runs = PagedList([create_run()], "") + max_results = 50 + with mock.patch("mlflow.tracking.fluent.NUM_RUNS_PER_PAGE_PANDAS", 10): + with mock.patch.object(MlflowClient, "search_runs"): + MlflowClient.search_runs.side_effect = [tokenized_runs, tokenized_runs, no_token_runs] + TOTAL_RUNS = 21 + + paginated_runs = _get_paginated_runs([], "", ViewType.ACTIVE_ONLY, max_results, None) + assert len(paginated_runs) == TOTAL_RUNS + + +def test_get_paginated_runs_eq_maxresults_blanktoken(): + """ + Runs returned are equal to max_results which are equal to a full number of pages. + The server might send a token back, or they might not (depending on if they know if + more runs exist). In this example, no token is sent back. + Expected behavior is to NOT query for more pages. + """ + # runs returned equal to max_results, blank token + runs = [create_run() for i in range(10)] + tokenized_runs = PagedList(runs, "") + no_token_runs = PagedList([], "") + max_results = 10 + with mock.patch("mlflow.tracking.fluent.NUM_RUNS_PER_PAGE_PANDAS", 10): + with mock.patch.object(MlflowClient, "search_runs"): + MlflowClient.search_runs.side_effect = [tokenized_runs, no_token_runs] + paginated_runs = _get_paginated_runs([], "", ViewType.ACTIVE_ONLY, max_results, None) + MlflowClient.search_runs.assert_called_once() + assert len(paginated_runs) == 10 + + +def test_get_paginated_runs_eq_maxresults_token(): + """ + Runs returned are equal to max_results which are equal to a full number of pages. + The server might send a token back, or they might not (depending on if they know if + more runs exist). In this example, a toke IS sent back. + Expected behavior is to NOT query for more pages. + """ + runs = [create_run() for i in range(10)] + tokenized_runs = PagedList(runs, "abc") + blank_runs = PagedList([], "") + max_results = 10 + with mock.patch("mlflow.tracking.fluent.NUM_RUNS_PER_PAGE_PANDAS", 10): + with mock.patch.object(MlflowClient, "search_runs"): + MlflowClient.search_runs.side_effect = [tokenized_runs, blank_runs] + paginated_runs = _get_paginated_runs([], "", ViewType.ACTIVE_ONLY, max_results, None) + MlflowClient.search_runs.assert_called_once() + assert len(paginated_runs) == 10 + + +def test_get_paginated_runs_gt_maxresults_multipage(): + """ + Number of runs that fit search criteria is greater than max_results. Multiple pages expected. + Expected to only get max_results number of results back. 
+ """ + # should ask for and return the correct number of max_results + full_page_runs = PagedList([create_run() for i in range(8)], "abc") + partial_page = PagedList([create_run() for i in range(4)], "def") + max_results = 20 + with mock.patch("mlflow.tracking.fluent.NUM_RUNS_PER_PAGE_PANDAS", 8): + with mock.patch.object(MlflowClient, "search_runs"): + MlflowClient.search_runs.side_effect = [full_page_runs, full_page_runs, partial_page] + paginated_runs = _get_paginated_runs([12], "", ViewType.ACTIVE_ONLY, max_results, None) + calls = [mock.call([12], "", ViewType.ACTIVE_ONLY, 8, None, None), + mock.call([12], "", ViewType.ACTIVE_ONLY, 8, None, "abc"), + mock.call([12], "", ViewType.ACTIVE_ONLY, 20 % 8, None, "abc")] + MlflowClient.search_runs.assert_has_calls(calls) + assert len(paginated_runs) == 20 + + +def test_get_paginated_runs_gt_maxresults_onepage(): + """" + Number of runs that fit search criteria is greater than max_results. Only one page expected. + Expected to only get max_results number of results back. + """ + runs = [create_run() for i in range(10)] + tokenized_runs = PagedList(runs, "abc") + max_results = 10 + with mock.patch("mlflow.tracking.fluent.NUM_RUNS_PER_PAGE_PANDAS", 20): + with mock.patch.object(MlflowClient, "search_runs", return_value=tokenized_runs): + paginated_runs = _get_paginated_runs([123], "", ViewType.ACTIVE_ONLY, max_results, None) + MlflowClient.search_runs.assert_called_once_with( + [123], "", ViewType.ACTIVE_ONLY, max_results, None, None) + assert len(paginated_runs) == 10 + + +def test_delete_tag(): + """ + Confirm that fluent API delete tags actually works + :return: + """ + mlflow.set_tag('a', 'b') + run = MlflowClient().get_run(mlflow.active_run().info.run_id) + print(run.info.run_id) + assert 'a' in run.data.tags + mlflow.delete_tag('a') + run = MlflowClient().get_run(mlflow.active_run().info.run_id) + assert 'a' not in run.data.tags + with pytest.raises(MlflowException): + mlflow.delete_tag('a') + with pytest.raises(MlflowException): + mlflow.delete_tag('b') + mlflow.end_run() diff --git a/tests/tracking/test_rest_tracking.py b/tests/tracking/test_rest_tracking.py index 6d22789339e29..2253bc2a9c535 100644 --- a/tests/tracking/test_rest_tracking.py +++ b/tests/tracking/test_rest_tracking.py @@ -4,31 +4,29 @@ """ import mock -from multiprocessing import Process +from subprocess import Popen import os +import sys +import posixpath import pytest +from six.moves import urllib import socket +import shutil +from threading import Thread import time import tempfile -import unittest -from mlflow.server import app, FILE_STORE_ENV_VAR -from mlflow.entities import RunStatus +import mlflow.experiments +from mlflow.exceptions import MlflowException +from mlflow.entities import RunStatus, Metric, Param, RunTag, ViewType +from mlflow.server import BACKEND_STORE_URI_ENV_VAR, ARTIFACT_ROOT_ENV_VAR from mlflow.tracking import MlflowClient -from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME, MLFLOW_PARENT_RUN_ID +from mlflow.utils.mlflow_tags import MLFLOW_USER, MLFLOW_RUN_NAME, MLFLOW_PARENT_RUN_ID, \ + MLFLOW_SOURCE_TYPE, MLFLOW_SOURCE_NAME, MLFLOW_PROJECT_ENTRY_POINT, MLFLOW_GIT_COMMIT +from mlflow.utils.file_utils import path_to_local_file_uri, local_file_uri_to_path +from tests.integration.utils import invoke_cli_runner - -LOCALHOST = '127.0.0.1' -SERVER_PORT = 0 - - -def _get_safe_port(): - """Returns an ephemeral port that is very likely to be free to bind to.""" - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.bind((LOCALHOST, 0)) - 
port = sock.getsockname()[1] - sock.close() - return port +from tests.helper_functions import LOCALHOST, get_safe_port def _await_server_up_or_die(port, timeout=60): @@ -56,39 +54,120 @@ def _await_server_down_or_die(process, timeout=60): """Waits until the local flask server process is terminated.""" print('Awaiting termination of server process...') start_time = time.time() - while process.is_alive() and time.time() - start_time < timeout: + + def wait(): + process.wait() + + Thread(target=wait).start() + while process.returncode is None and time.time() - start_time < timeout: time.sleep(0.5) - if process.is_alive(): + if process.returncode is None: raise Exception('Server failed to shutdown after %s seconds' % timeout) -@pytest.fixture(scope="module", autouse=True) -def init_and_tear_down_server(request): +def _init_server(backend_uri, root_artifact_uri): """ - Once per run of the entire set of tests, we create a new server, and - clean it up at the end. + Launch a new REST server using the tracking store specified by backend_uri and root artifact + directory specified by root_artifact_uri. + :returns: A tuple (url, process) containing the string URL of the server and a handle to the + server process (a subprocess.Popen object). """ - global SERVER_PORT - SERVER_PORT = _get_safe_port() - file_store_path = tempfile.mkdtemp("test_rest_tracking_file_store") - env = {FILE_STORE_ENV_VAR: file_store_path} + mlflow.set_tracking_uri(None) + server_port = get_safe_port() + env = { + BACKEND_STORE_URI_ENV_VAR: backend_uri, + ARTIFACT_ROOT_ENV_VAR: path_to_local_file_uri( + tempfile.mkdtemp(dir=local_file_uri_to_path(root_artifact_uri))), + } with mock.patch.dict(os.environ, env): - process = Process(target=lambda: app.run(LOCALHOST, SERVER_PORT)) - process.start() - _await_server_up_or_die(SERVER_PORT) + cmd = ["python", + "-c", + 'from mlflow.server import app; app.run("{hostname}", {port})'.format( + hostname=LOCALHOST, port=server_port)] + process = Popen(cmd) + + _await_server_up_or_die(server_port) + url = "http://{hostname}:{port}".format(hostname=LOCALHOST, port=server_port) + print("Launching tracking server against backend URI %s. Server URL: %s" % (backend_uri, url)) + return url, process + + +# Root directory for all stores (backend or artifact stores) created during this suite +SUITE_ROOT_DIR = tempfile.mkdtemp("test_rest_tracking") +# Root directory for all artifact stores created during this suite +SUITE_ARTIFACT_ROOT_DIR = tempfile.mkdtemp(suffix="artifacts", dir=SUITE_ROOT_DIR) + + +def _get_sqlite_uri(): + path = path_to_local_file_uri(os.path.join(SUITE_ROOT_DIR, "test-database.db")) + path = path[len("file://"):] + + # NB: It looks like windows and posix have different requirements on number of slashes for + # whatever reason. Windows needs uri like 'sqlite:///C:/path/to/my/file' whereas posix expects + # sqlite://///path/to/my/file + prefix = "sqlite://" if sys.platform == "win32" else "sqlite:////" + return prefix + path - # Yielding here causes pytest to resume execution at the end of all tests. + +# Backend store URIs to test against +BACKEND_URIS = [ + _get_sqlite_uri(), # SqlAlchemy + path_to_local_file_uri(os.path.join(SUITE_ROOT_DIR, "file_store_root")), # FileStore +] + +# Map of backend URI to tuple (server URL, Process). 
We populate this map by constructing +# a server per backend URI +BACKEND_URI_TO_SERVER_URL_AND_PROC = { + uri: _init_server(backend_uri=uri, + root_artifact_uri=SUITE_ARTIFACT_ROOT_DIR) + for uri in BACKEND_URIS +} + + +def pytest_generate_tests(metafunc): + """ + Automatically parametrize each fixture/test that depends on `backend_store_uri` with the + list of backend store URIs. + """ + if 'backend_store_uri' in metafunc.fixturenames: + metafunc.parametrize('backend_store_uri', BACKEND_URIS) + + +@pytest.fixture(scope="module", autouse=True) +def server_urls(): + """ + Clean up all servers created for the test suite once the module's tests complete + """ + yield + for server_url, process in BACKEND_URI_TO_SERVER_URL_AND_PROC.values(): + print("Terminating server at %s..." % (server_url)) + process.terminate() + _await_server_down_or_die(process) + shutil.rmtree(SUITE_ROOT_DIR) - print("Terminating server...") - process.terminate() - _await_server_down_or_die(process) + +@pytest.fixture() +def tracking_server_uri(backend_store_uri): + url, _ = BACKEND_URI_TO_SERVER_URL_AND_PROC[backend_store_uri] + return url + + +@pytest.fixture() +def mlflow_client(tracking_server_uri): + """Provides an MLflow Tracking API client pointed at the local tracking server.""" + return MlflowClient(tracking_server_uri) @pytest.fixture() -def mlflow_client(): - """Provides an MLflow Tracking API client pointed at the local server.""" - return MlflowClient("%s:%s" % (LOCALHOST, SERVER_PORT)) +def cli_env(tracking_server_uri): + """Provides an environment for the MLflow CLI pointed at the local tracking server.""" + cli_env = { + "LC_ALL": "en_US.UTF-8", + "LANG": "en_US.UTF-8", + "MLFLOW_TRACKING_URI": tracking_server_uri, + } + return cli_env def test_create_get_list_experiment(mlflow_client): @@ -100,6 +179,14 @@ def test_create_get_list_experiment(mlflow_client): experiments = mlflow_client.list_experiments() assert set([e.name for e in experiments]) == {'My Experiment', 'Default'} + mlflow_client.delete_experiment(experiment_id) + assert set([e.name for e in mlflow_client.list_experiments()]) == {'Default'} + assert set([e.name for e in mlflow_client.list_experiments(ViewType.ACTIVE_ONLY)]) ==\ + {'Default'} + assert set([e.name for e in mlflow_client.list_experiments(ViewType.DELETED_ONLY)]) ==\ + {'My Experiment'} + assert set([e.name for e in mlflow_client.list_experiments(ViewType.ALL)]) == \ + {'My Experiment', 'Default'} def test_delete_restore_experiment(mlflow_client): @@ -111,6 +198,20 @@ def test_delete_restore_experiment(mlflow_client): assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active' +def test_delete_restore_experiment_cli(mlflow_client, cli_env): + experiment_name = "DeleteriousCLI" + invoke_cli_runner(mlflow.experiments.commands, + ['create', '--experiment-name', experiment_name], env=cli_env) + experiment_id = mlflow_client.get_experiment_by_name(experiment_name).experiment_id + assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active' + invoke_cli_runner(mlflow.experiments.commands, ['delete', '-x', str(experiment_id)], + env=cli_env) + assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'deleted' + invoke_cli_runner(mlflow.experiments.commands, ['restore', '-x', str(experiment_id)], + env=cli_env) + assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active' + + def test_rename_experiment(mlflow_client): experiment_id = mlflow_client.create_experiment('BadName') assert 
mlflow_client.get_experiment(experiment_id).name == 'BadName' @@ -118,85 +219,176 @@ assert mlflow_client.get_experiment(experiment_id).name == 'GoodName' -def test_create_run_all_args(mlflow_client): - experiment_id = mlflow_client.create_experiment('Run A Lot') - expected_tags = {'my': 'tag', 'other': 'tag'} - created_run = mlflow_client.create_run( - experiment_id, user_id=123, run_name='My name', source_type='LOCAL', - source_name='Hello', entry_point_name='entry', start_time=456, - source_version='abc', tags=expected_tags, parent_run_id=7) - run_id = created_run.info.run_uuid +def test_rename_experiment_cli(mlflow_client, cli_env): + bad_experiment_name = "CLIBadName" + good_experiment_name = "CLIGoodName" + + invoke_cli_runner(mlflow.experiments.commands, ['create', '-n', bad_experiment_name], + env=cli_env) + experiment_id = mlflow_client.get_experiment_by_name(bad_experiment_name).experiment_id + assert mlflow_client.get_experiment(experiment_id).name == bad_experiment_name + invoke_cli_runner( + mlflow.experiments.commands, + ['rename', '--experiment-id', str(experiment_id), '--new-name', good_experiment_name], + env=cli_env) + assert mlflow_client.get_experiment(experiment_id).name == good_experiment_name + + +@pytest.mark.parametrize("parent_run_id_kwarg", [None, "my-parent-id"]) +def test_create_run_all_args(mlflow_client, parent_run_id_kwarg): + user = "username" + source_name = "Hello" + entry_point = "entry" + source_version = "abc" + create_run_kwargs = { + "start_time": 456, + "tags": { + MLFLOW_USER: user, + MLFLOW_SOURCE_TYPE: "LOCAL", + MLFLOW_SOURCE_NAME: source_name, + MLFLOW_PROJECT_ENTRY_POINT: entry_point, + MLFLOW_GIT_COMMIT: source_version, + MLFLOW_PARENT_RUN_ID: "7", + MLFLOW_RUN_NAME: "my name", + "my": "tag", + "other": "tag", + } + } + experiment_id = mlflow_client.create_experiment('Run A Lot (parent_run_id=%s)' + % (parent_run_id_kwarg)) + created_run = mlflow_client.create_run(experiment_id, **create_run_kwargs) + run_id = created_run.info.run_id print("Run id=%s" % run_id) - run = mlflow_client.get_run(run_id) - assert run.info.run_uuid == run_id - assert run.info.experiment_id == experiment_id - assert run.info.user_id == 123 - assert run.info.source_type == 'LOCAL' - assert run.info.source_name == 'Hello' - assert run.info.entry_point_name == 'entry' - assert run.info.start_time == 456 - assert run.info.source_version == 'abc' - actual_tags = {t.key: t.value for t in run.data.tags} - for tag in expected_tags: - assert tag in actual_tags - assert actual_tags.get(MLFLOW_RUN_NAME) == 'My name' - assert actual_tags.get(MLFLOW_PARENT_RUN_ID) == '7' - - assert mlflow_client.list_run_infos(experiment_id) == [run.info] + fetched_run = mlflow_client.get_run(run_id) + for run in [created_run, fetched_run]: + assert run.info.run_id == run_id + assert run.info.run_uuid == run_id + assert run.info.experiment_id == experiment_id + assert run.info.user_id == user + assert run.info.start_time == create_run_kwargs["start_time"] + for tag in create_run_kwargs["tags"]: + assert tag in run.data.tags + assert run.data.tags.get(MLFLOW_USER) == user + assert run.data.tags.get(MLFLOW_RUN_NAME) == "my name" + assert run.data.tags.get(MLFLOW_PARENT_RUN_ID) == "7" + assert mlflow_client.list_run_infos(experiment_id) == [run.info] def test_create_run_defaults(mlflow_client): experiment_id = mlflow_client.create_experiment('Run A Little') created_run = mlflow_client.create_run(experiment_id) - run_id = 
created_run.info.run_uuid + run_id = created_run.info.run_id run = mlflow_client.get_run(run_id) - assert run.info.run_uuid == run_id + assert run.info.run_id == run_id assert run.info.experiment_id == experiment_id - assert run.info.user_id is not None # we should pick some default + assert run.info.user_id == "unknown" -def test_log_metrics_params_tags(mlflow_client): +def test_log_metrics_params_tags(mlflow_client, backend_store_uri): experiment_id = mlflow_client.create_experiment('Oh My') created_run = mlflow_client.create_run(experiment_id) - run_id = created_run.info.run_uuid - mlflow_client.log_metric(run_id, 'metric', 123.456) + run_id = created_run.info.run_id + mlflow_client.log_metric(run_id, key='metric', value=123.456, timestamp=789, step=2) + mlflow_client.log_metric(run_id, key='stepless-metric', value=987.654, timestamp=321) + mlflow_client.log_param(run_id, 'param', 'value') + mlflow_client.set_tag(run_id, 'taggity', 'do-dah') + run = mlflow_client.get_run(run_id) + assert run.data.metrics.get('metric') == 123.456 + assert run.data.metrics.get('stepless-metric') == 987.654 + assert run.data.params.get('param') == 'value' + assert run.data.tags.get('taggity') == 'do-dah' + metric_history0 = mlflow_client.get_metric_history(run_id, "metric") + assert len(metric_history0) == 1 + metric0 = metric_history0[0] + assert metric0.key == "metric" + assert metric0.value == 123.456 + assert metric0.timestamp == 789 + assert metric0.step == 2 + metric_history1 = mlflow_client.get_metric_history(run_id, "stepless-metric") + assert len(metric_history1) == 1 + metric1 = metric_history1[0] + assert metric1.key == "stepless-metric" + assert metric1.value == 987.654 + assert metric1.timestamp == 321 + assert metric1.step == 0 + + +def test_delete_tag(mlflow_client, backend_store_uri): + experiment_id = mlflow_client.create_experiment('DeleteTagExperiment') + created_run = mlflow_client.create_run(experiment_id) + run_id = created_run.info.run_id + mlflow_client.log_metric(run_id, key='metric', value=123.456, timestamp=789, step=2) + mlflow_client.log_metric(run_id, key='stepless-metric', value=987.654, timestamp=321) mlflow_client.log_param(run_id, 'param', 'value') mlflow_client.set_tag(run_id, 'taggity', 'do-dah') run = mlflow_client.get_run(run_id) - metrics = {t.key: t.value for t in run.data.metrics} - params = {t.key: t.value for t in run.data.params} - tags = {t.key: t.value for t in run.data.tags} - assert metrics.get('metric') == 123.456 - assert params.get('param') == 'value' - assert tags.get('taggity') == 'do-dah' + assert 'taggity' in run.data.tags and run.data.tags['taggity'] == 'do-dah' + mlflow_client.delete_tag(run_id, 'taggity') + run = mlflow_client.get_run(run_id) + assert 'taggity' not in run.data.tags + with pytest.raises(MlflowException): + mlflow_client.delete_tag('fake_run_id', 'taggity') + with pytest.raises(MlflowException): + mlflow_client.delete_tag(run_id, 'fakeTag') + mlflow_client.delete_run(run_id) + with pytest.raises(MlflowException): + mlflow_client.delete_tag(run_id, 'taggity') + + +def test_log_batch(mlflow_client, backend_store_uri): + experiment_id = mlflow_client.create_experiment('Batch em up') + created_run = mlflow_client.create_run(experiment_id) + run_id = created_run.info.run_id + mlflow_client.log_batch( + run_id=run_id, + metrics=[Metric("metric", 123.456, 789, 3)], params=[Param("param", "value")], + tags=[RunTag("taggity", "do-dah")]) + run = mlflow_client.get_run(run_id) + assert run.data.metrics.get('metric') == 123.456 + assert 
run.data.params.get('param') == 'value' + assert run.data.tags.get('taggity') == 'do-dah' + metric_history = mlflow_client.get_metric_history(run_id, "metric") + assert len(metric_history) == 1 + metric = metric_history[0] + assert metric.key == "metric" + assert metric.value == 123.456 + assert metric.timestamp == 789 + assert metric.step == 3 def test_set_terminated_defaults(mlflow_client): experiment_id = mlflow_client.create_experiment('Terminator 1') created_run = mlflow_client.create_run(experiment_id) - run_id = created_run.info.run_uuid - assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'RUNNING' + run_id = created_run.info.run_id + assert mlflow_client.get_run(run_id).info.status == 'RUNNING' assert mlflow_client.get_run(run_id).info.end_time is None mlflow_client.set_terminated(run_id) - assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'FINISHED' + assert mlflow_client.get_run(run_id).info.status == 'FINISHED' assert mlflow_client.get_run(run_id).info.end_time <= int(time.time() * 1000) def test_set_terminated_status(mlflow_client): experiment_id = mlflow_client.create_experiment('Terminator 2') created_run = mlflow_client.create_run(experiment_id) - run_id = created_run.info.run_uuid - assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'RUNNING' + run_id = created_run.info.run_id + assert mlflow_client.get_run(run_id).info.status == 'RUNNING' assert mlflow_client.get_run(run_id).info.end_time is None mlflow_client.set_terminated(run_id, 'FAILED') - assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'FAILED' + assert mlflow_client.get_run(run_id).info.status == 'FAILED' assert mlflow_client.get_run(run_id).info.end_time <= int(time.time() * 1000) def test_artifacts(mlflow_client): experiment_id = mlflow_client.create_experiment('Art In Fact') + experiment_info = mlflow_client.get_experiment(experiment_id) + assert experiment_info.artifact_location.startswith( + path_to_local_file_uri(SUITE_ARTIFACT_ROOT_DIR)) + artifact_path = urllib.parse.urlparse(experiment_info.artifact_location).path + assert posixpath.split(artifact_path)[-1] == experiment_id + created_run = mlflow_client.create_run(experiment_id) - run_id = created_run.info.run_uuid + assert created_run.info.artifact_uri.startswith(experiment_info.artifact_location) + run_id = created_run.info.run_id src_dir = tempfile.mkdtemp('test_artifacts_src') src_file = os.path.join(src_dir, 'my.file') with open(src_file, 'w') as f: @@ -216,3 +408,18 @@ def test_artifacts(mlflow_client): dir_artifacts = mlflow_client.download_artifacts(run_id, 'dir') assert open('%s/my.file' % dir_artifacts, 'r').read() == 'Hello, World!' 
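+
+
+# Note: with identical start times, the pagination checks below assume that
+# search_runs pages through runs in ascending run_id order, which is why the
+# generated run ids are sorted before being compared page by page.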
+def test_search_pagination(mlflow_client, backend_store_uri):
+    experiment_id = mlflow_client.create_experiment('search_pagination')
+    runs = [mlflow_client.create_run(experiment_id, start_time=1).info.run_id
+            for _ in range(0, 10)]
+    runs = sorted(runs)
+    result = mlflow_client.search_runs([experiment_id], max_results=4, page_token=None)
+    assert [r.info.run_id for r in result] == runs[0:4]
+    assert result.token is not None
+    result = mlflow_client.search_runs([experiment_id], max_results=4, page_token=result.token)
+    assert [r.info.run_id for r in result] == runs[4:8]
+    assert result.token is not None
+    result = mlflow_client.search_runs([experiment_id], max_results=4, page_token=result.token)
+    assert [r.info.run_id for r in result] == runs[8:]
+    assert result.token is None
diff --git a/tests/tracking/test_tracking.py b/tests/tracking/test_tracking.py
index 217312696df35..9404f3ad8fd3a 100644
--- a/tests/tracking/test_tracking.py
+++ b/tests/tracking/test_tracking.py
@@ -1,16 +1,27 @@
+from __future__ import print_function
 import filecmp
 import os
 import random
 import tempfile
+import time
+import attrdict
+import mock
 import pytest
 import mlflow
 from mlflow import tracking
-from mlflow.entities import RunStatus
+from mlflow.entities import RunStatus, LifecycleStage, Metric, Param, RunTag, ViewType
 from mlflow.exceptions import MlflowException
+from mlflow.store.file_store import FileStore
+from mlflow.protos.databricks_pb2 import ErrorCode, INVALID_PARAMETER_VALUE
+from mlflow.tracking.client import MlflowClient
 from mlflow.tracking.fluent import start_run, end_run
-from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID
+from mlflow.utils.file_utils import local_file_uri_to_path
+from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_USER, MLFLOW_SOURCE_NAME, \
+    MLFLOW_SOURCE_TYPE
+from mlflow.tracking.fluent import _RUN_ID_ENV_VAR
+
 from tests.projects.utils import tracking_uri_mock
 
 
@@ -29,7 +40,38 @@ def test_create_experiment(tracking_uri_mock):
     assert exp_id is not None
 
 
-def test_set_experiment(tracking_uri_mock):
+def test_create_experiment_with_duplicate_name(tracking_uri_mock):
+    name = "popular_name"
+    exp_id = mlflow.create_experiment(name)
+
+    with pytest.raises(MlflowException):
+        mlflow.create_experiment(name)
+
+    tracking.MlflowClient().delete_experiment(exp_id)
+    with pytest.raises(MlflowException):
+        mlflow.create_experiment(name)
+
+
+def test_create_experiments_with_bad_names():
+    # None for name
+    with pytest.raises(MlflowException) as e:
+        mlflow.create_experiment(None)
+    assert "Invalid experiment name: 'None'" in e.value.message
+
+    # empty string name
+    with pytest.raises(MlflowException) as e:
+        mlflow.create_experiment("")
+    assert "Invalid experiment name: ''" in e.value.message
+
+
+@pytest.mark.parametrize("name", [123, 0, -1.2, [], ["A"], {1: 2}])
+def test_create_experiments_with_bad_name_types(name):
+    with pytest.raises(MlflowException) as e:
+        mlflow.create_experiment(name)
+    assert ("Invalid experiment name: %s. Expects a string."
+            % name) in e.value.message
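+
+
+# set_experiment mutates module-level state in mlflow.tracking.fluent, so the
+# tests below take a reset_active_experiment fixture (assumed to restore the
+# default active experiment) instead of clearing that state in a finally block.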
+def test_set_experiment(tracking_uri_mock, reset_active_experiment):
     with pytest.raises(TypeError):
         mlflow.set_experiment()
@@ -39,99 +81,308 @@ def test_set_experiment(tracking_uri_mock):
     with pytest.raises(Exception):
         mlflow.set_experiment("")
 
-    try:
-        name = "random_exp"
-        exp_id = mlflow.create_experiment(name)
-        mlflow.set_experiment(name)
-        run = start_run()
+    name = "random_exp"
+    exp_id = mlflow.create_experiment(name)
+    mlflow.set_experiment(name)
+    with start_run() as run:
         assert run.info.experiment_id == exp_id
-        end_run()
-        another_name = "another_experiment"
-        mlflow.set_experiment(another_name)
-        exp_id2 = mlflow.tracking.MlflowClient().get_experiment_by_name(another_name)
-        another_run = start_run()
+    another_name = "another_experiment"
+    mlflow.set_experiment(another_name)
+    exp_id2 = mlflow.tracking.MlflowClient().get_experiment_by_name(another_name)
+    with start_run() as another_run:
         assert another_run.info.experiment_id == exp_id2.experiment_id
-        end_run()
-    finally:
-        # Need to do this to clear active experiment to restore state
-        mlflow.tracking.fluent._active_experiment_id = None
+
+
+def test_set_experiment_with_deleted_experiment_name(tracking_uri_mock):
+    name = "dead_exp"
+    mlflow.set_experiment(name)
+    with start_run() as run:
+        exp_id = run.info.experiment_id
+
+    tracking.MlflowClient().delete_experiment(exp_id)
+
+    with pytest.raises(MlflowException):
+        mlflow.set_experiment(name)
+
+
+def test_list_experiments(tracking_uri_mock):
+    def _assert_exps(ids_to_lifecycle_stage, view_type_arg):
+        result = set([(exp.experiment_id, exp.lifecycle_stage)
+                      for exp in client.list_experiments(view_type=view_type_arg)])
+        assert result == set([(id, stage) for id, stage in ids_to_lifecycle_stage.items()])
+    experiment_id = mlflow.create_experiment("exp_1")
+    assert experiment_id == '1'
+    client = tracking.MlflowClient()
+    _assert_exps({'0': LifecycleStage.ACTIVE, '1': LifecycleStage.ACTIVE}, ViewType.ACTIVE_ONLY)
+    _assert_exps({'0': LifecycleStage.ACTIVE, '1': LifecycleStage.ACTIVE}, ViewType.ALL)
+    _assert_exps({}, ViewType.DELETED_ONLY)
+    client.delete_experiment(experiment_id)
+    _assert_exps({'0': LifecycleStage.ACTIVE}, ViewType.ACTIVE_ONLY)
+    _assert_exps({'0': LifecycleStage.ACTIVE, '1': LifecycleStage.DELETED}, ViewType.ALL)
+    _assert_exps({'1': LifecycleStage.DELETED}, ViewType.DELETED_ONLY)
+
+
+def test_set_experiment_with_zero_id(reset_mock, reset_active_experiment):
+    reset_mock(MlflowClient, "get_experiment_by_name",
+               mock.Mock(return_value=attrdict.AttrDict(
+                   experiment_id=0,
+                   lifecycle_stage=LifecycleStage.ACTIVE)))
+    reset_mock(MlflowClient, "create_experiment", mock.Mock())
+
+    mlflow.set_experiment("my_exp")
+
+    MlflowClient.get_experiment_by_name.assert_called_once()
+    MlflowClient.create_experiment.assert_not_called()
 
 
 def test_start_run_context_manager(tracking_uri_mock):
-    first_run = start_run()
-    first_uuid = first_run.info.run_uuid
-    with first_run:
+    with start_run() as first_run:
+        first_uuid = first_run.info.run_id
         # Check that start_run() causes the run information to be persisted in the store
         persisted_run = tracking.MlflowClient().get_run(first_uuid)
         assert persisted_run is not None
         assert persisted_run.info == first_run.info
     finished_run = tracking.MlflowClient().get_run(first_uuid)
-    assert finished_run.info.status == RunStatus.FINISHED
+    assert finished_run.info.status == RunStatus.to_string(RunStatus.FINISHED)
    # Launch a separate run that fails, verify the run status is FAILED and the run UUID is
    # different
-    second_run =
start_run() - assert second_run.info.run_uuid != first_uuid with pytest.raises(Exception): - with second_run: + with start_run() as second_run: + second_run_id = second_run.info.run_id raise Exception("Failing run!") - finished_run2 = tracking.MlflowClient().get_run(second_run.info.run_uuid) - assert finished_run2.info.status == RunStatus.FAILED + assert second_run_id != first_uuid + finished_run2 = tracking.MlflowClient().get_run(second_run_id) + assert finished_run2.info.status == RunStatus.to_string(RunStatus.FAILED) def test_start_and_end_run(tracking_uri_mock): # Use the start_run() and end_run() APIs without a `with` block, verify they work. - active_run = start_run() - mlflow.log_metric("name_1", 25) - end_run() - finished_run = tracking.MlflowClient().get_run(active_run.info.run_uuid) + + with start_run() as active_run: + mlflow.log_metric("name_1", 25) + finished_run = tracking.MlflowClient().get_run(active_run.info.run_id) # Validate metrics assert len(finished_run.data.metrics) == 1 - expected_pairs = {"name_1": 25} - for metric in finished_run.data.metrics: - assert expected_pairs[metric.key] == metric.value + assert finished_run.data.metrics["name_1"] == 25 + + +def test_metric_timestamp(tracking_uri_mock): + with mlflow.start_run() as active_run: + mlflow.log_metric("name_1", 25) + mlflow.log_metric("name_1", 30) + run_id = active_run.info.run_uuid + # Check that metric timestamps are between run start and finish + client = mlflow.tracking.MlflowClient() + history = client.get_metric_history(run_id, "name_1") + finished_run = client.get_run(run_id) + assert len(history) == 2 + assert all([ + m.timestamp >= finished_run.info.start_time and m.timestamp <= finished_run.info.end_time + for m in history + ]) + + +def test_log_batch(tracking_uri_mock, tmpdir): + expected_metrics = {"metric-key0": 1.0, "metric-key1": 4.0} + expected_params = {"param-key0": "param-val0", "param-key1": "param-val1"} + exact_expected_tags = {"tag-key0": "tag-val0", "tag-key1": "tag-val1"} + approx_expected_tags = set([MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE]) + + t = int(time.time()) + sorted_expected_metrics = sorted(expected_metrics.items(), key=lambda kv: kv[0]) + metrics = [Metric(key=key, value=value, timestamp=t, step=i) + for i, (key, value) in enumerate(sorted_expected_metrics)] + params = [Param(key=key, value=value) for key, value in expected_params.items()] + tags = [RunTag(key=key, value=value) for key, value in exact_expected_tags.items()] + + with start_run() as active_run: + run_id = active_run.info.run_id + mlflow.tracking.MlflowClient().log_batch(run_id=run_id, metrics=metrics, params=params, + tags=tags) + client = tracking.MlflowClient() + finished_run = client.get_run(run_id) + # Validate metrics + assert len(finished_run.data.metrics) == 2 + for key, value in finished_run.data.metrics.items(): + assert expected_metrics[key] == value + metric_history0 = client.get_metric_history(run_id, "metric-key0") + assert set([(m.value, m.timestamp, m.step) for m in metric_history0]) == set([ + (1.0, t, 0), + ]) + metric_history1 = client.get_metric_history(run_id, "metric-key1") + assert set([(m.value, m.timestamp, m.step) for m in metric_history1]) == set([ + (4.0, t, 1), + ]) + + # Validate tags (for automatically-set tags) + assert len(finished_run.data.tags) == len(exact_expected_tags) + len(approx_expected_tags) + for tag_key, tag_value in finished_run.data.tags.items(): + if tag_key in approx_expected_tags: + pass + else: + assert exact_expected_tags[tag_key] == tag_value + # 
Validate params + assert finished_run.data.params == expected_params + # test that log_batch works with fewer params + new_tags = {"1": "2", "3": "4", "5": "6"} + tags = [RunTag(key=key, value=value) for key, value in new_tags.items()] + client.log_batch(run_id=run_id, tags=tags) + finished_run_2 = client.get_run(run_id) + # Validate tags (for automatically-set tags) + assert len(finished_run_2.data.tags) == len(finished_run.data.tags) + 3 + for tag_key, tag_value in finished_run_2.data.tags.items(): + if tag_key in new_tags: + assert new_tags[tag_key] == tag_value def test_log_metric(tracking_uri_mock): - active_run = start_run() - run_uuid = active_run.info.run_uuid - with active_run: + with start_run() as active_run, mock.patch("time.time") as time_mock: + time_mock.side_effect = [123 for _ in range(100)] + run_id = active_run.info.run_id mlflow.log_metric("name_1", 25) mlflow.log_metric("name_2", -3) - mlflow.log_metric("name_1", 30) + mlflow.log_metric("name_1", 30, 5) + mlflow.log_metric("name_1", 40, -2) mlflow.log_metric("nested/nested/name", 40) - finished_run = tracking.MlflowClient().get_run(run_uuid) + finished_run = tracking.MlflowClient().get_run(run_id) # Validate metrics assert len(finished_run.data.metrics) == 3 expected_pairs = {"name_1": 30, "name_2": -3, "nested/nested/name": 40} - for metric in finished_run.data.metrics: - assert expected_pairs[metric.key] == metric.value + for key, value in finished_run.data.metrics.items(): + assert expected_pairs[key] == value + client = tracking.MlflowClient() + metric_history_name1 = client.get_metric_history(run_id, "name_1") + assert set([(m.value, m.timestamp, m.step) for m in metric_history_name1]) == set([ + (25, 123 * 1000, 0), + (30, 123 * 1000, 5), + (40, 123 * 1000, -2), + ]) + metric_history_name2 = client.get_metric_history(run_id, "name_2") + assert set([(m.value, m.timestamp, m.step) for m in metric_history_name2]) == set([ + (-3, 123 * 1000, 0), + ]) + + +def test_log_metrics_uses_millisecond_timestamp_resolution(tracking_uri_mock): + with start_run() as active_run, mock.patch("time.time") as time_mock: + time_mock.side_effect = [123 for _ in range(100)] + mlflow.log_metrics({ + "name_1": 25, + "name_2": -3, + }) + mlflow.log_metrics({ + "name_1": 30, + }) + mlflow.log_metrics({ + "name_1": 40, + }) + run_id = active_run.info.run_id + finished_run = tracking.MlflowClient().get_run(run_id) + client = tracking.MlflowClient() + metric_history_name1 = client.get_metric_history(run_id, "name_1") + assert set([(m.value, m.timestamp) for m in metric_history_name1]) == set([ + (25, 123 * 1000), + (30, 123 * 1000), + (40, 123 * 1000), + ]) + metric_history_name2 = client.get_metric_history(run_id, "name_2") + assert set([(m.value, m.timestamp) for m in metric_history_name2]) == set([ + (-3, 123 * 1000), + ]) + + +@pytest.mark.parametrize("step_kwarg", [None, -10, 5]) +def test_log_metrics_uses_common_timestamp_and_step_per_invocation(tracking_uri_mock, step_kwarg): + expected_metrics = {"name_1": 30, "name_2": -3, "nested/nested/name": 40} + with start_run() as active_run: + run_id = active_run.info.run_id + mlflow.log_metrics(expected_metrics, step=step_kwarg) + finished_run = tracking.MlflowClient().get_run(run_id) + # Validate metric key/values match what we expect, and that all metrics have the same timestamp + assert len(finished_run.data.metrics) == len(expected_metrics) + for key, value in finished_run.data.metrics.items(): + assert expected_metrics[key] == value + common_timestamp = 
finished_run.data._metric_objs[0].timestamp
+    expected_step = step_kwarg if step_kwarg is not None else 0
+    for metric_obj in finished_run.data._metric_objs:
+        assert metric_obj.timestamp == common_timestamp
+        assert metric_obj.step == expected_step
+
+
+@pytest.fixture
+def get_store_mock(tmpdir):
+    with mock.patch("mlflow.store.file_store.FileStore.log_batch") as _get_store_mock:
+        yield _get_store_mock
+
+
+def test_set_tags(tracking_uri_mock):
+    exact_expected_tags = {"name_1": "c", "name_2": "b", "nested/nested/name": 5}
+    approx_expected_tags = set([MLFLOW_USER, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE])
+    with start_run() as active_run:
+        run_id = active_run.info.run_id
+        mlflow.set_tags(exact_expected_tags)
+    finished_run = tracking.MlflowClient().get_run(run_id)
+    # Validate tags
+    assert len(finished_run.data.tags) == len(exact_expected_tags) + len(approx_expected_tags)
+    for tag_key, tag_val in finished_run.data.tags.items():
+        if tag_key in approx_expected_tags:
+            pass
+        else:
+            assert str(exact_expected_tags[tag_key]) == tag_val
 
 
 def test_log_metric_validation(tracking_uri_mock):
-    active_run = start_run()
-    run_uuid = active_run.info.run_uuid
-    with active_run:
-        mlflow.log_metric("name_1", "apple")
-    finished_run = tracking.MlflowClient().get_run(run_uuid)
+    with start_run() as active_run:
+        run_id = active_run.info.run_id
+        with pytest.raises(MlflowException) as e:
+            mlflow.log_metric("name_1", "apple")
+        assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
+    finished_run = tracking.MlflowClient().get_run(run_id)
     assert len(finished_run.data.metrics) == 0
 
 
 def test_log_param(tracking_uri_mock):
-    print(tracking.get_tracking_uri())
-    active_run = start_run()
-    run_uuid = active_run.info.run_uuid
-    with active_run:
+    with start_run() as active_run:
+        run_id = active_run.info.run_id
         mlflow.log_param("name_1", "a")
         mlflow.log_param("name_2", "b")
-        mlflow.log_param("name_1", "c")
         mlflow.log_param("nested/nested/name", 5)
-    finished_run = tracking.MlflowClient().get_run(run_uuid)
+    finished_run = tracking.MlflowClient().get_run(run_id)
     # Validate params
-    assert len(finished_run.data.params) == 3
-    expected_pairs = {"name_1": "c", "name_2": "b", "nested/nested/name": "5"}
-    for param in finished_run.data.params:
-        assert expected_pairs[param.key] == param.value
+    assert finished_run.data.params == {"name_1": "a", "name_2": "b", "nested/nested/name": "5"}
+
+
+def test_log_params(tracking_uri_mock):
+    expected_params = {"name_1": "c", "name_2": "b", "nested/nested/name": 5}
+    with start_run() as active_run:
+        run_id = active_run.info.run_id
+        mlflow.log_params(expected_params)
+    finished_run = tracking.MlflowClient().get_run(run_id)
+    # Validate params
+    assert finished_run.data.params == {"name_1": "c", "name_2": "b", "nested/nested/name": "5"}
+
+
+def test_log_batch_validates_entity_names_and_values(tracking_uri_mock):
+    bad_kwargs = {
+        "metrics": [
+            [Metric(key="../bad/metric/name", value=0.3, timestamp=3, step=0)],
+            [Metric(key="ok-name", value="non-numerical-value", timestamp=3, step=0)],
+            [Metric(key="ok-name", value=0.3, timestamp="non-numerical-timestamp", step=0)],
+        ],
+        "params": [[Param(key="../bad/param/name", value="my-val")]],
+        "tags": [[RunTag(key="../bad/tag/name", value="my-val")]],
+    }
+    with start_run() as active_run:
+        for kwarg, bad_values in bad_kwargs.items():
+            for bad_kwarg_value in bad_values:
+                final_kwargs = {
+                    "run_id": active_run.info.run_id, "metrics": [], "params": [], "tags": [],
+                }
+                final_kwargs[kwarg] = bad_kwarg_value
+
with pytest.raises(MlflowException) as e: + tracking.MlflowClient().log_batch(**final_kwargs) + assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_log_artifact(tracking_uri_mock): @@ -147,7 +398,8 @@ def test_log_artifact(tracking_uri_mock): artifact_parent_dirs = ["some_parent_dir", None] for parent_dir in artifact_parent_dirs: with start_run(): - run_artifact_dir = mlflow.get_artifact_uri() + artifact_uri = mlflow.get_artifact_uri() + run_artifact_dir = local_file_uri_to_path(artifact_uri) mlflow.log_artifact(path0, parent_dir) expected_dir = os.path.join(run_artifact_dir, parent_dir) \ if parent_dir is not None else run_artifact_dir @@ -157,7 +409,9 @@ def test_log_artifact(tracking_uri_mock): # Log multiple artifacts, verify they exist in the directory returned by get_artifact_uri for parent_dir in artifact_parent_dirs: with start_run(): - run_artifact_dir = mlflow.get_artifact_uri() + artifact_uri = mlflow.get_artifact_uri() + run_artifact_dir = local_file_uri_to_path(artifact_uri) + mlflow.log_artifacts(artifact_src_dir, parent_dir) # Check that the logged artifacts match expected_artifact_output_dir = os.path.join(run_artifact_dir, parent_dir) \ @@ -174,6 +428,7 @@ def test_uri_types(): assert utils._is_local_uri("mlruns") assert utils._is_local_uri("./mlruns") assert utils._is_local_uri("file:///foo/mlruns") + assert utils._is_local_uri("file:foo/mlruns") assert not utils._is_local_uri("https://whatever") assert not utils._is_local_uri("http://whatever") assert not utils._is_local_uri("databricks") @@ -199,40 +454,151 @@ def test_with_startrun(): t0 = int(time.time() * 1000) with mlflow.start_run() as active_run: assert mlflow.active_run() == active_run - run_id = active_run.info.run_uuid + run_id = active_run.info.run_id t1 = int(time.time() * 1000) run_info = mlflow.tracking._get_store().get_run(run_id).info - assert run_info.status == RunStatus.from_string("FINISHED") + assert run_info.status == "FINISHED" assert t0 <= run_info.end_time and run_info.end_time <= t1 assert mlflow.active_run() is None def test_parent_create_run(tracking_uri_mock): - parent_run = mlflow.start_run() - with pytest.raises(Exception, match='To start a nested run'): - mlflow.start_run() - child_run = mlflow.start_run(nested=True) - grand_child_run = mlflow.start_run(nested=True) + + with mlflow.start_run() as parent_run: + parent_run_id = parent_run.info.run_id + os.environ[_RUN_ID_ENV_VAR] = parent_run_id + with mlflow.start_run() as parent_run: + assert parent_run.info.run_id == parent_run_id + with pytest.raises(Exception, match='To start a nested run'): + mlflow.start_run() + with mlflow.start_run(nested=True) as child_run: + assert child_run.info.run_id != parent_run_id + with mlflow.start_run(nested=True) as grand_child_run: + pass def verify_has_parent_id_tag(child_id, expected_parent_id): tags = tracking.MlflowClient().get_run(child_id).data.tags - assert any([t.key == MLFLOW_PARENT_RUN_ID and t.value == expected_parent_id for t in tags]) - - verify_has_parent_id_tag(child_run.info.run_uuid, parent_run.info.run_uuid) - verify_has_parent_id_tag(grand_child_run.info.run_uuid, child_run.info.run_uuid) + assert tags[MLFLOW_PARENT_RUN_ID] == expected_parent_id - mlflow.end_run() - mlflow.end_run() - mlflow.end_run() + verify_has_parent_id_tag(child_run.info.run_id, parent_run.info.run_id) + verify_has_parent_id_tag(grand_child_run.info.run_id, child_run.info.run_id) assert mlflow.active_run() is None def test_start_deleted_run(): run_id = None with mlflow.start_run() as 
active_run:
-        run_id = active_run.info.run_uuid
+        run_id = active_run.info.run_id
     tracking.MlflowClient().delete_run(run_id)
-    with pytest.raises(MlflowException, matches='because it is in the deleted state.'):
-        with mlflow.start_run(run_uuid=run_id):
+    with pytest.raises(MlflowException, match='because it is in the deleted state.'):
+        with mlflow.start_run(run_id=run_id):
             pass
     assert mlflow.active_run() is None
+
+
+def test_start_run_exp_id_0(tracking_uri_mock, reset_active_experiment):
+    mlflow.set_experiment("some-experiment")
+    # Create a run and verify that the current active experiment is the one we just set
+    with mlflow.start_run() as active_run:
+        exp_id = active_run.info.experiment_id
+        assert exp_id != FileStore.DEFAULT_EXPERIMENT_ID
+        assert MlflowClient().get_experiment(exp_id).name == "some-experiment"
+    # Set experiment ID to 0 when creating a run, verify that the specified experiment ID is honored
+    with mlflow.start_run(experiment_id=0) as active_run:
+        assert active_run.info.experiment_id == FileStore.DEFAULT_EXPERIMENT_ID
+
+
+def test_get_artifact_uri_with_artifact_path_unspecified_returns_artifact_root_dir():
+    with mlflow.start_run() as active_run:
+        assert mlflow.get_artifact_uri(artifact_path=None) == active_run.info.artifact_uri
+
+
+def test_get_artifact_uri_uses_currently_active_run_id():
+    artifact_path = "artifact"
+    with mlflow.start_run() as active_run:
+        assert mlflow.get_artifact_uri(artifact_path=artifact_path) == \
+            tracking.artifact_utils.get_artifact_uri(
+                run_id=active_run.info.run_id, artifact_path=artifact_path)
+
+
+def test_search_runs(tracking_uri_mock, reset_active_experiment):
+    mlflow.set_experiment("exp-for-search")
+    # Create a run and verify that the current active experiment is the one we just set
+    logged_runs = {}
+    with mlflow.start_run() as active_run:
+        logged_runs["first"] = active_run.info.run_id
+        mlflow.log_metric("m1", 0.001)
+        mlflow.log_metric("m2", 0.002)
+        mlflow.log_metric("m1", 0.002)
+        mlflow.log_param("p1", "a")
+        mlflow.set_tag("t1", "first-tag-val")
+    with mlflow.start_run() as active_run:
+        logged_runs["second"] = active_run.info.run_id
+        mlflow.log_metric("m1", 0.008)
+        mlflow.log_param("p2", "aa")
+        mlflow.set_tag("t2", "second-tag-val")
+
+    def verify_runs(runs, expected_set):
+        assert set([r.info.run_id for r in runs]) == set([logged_runs[r] for r in expected_set])
+
+    experiment_id = MlflowClient().get_experiment_by_name("exp-for-search").experiment_id
+
+    # 2 runs in this experiment
+    assert len(MlflowClient().list_run_infos(experiment_id, ViewType.ACTIVE_ONLY)) == 2
+
+    # 2 runs that have metric "m1" > 0.001
+    runs = MlflowClient().search_runs([experiment_id], "metrics.m1 > 0.0001")
+    verify_runs(runs, ["first", "second"])
+
+    # 1 run that has metric "m1" > 0.002
+    runs = MlflowClient().search_runs([experiment_id], "metrics.m1 > 0.002")
+    verify_runs(runs, ["second"])
+
+    # no runs with metric "m1" > 0.1
+    runs = MlflowClient().search_runs([experiment_id], "metrics.m1 > 0.1")
+    verify_runs(runs, [])
+
+    # 1 run with metric "m2" > 0
+    runs = MlflowClient().search_runs([experiment_id], "metrics.m2 > 0")
+    verify_runs(runs, ["first"])
+
+    # 1 run each with param "p1" and "p2"
+    runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.ALL)
+    verify_runs(runs, ["first"])
+    runs = MlflowClient().search_runs([experiment_id], "params.p2 != 'a'", ViewType.ALL)
+    verify_runs(runs, ["second"])
+    runs = MlflowClient().search_runs([experiment_id], "params.p2 = 'aa'", ViewType.ALL)
+    verify_runs(runs, ["second"])
+
+    # 1 run each with tag "t1" and "t2"
+    runs =
MlflowClient().search_runs([experiment_id], "tags.t1 = 'first-tag-val'", ViewType.ALL) + verify_runs(runs, ["first"]) + runs = MlflowClient().search_runs([experiment_id], "tags.t2 != 'qwerty'", ViewType.ALL) + verify_runs(runs, ["second"]) + runs = MlflowClient().search_runs([experiment_id], "tags.t2 = 'second-tag-val'", ViewType.ALL) + verify_runs(runs, ["second"]) + + # delete "first" run + MlflowClient().delete_run(logged_runs["first"]) + runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.ALL) + verify_runs(runs, ["first"]) + + runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.DELETED_ONLY) + verify_runs(runs, ["first"]) + + runs = MlflowClient().search_runs([experiment_id], "params.p1 = 'a'", ViewType.ACTIVE_ONLY) + verify_runs(runs, []) + + +def test_search_runs_multiple_experiments(tracking_uri_mock, reset_active_experiment): + experiment_ids = [mlflow.create_experiment("exp__{}".format(id)) for id in range(1, 4)] + for eid in experiment_ids: + with mlflow.start_run(experiment_id=eid): + mlflow.log_metric("m0", 1) + mlflow.log_metric("m_{}".format(eid), 2) + + assert len(MlflowClient().search_runs(experiment_ids, "metrics.m0 > 0", ViewType.ALL)) == 3 + + assert len(MlflowClient().search_runs(experiment_ids, "metrics.m_1 > 0", ViewType.ALL)) == 1 + assert len(MlflowClient().search_runs(experiment_ids, "metrics.m_2 = 2", ViewType.ALL)) == 1 + assert len(MlflowClient().search_runs(experiment_ids, "metrics.m_3 < 4", ViewType.ALL)) == 1 diff --git a/tests/tracking/test_utils.py b/tests/tracking/test_utils.py index c050dfd585e1b..9d8f1bdd03ea1 100644 --- a/tests/tracking/test_utils.py +++ b/tests/tracking/test_utils.py @@ -1,28 +1,47 @@ import mock import os import pytest +from six.moves import reload_module as reload +import mlflow +from mlflow.store.dbmodels.db_types import DATABASE_ENGINES from mlflow.store.file_store import FileStore from mlflow.store.rest_store import RestStore +from mlflow.store.sqlalchemy_store import SqlAlchemyStore +from mlflow.tracking.registry import TrackingStoreRegistry from mlflow.tracking.utils import _get_store, _TRACKING_URI_ENV_VAR, _TRACKING_USERNAME_ENV_VAR, \ - _TRACKING_PASSWORD_ENV_VAR, _TRACKING_TOKEN_ENV_VAR, \ - _TRACKING_INSECURE_TLS_ENV_VAR, get_db_profile_from_uri + _TRACKING_PASSWORD_ENV_VAR, _TRACKING_TOKEN_ENV_VAR, _TRACKING_INSECURE_TLS_ENV_VAR, \ + get_db_profile_from_uri -def test_get_store_file_store(tmpdir): +def test_get_store_file_store(tmp_wkdir): env = {} with mock.patch.dict(os.environ, env): store = _get_store() assert isinstance(store, FileStore) - assert store.root_directory == os.path.abspath("mlruns") + assert os.path.abspath(store.root_directory) == os.path.abspath("mlruns") - # Make sure we look at the parameter... 
- store = _get_store(tmpdir.strpath) + +def test_get_store_file_store_from_arg(tmp_wkdir): + env = {} + with mock.patch.dict(os.environ, env): + store = _get_store("other/path") + assert isinstance(store, FileStore) + assert os.path.abspath(store.root_directory) == os.path.abspath("other/path") + + +@pytest.mark.parametrize("uri", ["other/path", "file:other/path"]) +def test_get_store_file_store_from_env(tmp_wkdir, uri): + env = { + _TRACKING_URI_ENV_VAR: uri + } + with mock.patch.dict(os.environ, env): + store = _get_store() assert isinstance(store, FileStore) - assert store.root_directory == tmpdir + assert os.path.abspath(store.root_directory) == os.path.abspath("other/path") -def test_get_store_basic_rest_store(tmpdir): +def test_get_store_basic_rest_store(): env = { _TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050" } @@ -33,7 +52,7 @@ def test_get_store_basic_rest_store(tmpdir): assert store.get_host_creds().token is None -def test_get_store_rest_store_with_password(tmpdir): +def test_get_store_rest_store_with_password(): env = { _TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050", _TRACKING_USERNAME_ENV_VAR: "Bob", @@ -47,7 +66,7 @@ def test_get_store_rest_store_with_password(tmpdir): assert store.get_host_creds().password == "Ross" -def test_get_store_rest_store_with_token(tmpdir): +def test_get_store_rest_store_with_token(): env = { _TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050", _TRACKING_TOKEN_ENV_VAR: "my-token", @@ -58,7 +77,7 @@ def test_get_store_rest_store_with_token(tmpdir): assert store.get_host_creds().token == "my-token" -def test_get_store_rest_store_with_insecure(tmpdir): +def test_get_store_rest_store_with_insecure(): env = { _TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050", _TRACKING_INSECURE_TLS_ENV_VAR: "true", @@ -69,7 +88,7 @@ def test_get_store_rest_store_with_insecure(tmpdir): assert store.get_host_creds().ignore_tls_verification -def test_get_store_rest_store_with_no_insecure(tmpdir): +def test_get_store_rest_store_with_no_insecure(): env = { _TRACKING_URI_ENV_VAR: "https://my-tracking-server:5050", _TRACKING_INSECURE_TLS_ENV_VAR: "false", @@ -89,7 +108,46 @@ def test_get_store_rest_store_with_no_insecure(tmpdir): assert not store.get_host_creds().ignore_tls_verification -def test_get_store_databricks(tmpdir): +@pytest.mark.parametrize("db_type", DATABASE_ENGINES) +def test_get_store_sqlalchemy_store(tmp_wkdir, db_type): + patch_create_engine = mock.patch("sqlalchemy.create_engine") + + uri = "{}://hostname/database".format(db_type) + env = { + _TRACKING_URI_ENV_VAR: uri + } + with mock.patch.dict(os.environ, env), patch_create_engine as mock_create_engine,\ + mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore._verify_schema"), \ + mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore._initialize_tables"): + store = _get_store() + assert isinstance(store, SqlAlchemyStore) + assert store.db_uri == uri + assert store.artifact_root_uri == "./mlruns" + + mock_create_engine.assert_called_once_with(uri) + + +@pytest.mark.parametrize("db_type", DATABASE_ENGINES) +def test_get_store_sqlalchemy_store_with_artifact_uri(tmp_wkdir, db_type): + patch_create_engine = mock.patch("sqlalchemy.create_engine") + uri = "{}://hostname/database".format(db_type) + env = { + _TRACKING_URI_ENV_VAR: uri + } + artifact_uri = "file:artifact/path" + + with mock.patch.dict(os.environ, env), patch_create_engine as mock_create_engine, \ + mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore._verify_schema"), \ + 
mock.patch("mlflow.store.sqlalchemy_store.SqlAlchemyStore._initialize_tables"): + store = _get_store(artifact_uri=artifact_uri) + assert isinstance(store, SqlAlchemyStore) + assert store.db_uri == uri + assert store.artifact_root_uri == artifact_uri + + mock_create_engine.assert_called_once_with(uri) + + +def test_get_store_databricks(): env = { _TRACKING_URI_ENV_VAR: "databricks", 'DATABRICKS_HOST': "https://my-tracking-server", @@ -102,7 +160,7 @@ def test_get_store_databricks(tmpdir): assert store.get_host_creds().token == "abcdef" -def test_get_store_databricks_profile(tmpdir): +def test_get_store_databricks_profile(): env = { _TRACKING_URI_ENV_VAR: "databricks://mycoolprofile", } @@ -116,5 +174,113 @@ def test_get_store_databricks_profile(tmpdir): assert 'mycoolprofile' in str(e_info.value) +def test_standard_store_registry_with_mocked_entrypoint(): + mock_entrypoint = mock.Mock() + mock_entrypoint.name = "mock-scheme" + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ): + # Entrypoints are registered at import time, so we need to reload the + # module to register the entrypoint given by the mocked + # extrypoints.get_group_all + reload(mlflow.tracking.utils) + + expected_standard_registry = { + '', + 'file', + 'http', + 'https', + 'postgresql', + 'mysql', + 'sqlite', + 'mssql', + 'databricks', + 'mock-scheme' + } + assert expected_standard_registry.issubset( + mlflow.tracking.utils._tracking_store_registry._registry.keys() + ) + + +@pytest.mark.large +def test_standard_store_registry_with_installed_plugin(tmp_wkdir): + """This test requires the package in tests/resources/mlflow-test-plugin to be installed""" + + reload(mlflow.tracking.utils) + assert "file-plugin" in mlflow.tracking.utils._tracking_store_registry._registry.keys() + + from mlflow_test_plugin import PluginFileStore + + env = { + _TRACKING_URI_ENV_VAR: "file-plugin:test-path", + } + with mock.patch.dict(os.environ, env): + plugin_file_store = mlflow.tracking.utils._get_store() + assert isinstance(plugin_file_store, PluginFileStore) + assert plugin_file_store.is_plugin + + +def test_plugin_registration(): + tracking_store = TrackingStoreRegistry() + + test_uri = "mock-scheme://fake-host/fake-path" + test_scheme = "mock-scheme" + + mock_plugin = mock.Mock() + tracking_store.register(test_scheme, mock_plugin) + assert test_scheme in tracking_store._registry + assert tracking_store.get_store(test_uri) == mock_plugin.return_value + mock_plugin.assert_called_once_with(store_uri=test_uri, artifact_uri=None) + + +def test_plugin_registration_via_entrypoints(): + mock_plugin_function = mock.Mock() + mock_entrypoint = mock.Mock(load=mock.Mock(return_value=mock_plugin_function)) + mock_entrypoint.name = "mock-scheme" + + with mock.patch( + "entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + + tracking_store = TrackingStoreRegistry() + tracking_store.register_entrypoints() + + assert tracking_store.get_store("mock-scheme://") == mock_plugin_function.return_value + + mock_plugin_function.assert_called_once_with(store_uri="mock-scheme://", artifact_uri=None) + mock_get_group_all.assert_called_once_with("mlflow.tracking_store") + + +@pytest.mark.parametrize("exception", + [AttributeError("test exception"), + ImportError("test exception")]) +def test_handle_plugin_registration_failure_via_entrypoints(exception): + mock_entrypoint = mock.Mock(load=mock.Mock(side_effect=exception)) + mock_entrypoint.name = "mock-scheme" + + with mock.patch( + 
"entrypoints.get_group_all", return_value=[mock_entrypoint] + ) as mock_get_group_all: + + tracking_store = TrackingStoreRegistry() + + # Check that the raised warning contains the message from the original exception + with pytest.warns(UserWarning, match="test exception"): + tracking_store.register_entrypoints() + + mock_entrypoint.load.assert_called_once() + mock_get_group_all.assert_called_once_with("mlflow.tracking_store") + + +def test_get_store_for_unregistered_scheme(): + + tracking_store = TrackingStoreRegistry() + + with pytest.raises(mlflow.exceptions.MlflowException, + match="Unexpected URI scheme"): + tracking_store.get_store("unknown-scheme://") + + def test_get_db_profile_from_uri_casing(): assert get_db_profile_from_uri('databricks://aAbB') == 'aAbB' diff --git a/tests/utils/test_environment.py b/tests/utils/test_environment.py index 5d68fdc347559..e988a30bea7fa 100644 --- a/tests/utils/test_environment.py +++ b/tests/utils/test_environment.py @@ -1,14 +1,28 @@ import os +import pytest from mlflow.utils.environment import _mlflow_conda_env -def test_save(tmpdir): - filename = os.path.join(str(tmpdir), "conda_env.yml") - _mlflow_conda_env(filename, additional_conda_deps=["conda-dep-1=0.0.1", "conda-dep-2"], - additional_pip_deps=["pip-dep-1", "pip-dep2==0.1.0"]) - print("") - print("env start") - with open(filename) as f: - print(f.read()) - print("env end") +@pytest.fixture +def conda_env_path(tmpdir): + return os.path.join(tmpdir.strpath, "conda_env.yaml") + + +def test_mlflow_conda_env_returns_none_when_output_path_is_specified(conda_env_path): + env_creation_output = _mlflow_conda_env( + path=conda_env_path, + additional_conda_deps=["conda-dep-1=0.0.1", "conda-dep-2"], + additional_pip_deps=["pip-dep-1", "pip-dep2==0.1.0"]) + + assert env_creation_output is None + + +def test_mlflow_conda_env_returns_expected_env_dict_when_output_path_is_not_specified(): + conda_deps = ["conda-dep-1=0.0.1", "conda-dep-2"] + env = _mlflow_conda_env( + path=None, + additional_conda_deps=conda_deps) + + for conda_dep in conda_deps: + assert conda_dep in env["dependencies"] diff --git a/tests/utils/test_exception.py b/tests/utils/test_exception.py index 86fd511a9416d..ddc2b7a5041ca 100644 --- a/tests/utils/test_exception.py +++ b/tests/utils/test_exception.py @@ -1,6 +1,34 @@ -from mlflow.exceptions import ExecutionException +import json +from mlflow.exceptions import ExecutionException, RestException def test_execution_exception_string_repr(): exc = ExecutionException("Uh oh") assert str(exc) == "Uh oh" + json.loads(exc.serialize_as_json()) + + +def test_rest_exception_default_error_code(): + exc = RestException({"message": "something important."}) + assert "something important." in str(exc) + + +def test_rest_exception_error_code_is_not_none(): + error_string = "something important." + exc = RestException({"message": error_string}) + assert "None" not in error_string + assert "None" not in str(exc) + json.loads(exc.serialize_as_json()) + + +def test_rest_exception_without_message(): + exc = RestException({"my_property": "something important."}) + assert "something important." in str(exc) + json.loads(exc.serialize_as_json()) + + +def test_rest_exception_error_code_and_no_message(): + exc = RestException({"error_code": 2, "messages": "something important."}) + assert "something important." 
in str(exc)
+    assert "2" in str(exc)
+    json.loads(exc.serialize_as_json())
diff --git a/tests/utils/test_file_utils.py b/tests/utils/test_file_utils.py
index b08b436ea218d..bcb4feaa3dbb1 100644
--- a/tests/utils/test_file_utils.py
+++ b/tests/utils/test_file_utils.py
@@ -10,10 +10,10 @@
 import tarfile
 
 from mlflow.utils import file_utils
-from mlflow.utils.file_utils import get_parent_dir
+from mlflow.utils.file_utils import get_parent_dir, _copy_file_or_tree, TempDir
 
 from tests.projects.utils import TEST_PROJECT_DIR
-from tests.helper_functions import random_int, random_file
+from tests.helper_functions import random_int, random_file, safe_edit_yaml
 
 
 def test_yaml_read_and_write(tmpdir):
@@ -25,7 +25,7 @@ def test_yaml_read_and_write(tmpdir):
     file_utils.write_yaml(temp_dir, yaml_file, data)
     read_data = file_utils.read_yaml(temp_dir, yaml_file)
     assert data == read_data
-    yaml_path = file_utils.build_path(temp_dir, yaml_file)
+    yaml_path = os.path.join(temp_dir, yaml_file)
     with codecs.open(yaml_path, encoding="utf-8") as handle:
         contents = handle.read()
     assert "!!python" not in contents
@@ -33,6 +33,17 @@
     # representations of their byte sequences).
     assert u"中文" in contents
 
+    def edit_func(old_dict):
+        old_dict["more_text"] = u"西班牙语"
+        return old_dict
+
+    assert "more_text" not in file_utils.read_yaml(temp_dir, yaml_file)
+    with safe_edit_yaml(temp_dir, yaml_file, edit_func):
+        edited_dict = file_utils.read_yaml(temp_dir, yaml_file)
+        assert "more_text" in edited_dict
+        assert edited_dict["more_text"] == u"西班牙语"
+    assert "more_text" not in file_utils.read_yaml(temp_dir, yaml_file)
+
 
 def test_mkdir(tmpdir):
     temp_dir = str(tmpdir)
@@ -78,3 +89,35 @@ def test_make_tarfile(tmpdir):
 def test_get_parent_dir(tmpdir):
     child_dir = tmpdir.join('dir').mkdir()
     assert str(tmpdir) == get_parent_dir(str(child_dir))
+
+
+def test_file_copy():
+    with TempDir() as tmp:
+        file_path = tmp.path("test_file.txt")
+        copy_path = tmp.path("test_dir1/")
+        os.mkdir(copy_path)
+        with open(file_path, 'a') as f:
+            f.write("testing")
+        _copy_file_or_tree(file_path, copy_path, "")
+        assert filecmp.cmp(file_path, os.path.join(copy_path, "test_file.txt"))
+
+
+def test_dir_create():
+    with TempDir() as tmp:
+        file_path = tmp.path("test_file.txt")
+        create_dir = tmp.path("test_dir2/")
+        with open(file_path, 'a') as f:
+            f.write("testing")
+        name = _copy_file_or_tree(file_path, file_path, create_dir)
+        assert filecmp.cmp(file_path, name)
+
+
+def test_dir_copy():
+    with TempDir() as tmp:
+        dir_path = tmp.path("test_dir1/")
+        copy_path = tmp.path("test_dir2")
+        os.mkdir(dir_path)
+        with open(os.path.join(dir_path, "test_file.txt"), 'a') as f:
+            f.write("testing")
+        _copy_file_or_tree(dir_path, copy_path, "")
+        assert filecmp.dircmp(dir_path, copy_path)
diff --git a/tests/utils/test_model_utils.py b/tests/utils/test_model_utils.py
new file mode 100644
index 0000000000000..6d6da078fdc30
--- /dev/null
+++ b/tests/utils/test_model_utils.py
@@ -0,0 +1,61 @@
+import os
+
+import pytest
+import sklearn.datasets as datasets
+import sklearn.neighbors as knn
+
+import mlflow.sklearn
+import mlflow.utils.model_utils as mlflow_model_utils
+from mlflow.exceptions import MlflowException
+from mlflow.models import Model
+from mlflow.protos.databricks_pb2 import ErrorCode, RESOURCE_DOES_NOT_EXIST
+from mlflow.mleap import FLAVOR_NAME as MLEAP_FLAVOR_NAME
+
+
+@pytest.fixture(scope="session")
+def sklearn_knn_model():
+    iris = datasets.load_iris()
+    X = iris.data[:, :2]  # we only take the first two features
+    y = iris.target
+    knn_model = knn.KNeighborsClassifier()
+    knn_model.fit(X, y)
+    return knn_model
+
+
+@pytest.fixture
+def model_path(tmpdir):
+    return os.path.join(str(tmpdir), "model")
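+
+
+# Note: MlflowException.error_code holds the *string* name of the proto enum
+# value, so the assertions below compare against ErrorCode.Name(...) rather
+# than the raw RESOURCE_DOES_NOT_EXIST integer constant.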
+def test_get_flavor_configuration_throws_exception_when_model_configuration_does_not_exist(
+        model_path):
+    with pytest.raises(MlflowException) as exc:
+        mlflow_model_utils._get_flavor_configuration(
+            model_path=model_path, flavor_name=mlflow.mleap.FLAVOR_NAME)
+    assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
+
+
+def test_get_flavor_configuration_throws_exception_when_requested_flavor_is_missing(
+        model_path, sklearn_knn_model):
+    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=model_path)
+
+    # The saved model contains the "sklearn" flavor, so this call should succeed
+    sklearn_flavor_config = mlflow_model_utils._get_flavor_configuration(
+        model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME)
+    assert sklearn_flavor_config is not None
+
+    # The saved model does not contain the "mleap" flavor, so this call should fail
+    with pytest.raises(MlflowException) as exc:
+        mlflow_model_utils._get_flavor_configuration(
+            model_path=model_path, flavor_name=MLEAP_FLAVOR_NAME)
+    assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
+
+
+def test_get_flavor_configuration_with_present_flavor_returns_expected_configuration(
+        sklearn_knn_model, model_path):
+    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=model_path)
+
+    sklearn_flavor_config = mlflow_model_utils._get_flavor_configuration(
+        model_path=model_path, flavor_name=mlflow.sklearn.FLAVOR_NAME)
+    model_config = Model.load(os.path.join(model_path, "MLmodel"))
+    assert sklearn_flavor_config == model_config.flavors[mlflow.sklearn.FLAVOR_NAME]
diff --git a/tests/utils/test_proto_json_utils.py b/tests/utils/test_proto_json_utils.py
index 6e8666c526a85..b427897f57ae6 100644
--- a/tests/utils/test_proto_json_utils.py
+++ b/tests/utils/test_proto_json_utils.py
@@ -1,12 +1,14 @@
 import json
 
-from mlflow.entities import Experiment
+from mlflow.entities import Experiment, Metric
 from mlflow.protos.service_pb2 import Experiment as ProtoExperiment
-from mlflow.utils.proto_json_utils import message_to_json, parse_dict
+from mlflow.protos.service_pb2 import Metric as ProtoMetric
+
+from mlflow.utils.proto_json_utils import message_to_json, parse_dict, _stringify_all_experiment_ids
 
 
 def test_message_to_json():
-    json_out = message_to_json(Experiment(123, "name", "arty", 'active').to_proto())
+    json_out = message_to_json(Experiment("123", "name", "arty", 'active').to_proto())
     assert json.loads(json_out) == {
         "experiment_id": "123",
         "name": "name",
@@ -20,6 +22,43 @@ def test_parse_dict():
     message = ProtoExperiment()
     parse_dict(in_json, message)
     experiment = Experiment.from_proto(message)
-    assert experiment.experiment_id == 123
+    assert experiment.experiment_id == "123"
+    assert experiment.name == 'name'
+    assert experiment.artifact_location == ''
+
+
+def test_parse_dict_int_as_string_backcompat():
+    in_json = {"timestamp": "123"}
+    message = ProtoMetric()
+    parse_dict(in_json, message)
+    metric = Metric.from_proto(message)
+    assert metric.timestamp == 123
+
+
+def test_parse_legacy_experiment():
+    in_json = {"experiment_id": 123, "name": "name", "unknown": "field"}
+    message = ProtoExperiment()
+    parse_dict(in_json, message)
+    experiment = Experiment.from_proto(message)
+    assert experiment.experiment_id == "123"
     assert experiment.name == 'name'
     assert experiment.artifact_location == ''
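+
+
+# _stringify_all_experiment_ids is expected to walk a JSON payload recursively
+# and coerce every "experiment_id"/"experiment_ids" value (however deeply
+# nested) to a string, for backwards compatibility with integer ids.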
{"experiment_id": 123, + "name": "name", + "unknown": "field", + "experiment_ids": [1, 2, 3, 4, 5], + "things": {"experiment_id": 4, + "more_things": {"experiment_id": 7, "experiment_ids": [2, 3, 4, 5]}}} + + _stringify_all_experiment_ids(in_json) + exp_json = {"experiment_id": "123", + "name": "name", + "unknown": "field", + "experiment_ids": ["1", "2", "3", "4", "5"], + "things": {"experiment_id": "4", + "more_things": {"experiment_id": "7", + "experiment_ids": ["2", "3", "4", "5"]}}} + assert exp_json == in_json diff --git a/tests/utils/test_rest_utils.py b/tests/utils/test_rest_utils.py index 4fe79dda69f36..ce7486f183f9a 100644 --- a/tests/utils/test_rest_utils.py +++ b/tests/utils/test_rest_utils.py @@ -4,8 +4,9 @@ import numpy import pytest -from mlflow.utils.rest_utils import NumpyEncoder, http_request, http_request_safe,\ - MlflowHostCreds +from mlflow.utils.rest_utils import http_request, http_request_safe,\ + MlflowHostCreds, _DEFAULT_HEADERS +from mlflow.pyfunc.scoring_server import NumpyEncoder from mlflow.exceptions import MlflowException, RestException @@ -19,7 +20,7 @@ def test_http_request_hostonly(request): request.assert_called_with( url='http://my-host/my/endpoint', verify=True, - headers={}, + headers=_DEFAULT_HEADERS, ) @@ -34,7 +35,7 @@ def test_http_request_cleans_hostname(request): request.assert_called_with( url='http://my-host/my/endpoint', verify=True, - headers={}, + headers=_DEFAULT_HEADERS, ) @@ -45,12 +46,12 @@ def test_http_request_with_basic_auth(request): response.status_code = 200 request.return_value = response http_request(host_only, '/my/endpoint') + headers = dict(_DEFAULT_HEADERS) + headers['Authorization'] = 'Basic dXNlcjpwYXNz' request.assert_called_with( url='http://my-host/my/endpoint', verify=True, - headers={ - 'Authorization': 'Basic dXNlcjpwYXNz' - }, + headers=headers, ) @@ -61,12 +62,12 @@ def test_http_request_with_token(request): response.status_code = 200 request.return_value = response http_request(host_only, '/my/endpoint') + headers = dict(_DEFAULT_HEADERS) + headers['Authorization'] = 'Bearer my-token' request.assert_called_with( url='http://my-host/my/endpoint', verify=True, - headers={ - 'Authorization': 'Bearer my-token' - }, + headers=headers, ) @@ -80,7 +81,7 @@ def test_http_request_with_insecure(request): request.assert_called_with( url='http://my-host/my/endpoint', verify=False, - headers={}, + headers=_DEFAULT_HEADERS, ) @@ -94,7 +95,7 @@ def test_http_request_wrapper(request): request.assert_called_with( url='http://my-host/my/endpoint', verify=False, - headers={}, + headers=_DEFAULT_HEADERS, ) response.status_code = 400 response.text = "" @@ -116,6 +117,9 @@ def test_numpy_encoder(): def test_numpy_encoder_fail(): + if not hasattr(numpy, "float128"): + pytest.skip("numpy on exit" + "this platform has no float128") test_number = numpy.float128 with pytest.raises(TypeError): ne = NumpyEncoder() diff --git a/tests/utils/test_search_utils.py b/tests/utils/test_search_utils.py new file mode 100644 index 0000000000000..defae4cadee35 --- /dev/null +++ b/tests/utils/test_search_utils.py @@ -0,0 +1,367 @@ +import base64 +import json +import pytest + +from mlflow.entities import RunInfo, RunData, Run, LifecycleStage, RunStatus, Metric, Param, RunTag +from mlflow.exceptions import MlflowException +from mlflow.utils.search_utils import SearchUtils + + +@pytest.mark.parametrize("filter_string, parsed_filter", [ + ("metric.acc >= 0.94", [{'comparator': '>=', 'key': 'acc', 'type': 'metric', 'value': '0.94'}]), + ("metric.acc>=100", 
[{'comparator': '>=', 'key': 'acc', 'type': 'metric', 'value': '100'}]), + ("params.m!='tf'", [{'comparator': '!=', 'key': 'm', 'type': 'parameter', 'value': 'tf'}]), + ('params."m"!="tf"', [{'comparator': '!=', 'key': 'm', 'type': 'parameter', 'value': 'tf'}]), + ('metric."legit name" >= 0.243', [{'comparator': '>=', + 'key': 'legit name', + 'type': 'metric', + 'value': '0.243'}]), + ("metrics.XYZ = 3", [{'comparator': '=', 'key': 'XYZ', 'type': 'metric', 'value': '3'}]), + ('params."cat dog" = "pets"', [{'comparator': '=', + 'key': 'cat dog', + 'type': 'parameter', + 'value': 'pets'}]), + ('metrics."X-Y-Z" = 3', [{'comparator': '=', 'key': 'X-Y-Z', 'type': 'metric', 'value': '3'}]), + ('metrics."X//Y#$$@&Z" = 3', [{'comparator': '=', + 'key': 'X//Y#$$@&Z', + 'type': 'metric', + 'value': '3'}]), + ("params.model = 'LinearRegression'", [{'comparator': '=', + 'key': 'model', + 'type': 'parameter', + 'value': "LinearRegression"}]), + ("metrics.rmse < 1 and params.model_class = 'LR'", [ + {'comparator': '<', 'key': 'rmse', 'type': 'metric', 'value': '1'}, + {'comparator': '=', 'key': 'model_class', 'type': 'parameter', 'value': "LR"} + ]), + ('', []), + ("`metric`.a >= 0.1", [{'comparator': '>=', 'key': 'a', 'type': 'metric', 'value': '0.1'}]), + ("`params`.model >= 'LR'", [{'comparator': '>=', + 'key': 'model', + 'type': 'parameter', + 'value': "LR"}]), + ("tags.version = 'commit-hash'", [{'comparator': '=', + 'key': 'version', + 'type': 'tag', + 'value': "commit-hash"}]), + ("`tags`.source_name = 'a notebook'", [{'comparator': '=', + 'key': 'source_name', + 'type': 'tag', + 'value': "a notebook"}]), + ('metrics."accuracy.2.0" > 5', [{'comparator': '>', + 'key': 'accuracy.2.0', + 'type': 'metric', + 'value': '5'}]), + ('metrics.`spacey name` > 5', [{'comparator': '>', + 'key': 'spacey name', + 'type': 'metric', + 'value': '5'}]), + ('params."p.a.r.a.m" != "a"', [{'comparator': '!=', + 'key': 'p.a.r.a.m', + 'type': 'parameter', + 'value': 'a'}]), + ('tags."t.a.g" = "a"', [{'comparator': '=', + 'key': 't.a.g', + 'type': 'tag', + 'value': 'a'}]), + ("attribute.artifact_uri = '1/23/4'", [{'type': 'attribute', + 'comparator': '=', + 'key': 'artifact_uri', + 'value': '1/23/4'}]), + ("run.status = 'RUNNING'", [{'type': 'attribute', + 'comparator': '=', + 'key': 'status', + 'value': 'RUNNING'}]), +]) +def test_filter(filter_string, parsed_filter): + assert SearchUtils._parse_search_filter(filter_string) == parsed_filter + + +@pytest.mark.parametrize("filter_string, parsed_filter", [ + ("params.m = 'LR'", [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]), + ("params.m = \"LR\"", [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]), + ('params.m = "LR"', [{'type': 'parameter', 'comparator': '=', 'key': 'm', 'value': 'LR'}]), + ('params.m = "L\'Hosp"', [{'type': 'parameter', 'comparator': '=', + 'key': 'm', 'value': "L'Hosp"}]), +]) +def test_correct_quote_trimming(filter_string, parsed_filter): + assert SearchUtils._parse_search_filter(filter_string) == parsed_filter + + +@pytest.mark.parametrize("filter_string, error_message", [ + ("metric.acc >= 0.94; metrics.rmse < 1", "Search filter contained multiple expression"), + ("m.acc >= 0.94", "Invalid entity type"), + ("acc >= 0.94", "Invalid identifier"), + ("p.model >= 'LR'", "Invalid entity type"), + ("attri.x != 1", "Invalid entity type"), + ("a.x != 1", "Invalid entity type"), + ("model >= 'LR'", "Invalid identifier"), + ("metrics.A > 0.1 OR params.B = 'LR'", "Invalid clause(s) in filter string"), + ("metrics.A 
> 0.1 NAND params.B = 'LR'", "Invalid clause(s) in filter string"), + ("metrics.A > 0.1 AND (params.B = 'LR')", "Invalid clause(s) in filter string"), + ("`metrics.A > 0.1", "Invalid clause(s) in filter string"), + ("param`.A > 0.1", "Invalid clause(s) in filter string"), + ("`dummy.A > 0.1", "Invalid clause(s) in filter string"), + ("dummy`.A > 0.1", "Invalid clause(s) in filter string"), + ("attribute.start != 1", "Invalid attribute key"), + ("attribute.start_time != 1", "Invalid attribute key"), + ("attribute.end_time != 1", "Invalid attribute key"), + ("attribute.run_id != 1", "Invalid attribute key"), + ("attribute.run_uuid != 1", "Invalid attribute key"), + ("attribute.experiment_id != 1", "Invalid attribute key"), + ("attribute.lifecycle_stage = 'ACTIVE'", "Invalid attribute key"), + ("attribute.name != 1", "Invalid attribute key"), + ("attribute.time != 1", "Invalid attribute key"), + ("attribute._status != 'RUNNING'", "Invalid attribute key"), + ("attribute.status = true", "Invalid clause(s) in filter string"), +]) +def test_error_filter(filter_string, error_message): + with pytest.raises(MlflowException) as e: + SearchUtils._parse_search_filter(filter_string) + assert error_message in e.value.message + + +@pytest.mark.parametrize("filter_string, error_message", [ + ("metric.model = 'LR'", "Expected numeric value type for metric"), + ("metric.model = '5'", "Expected numeric value type for metric"), + ("params.acc = 5", "Expected a quoted string value for param"), + ("tags.acc = 5", "Expected a quoted string value for tag"), + ("metrics.acc != metrics.acc", "Expected numeric value type for metric"), + ("1.0 > metrics.acc", "Expected 'Identifier' found"), + ("attribute.status = 1", "Expected a quoted string value for attributes"), +]) +def test_error_comparison_clauses(filter_string, error_message): + with pytest.raises(MlflowException) as e: + SearchUtils._parse_search_filter(filter_string) + assert error_message in e.value.message + + +@pytest.mark.parametrize("filter_string, error_message", [ + ("params.acc = LR", "value is either not quoted or unidentified quote types"), + ("tags.acc = LR", "value is either not quoted or unidentified quote types"), + ("params.acc = `LR`", "value is either not quoted or unidentified quote types"), + ("params.'acc = LR", "Invalid clause(s) in filter string"), + ("params.acc = 'LR", "Invalid clause(s) in filter string"), + ("params.acc = LR'", "Invalid clause(s) in filter string"), + ("params.acc = \"LR'", "Invalid clause(s) in filter string"), + ("tags.acc = \"LR'", "Invalid clause(s) in filter string"), + ("tags.acc = = 'LR'", "Invalid clause(s) in filter string"), + ("attribute.status IS 'RUNNING'", "Invalid clause(s) in filter string"), +]) +def test_bad_quotes(filter_string, error_message): + with pytest.raises(MlflowException) as e: + SearchUtils._parse_search_filter(filter_string) + assert error_message in e.value.message + + +@pytest.mark.parametrize("filter_string, error_message", [ + ("params.acc LR !=", "Invalid clause(s) in filter string"), + ("params.acc LR", "Invalid clause(s) in filter string"), + ("metric.acc !=", "Invalid clause(s) in filter string"), + ("acc != 1.0", "Invalid identifier"), + ("foo is null", "Invalid clause(s) in filter string"), + ("1=1", "Expected 'Identifier' found"), + ("1==2", "Expected 'Identifier' found"), +]) +def test_invalid_clauses(filter_string, error_message): + with pytest.raises(MlflowException) as e: + SearchUtils._parse_search_filter(filter_string) + assert error_message in e.value.message + + 
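+# The cases below assume SearchUtils accepts only equality comparators
+# ('=', '!=') for params, tags, and string attributes, while metrics also
+# allow numeric comparators; anything else should raise "Invalid comparator".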
+@pytest.mark.parametrize("entity_type, bad_comparators, key, entity_value", [ + ("metrics", ["~", "~="], "abc", 1.0), + ("params", [">", "<", ">=", "<=", "~"], "abc", "'my-param-value'"), + ("tags", [">", "<", ">=", "<=", "~"], "abc", "'my-tag-value'"), + ("attributes", [">", "<", ">=", "<=", "~"], "status", "'my-tag-value'"), +]) +def test_bad_comparators(entity_type, bad_comparators, key, entity_value): + run = Run(run_info=RunInfo( + run_uuid="hi", run_id="hi", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData(metrics=[], params=[], tags=[]) + ) + for bad_comparator in bad_comparators: + bad_filter = "{entity_type}.{key} {comparator} {value}".format( + entity_type=entity_type, key=key, comparator=bad_comparator, value=entity_value) + with pytest.raises(MlflowException) as e: + SearchUtils.filter([run], bad_filter) + assert "Invalid comparator" in str(e.value.message) + + +@pytest.mark.parametrize("filter_string, matching_runs", [ + (None, [0, 1, 2]), + ("", [0, 1, 2]), + ("attributes.status = 'FAILED'", [0, 2]), + ("metrics.key1 = 123", [1]), + ("metrics.key1 != 123", [0, 2]), + ("metrics.key1 >= 123", [1, 2]), + ("params.my_param = 'A'", [0, 1]), + ("tags.tag1 = 'D'", [2]), + ("tags.tag1 != 'D'", [1]), + ("params.my_param = 'A' AND attributes.status = 'FAILED'", [0]), +]) +def test_correct_filtering(filter_string, matching_runs): + runs = [ + Run(run_info=RunInfo( + run_uuid="hi", run_id="hi", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData( + metrics=[Metric("key1", 121, 1, 0)], + params=[Param("my_param", "A")], + tags=[])), + Run(run_info=RunInfo( + run_uuid="hi2", run_id="hi2", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FINISHED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData( + metrics=[Metric("key1", 123, 1, 0)], + params=[Param("my_param", "A")], + tags=[RunTag("tag1", "C")])), + Run(run_info=RunInfo( + run_uuid="hi3", run_id="hi3", experiment_id=1, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData( + metrics=[Metric("key1", 125, 1, 0)], + params=[Param("my_param", "B")], + tags=[RunTag("tag1", "D")])), + ] + filtered_runs = SearchUtils.filter(runs, filter_string) + assert set(filtered_runs) == set([runs[i] for i in matching_runs]) + + +@pytest.mark.parametrize("order_bys, matching_runs", [ + (None, [2, 1, 0]), + ([], [2, 1, 0]), + (["tags.noSuchTag"], [2, 1, 0]), + (["attributes.status"], [2, 0, 1]), + (["attributes.start_time"], [0, 2, 1]), + (["metrics.key1 asc"], [0, 1, 2]), + (["metrics.\"key1\" desc"], [2, 1, 0]), + (["params.my_param"], [1, 0, 2]), + (["params.my_param aSc", "attributes.status ASC"], [0, 1, 2]), + (["params.my_param", "attributes.status DESC"], [1, 0, 2]), + (["params.my_param DESC", "attributes.status DESC"], [2, 1, 0]), + (["params.`my_param` DESC", "attributes.status DESC"], [2, 1, 0]), + (["tags.tag1"], [1, 2, 0]), + (["tags.tag1 DESC"], [2, 1, 0]), +]) +def test_correct_sorting(order_bys, matching_runs): + runs = [ + Run(run_info=RunInfo( + run_uuid="9", run_id="9", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData( + 
metrics=[Metric("key1", 121, 1, 0)], + params=[Param("my_param", "A")], + tags=[])), + Run(run_info=RunInfo( + run_uuid="8", run_id="8", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FINISHED), + start_time=1, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData( + metrics=[Metric("key1", 123, 1, 0)], + params=[Param("my_param", "A")], + tags=[RunTag("tag1", "C")])), + Run(run_info=RunInfo( + run_uuid="7", run_id="7", experiment_id=1, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=1, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData( + metrics=[Metric("key1", 125, 1, 0)], + params=[Param("my_param", "B")], + tags=[RunTag("tag1", "D")])), + ] + sorted_runs = SearchUtils.sort(runs, order_bys) + sorted_run_indices = [] + for run in sorted_runs: + for i, r in enumerate(runs): + if r == run: + sorted_run_indices.append(i) + break + assert sorted_run_indices == matching_runs + + +@pytest.mark.parametrize("order_by, error_message", [ + ("m.acc", "Invalid entity type"), + ("acc", "Invalid identifier"), + ("attri.x", "Invalid entity type"), + ("`metrics.A", "Invalid order_by clause"), + ("`metrics.A`", "Invalid entity type"), + ("attribute.start", "Invalid attribute key"), + ("attribute.run_id", "Invalid attribute key"), + ("attribute.experiment_id", "Invalid attribute key"), + ("metrics.A != 1", "Invalid order_by clause"), + ("params.my_param ", "Invalid order_by clause"), +]) +def test_invalid_order_by(order_by, error_message): + with pytest.raises(MlflowException) as e: + SearchUtils._parse_order_by(order_by) + assert error_message in e.value.message + + +@pytest.mark.parametrize("page_token, max_results, matching_runs, expected_next_page_token", [ + (None, 1, [0], {"offset": 1}), + (None, 2, [0, 1], {"offset": 2}), + (None, 3, [0, 1, 2], None), + (None, 5, [0, 1, 2], None), + ({"offset": 1}, 1, [1], {"offset": 2}), + ({"offset": 1}, 2, [1, 2], None), + ({"offset": 1}, 3, [1, 2], None), + ({"offset": 2}, 1, [2], None), + ({"offset": 2}, 2, [2], None), + ({"offset": 2}, 0, [], {"offset": 2}), + ({"offset": 3}, 1, [], None), +]) +def test_pagination(page_token, max_results, matching_runs, expected_next_page_token): + runs = [ + Run(run_info=RunInfo( + run_uuid="0", run_id="0", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData([], [], [])), + Run(run_info=RunInfo( + run_uuid="1", run_id="1", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData([], [], [])), + Run(run_info=RunInfo( + run_uuid="2", run_id="2", experiment_id=0, + user_id="user-id", status=RunStatus.to_string(RunStatus.FAILED), + start_time=0, end_time=1, lifecycle_stage=LifecycleStage.ACTIVE), + run_data=RunData([], [], [])) + ] + encoded_page_token = None + if page_token: + encoded_page_token = base64.b64encode(json.dumps(page_token).encode("utf-8")) + paginated_runs, next_page_token = SearchUtils.paginate(runs, encoded_page_token, max_results) + + paginated_run_indices = [] + for run in paginated_runs: + for i, r in enumerate(runs): + if r == run: + paginated_run_indices.append(i) + break + assert paginated_run_indices == matching_runs + + decoded_next_page_token = None + if next_page_token: + decoded_next_page_token = json.loads(base64.b64decode(next_page_token)) + assert decoded_next_page_token == 
+@pytest.mark.parametrize("page_token, error_message", [
+    (base64.b64encode(json.dumps({}).encode("utf-8")), "Invalid page token"),
+    (base64.b64encode(json.dumps({"offset": "a"}).encode("utf-8")), "Invalid page token"),
+    (base64.b64encode(json.dumps({"offsoot": 7}).encode("utf-8")), "Invalid page token"),
+    (base64.b64encode("not json".encode("utf-8")), "Invalid page token"),
+    ("not base64", "Invalid page token"),
+])
+def test_invalid_page_tokens(page_token, error_message):
+    with pytest.raises(MlflowException) as e:
+        SearchUtils.paginate([], page_token, 1)
+    assert error_message in e.value.message
diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py
new file mode 100644
index 0000000000000..968c31c19c3aa
--- /dev/null
+++ b/tests/utils/test_utils.py
@@ -0,0 +1,34 @@
+import pytest
+
+from mlflow.exceptions import MlflowException
+from mlflow.store.dbmodels.db_types import DATABASE_ENGINES
+from mlflow.utils import get_unique_resource_id, extract_db_type_from_uri, get_uri_scheme
+
+
+def test_get_unique_resource_id_respects_max_length():
+    for max_length in range(5, 30, 5):
+        for _ in range(10000):
+            assert len(get_unique_resource_id(max_length=max_length)) <= max_length
+
+
+def test_get_unique_resource_id_with_invalid_max_length_throws_exception():
+    with pytest.raises(ValueError):
+        get_unique_resource_id(max_length=-50)
+
+    with pytest.raises(ValueError):
+        get_unique_resource_id(max_length=0)
+
+
+def test_extract_db_type_from_uri():
+    uri = "{}://username:password@host:port/database"
+    for legit_db in DATABASE_ENGINES:
+        assert legit_db == extract_db_type_from_uri(uri.format(legit_db))
+        assert legit_db == get_uri_scheme(uri.format(legit_db))
+
+        with_driver = legit_db + "+driver-string"
+        assert legit_db == extract_db_type_from_uri(uri.format(with_driver))
+        assert legit_db == get_uri_scheme(uri.format(with_driver))
+
+    for unsupported_db in ["a", "aa", "sql"]:
+        with pytest.raises(MlflowException):
+            extract_db_type_from_uri(unsupported_db)
diff --git a/tests/utils/test_validation.py b/tests/utils/test_validation.py
index 76d38803b1ae6..3f1b1ca4e7e3e 100644
--- a/tests/utils/test_validation.py
+++ b/tests/utils/test_validation.py
@@ -1,7 +1,12 @@
+import copy
 import pytest
 
+from mlflow.exceptions import MlflowException
+from mlflow.entities import Metric, Param, RunTag
+from mlflow.protos.databricks_pb2 import ErrorCode, INVALID_PARAMETER_VALUE
 from mlflow.utils.validation import _validate_metric_name, _validate_param_name, \
-    _validate_tag_name, _validate_run_id
+    _validate_tag_name, _validate_run_id, _validate_batch_log_data, \
+    _validate_batch_log_limits, _validate_experiment_artifact_location, _validate_db_type_string
 
 GOOD_METRIC_OR_PARAM_NAMES = [
     "a", "Ab-5_", "a/b/c", "a.b.c", ".a", "b.", "a..a/._./o_O/.e.", "a b/c d",
@@ -15,29 +20,119 @@ def test_validate_metric_name():
     for good_name in GOOD_METRIC_OR_PARAM_NAMES:
         _validate_metric_name(good_name)
     for bad_name in BAD_METRIC_OR_PARAM_NAMES:
-        with pytest.raises(Exception, match="Invalid metric name"):
+        with pytest.raises(MlflowException, match="Invalid metric name") as e:
             _validate_metric_name(bad_name)
+        assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
 
 
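+# NB: the validation tests here pin the protobuf error code as well as the
+# message; ErrorCode.Name(INVALID_PARAMETER_VALUE) resolves to the string
+# "INVALID_PARAMETER_VALUE", which MlflowException exposes as error_code.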
 def test_validate_param_name():
     for good_name in GOOD_METRIC_OR_PARAM_NAMES:
         _validate_param_name(good_name)
     for bad_name in BAD_METRIC_OR_PARAM_NAMES:
-        with pytest.raises(Exception, match="Invalid parameter name"):
+        with pytest.raises(MlflowException, match="Invalid parameter name") as e:
             _validate_param_name(bad_name)
+        assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
 
 
 def test_validate_tag_name():
     for good_name in GOOD_METRIC_OR_PARAM_NAMES:
         _validate_tag_name(good_name)
     for bad_name in BAD_METRIC_OR_PARAM_NAMES:
-        with pytest.raises(Exception, match="Invalid tag name"):
+        with pytest.raises(MlflowException, match="Invalid tag name") as e:
             _validate_tag_name(bad_name)
+        assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
 
 
 def test_validate_run_id():
-    for good_id in ["a" * 32, "f0" * 16, "abcdef0123456789" * 2]:
+    for good_id in ["a" * 32, "f0" * 16, "abcdef0123456789" * 2, "a" * 33, "a" * 31,
+                    "a" * 256, "A" * 32, "g" * 32, "a_" * 32, "abcdefghijklmnopqrstuvqxyz"]:
         _validate_run_id(good_id)
-    for bad_id in ["a" * 33, "a" * 31, "A" * 32, "g" * 32, "a/bc" * 8, "_" * 32]:
-        with pytest.raises(Exception, match="Invalid run ID"):
+    for bad_id in ["a/bc" * 8, "", "a" * 400, "*" * 5]:
+        with pytest.raises(MlflowException, match="Invalid run ID") as e:
             _validate_run_id(bad_id)
+        assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
+
+
+def test_validate_batch_log_limits():
+    too_many_metrics = [Metric("metric-key-%s" % i, 1, 0, i * 2) for i in range(1001)]
+    too_many_params = [Param("param-key-%s" % i, "b") for i in range(101)]
+    too_many_tags = [RunTag("tag-key-%s" % i, "b") for i in range(101)]
+
+    good_kwargs = {"metrics": [], "params": [], "tags": []}
+    bad_kwargs = {
+        "metrics": [too_many_metrics],
+        "params": [too_many_params],
+        "tags": [too_many_tags],
+    }
+    for arg_name, arg_values in bad_kwargs.items():
+        for arg_value in arg_values:
+            final_kwargs = copy.deepcopy(good_kwargs)
+            final_kwargs[arg_name] = arg_value
+            with pytest.raises(MlflowException):
+                _validate_batch_log_limits(**final_kwargs)
+    # Test the case where there are too many entities in aggregate
+    with pytest.raises(MlflowException):
+        _validate_batch_log_limits(too_many_metrics[:900], too_many_params[:51],
+                                   too_many_tags[:50])
+    # Test that we don't reject entities within the limit
+    _validate_batch_log_limits(too_many_metrics[:1000], [], [])
+    _validate_batch_log_limits([], too_many_params[:100], [])
+    _validate_batch_log_limits([], [], too_many_tags[:100])
+
+
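+# NB: limits implied by the cases above: at most 1000 metrics, 100 params, and
+# 100 tags per batch, and no more than 1000 entities in total.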
+def test_validate_batch_log_data():
+    metrics_with_bad_key = [Metric("good-metric-key", 1.0, 0, 0),
+                            Metric("super-long-bad-key" * 1000, 4.0, 0, 0)]
+    metrics_with_bad_val = [Metric("good-metric-key", "not-a-double-val", 0, 0)]
+    metrics_with_bad_ts = [Metric("good-metric-key", 1.0, "not-a-timestamp", 0)]
+    metrics_with_neg_ts = [Metric("good-metric-key", 1.0, -123, 0)]
+    metrics_with_bad_step = [Metric("good-metric-key", 1.0, 0, "not-a-step")]
+    params_with_bad_key = [Param("good-param-key", "hi"),
+                           Param("super-long-bad-key" * 1000, "but-good-val")]
+    params_with_bad_val = [Param("good-param-key", "hi"),
+                           Param("another-good-key", "but-bad-val" * 1000)]
+    tags_with_bad_key = [RunTag("good-tag-key", "hi"),
+                         RunTag("super-long-bad-key" * 1000, "but-good-val")]
+    tags_with_bad_val = [RunTag("good-tag-key", "hi"),
+                         RunTag("another-good-key", "but-bad-val" * 1000)]
+    bad_kwargs = {
+        "metrics": [metrics_with_bad_key, metrics_with_bad_val, metrics_with_bad_ts,
+                    metrics_with_neg_ts, metrics_with_bad_step],
+        "params": [params_with_bad_key, params_with_bad_val],
+        "tags": [tags_with_bad_key, tags_with_bad_val],
+    }
+    good_kwargs = {"metrics": [], "params": [], "tags": []}
+    for arg_name, arg_values in bad_kwargs.items():
+        for arg_value in arg_values:
+            final_kwargs = copy.deepcopy(good_kwargs)
+            final_kwargs[arg_name] = arg_value
+            with pytest.raises(MlflowException):
+                _validate_batch_log_data(**final_kwargs)
+    # Test that we don't reject entities within the limit
+    _validate_batch_log_data(
+        metrics=[Metric("metric-key", 1.0, 0, 0)], params=[Param("param-key", "param-val")],
+        tags=[RunTag("tag-key", "tag-val")])
+
+
+def test_validate_experiment_artifact_location():
+    _validate_experiment_artifact_location('abcde')
+    _validate_experiment_artifact_location(None)
+    with pytest.raises(MlflowException):
+        _validate_experiment_artifact_location('runs:/blah/bleh/blergh')
+
+
+def test_db_type():
+    for db_type in ["mysql", "mssql", "postgresql", "sqlite"]:
+        # should not raise an exception
+        _validate_db_type_string(db_type)
+
+    # error cases
+    for db_type in ["MySQL", "mongo", "cassandra", "sql", ""]:
+        with pytest.raises(MlflowException) as e:
+            _validate_db_type_string(db_type)
+        assert "Invalid database engine" in e.value.message
diff --git a/travis/install-common-deps.sh b/travis/install-common-deps.sh
new file mode 100755
index 0000000000000..4beeeb1b240cf
--- /dev/null
+++ b/travis/install-common-deps.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -ex
+sudo mkdir -p /travis-install
+sudo chown travis /travis-install
+# (The conda installation steps below are taken from http://conda.pydata.org/docs/travis.html)
+# We do this conditionally because it saves us some downloading if the
+# version is the same.
+if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
+  wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O /travis-install/miniconda.sh;
+else
+  wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /travis-install/miniconda.sh;
+fi
+
+bash /travis-install/miniconda.sh -b -p $HOME/miniconda
+export PATH="$HOME/miniconda/bin:$PATH"
+hash -r
+conda config --set always_yes yes --set changeps1 no
+# Useful for debugging any issues with conda
+conda info -a
+conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
+source activate test-environment
+python --version
+pip install --upgrade pip
+# Install Python test dependencies only if we're running Python tests
+if [[ "$INSTALL_SMALL_PYTHON_DEPS" == "true" ]]; then
+  pip install -r ./travis/small-requirements.txt
+fi
+if [[ "$INSTALL_LARGE_PYTHON_DEPS" == "true" ]]; then
+  pip install -r ./travis/large-requirements.txt
+fi
+pip install .
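+# Sketch of the intended invocation (inferred from the flags above): jobs
+# source this script with the relevant dependency flags set, e.g.
+#   INSTALL_LARGE_PYTHON_DEPS=true source ./travis/install-common-deps.sh
+# Sourcing, rather than executing, lets the PATH and conda-environment
+# changes persist in the calling shell.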
+export MLFLOW_HOME=$(pwd)
+# Remove boto config present in Travis VMs (https://github.com/travis-ci/travis-ci/issues/7940)
+sudo rm -f /etc/boto.cfg
+# Print current environment info
+pip list
+which mlflow
+echo $MLFLOW_HOME
+
+# Turn off trace output & exit-on-error
+set +ex
diff --git a/travis/large-requirements.txt b/travis/large-requirements.txt
new file mode 100644
index 0000000000000..5d33cb5e95ecd
--- /dev/null
+++ b/travis/large-requirements.txt
@@ -0,0 +1,34 @@
+# Large test reqs
+azure-storage==0.36.0
+google-cloud-storage==1.14.0
+botocore==1.12.84
+boto3==1.9.84
+mock==2.0.0
+moto==1.3.7
+h2o==3.22.1.4
+onnx==1.4.1; python_version >= "3.0"
+onnxmltools==1.4.0; python_version >= "3.0"
+onnxruntime==0.3.0; python_version >= "3.0"
+mleap==0.8.1
+pandas<=0.23.4
+pyarrow==0.12.1
+pyspark==2.4.0
+pytest==3.2.1
+pytest-cov==2.6.0
+scikit-learn==0.20.2
+scipy==1.2.1
+tensorflow==1.12.0
+torch==1.0.1
+torchvision==0.2.2
+# Install typing to fix torch on Python 2.7 (see https://github.com/pytorch/pytorch/issues/16775)
+typing==3.6.6
+pysftp==0.2.9
+keras==2.2.4
+attrdict==2.0.0
+azureml-sdk==1.0.17; python_version >= "3.0"
+cloudpickle==0.8.0
+pytest-localserver==0.5.0
+sqlalchemy==1.3.0
+kubernetes==9.0.0
+# test plugin
+tests/resources/mlflow-test-plugin/
diff --git a/travis/lint-requirements.txt b/travis/lint-requirements.txt
new file mode 100644
index 0000000000000..2b926a11d457b
--- /dev/null
+++ b/travis/lint-requirements.txt
@@ -0,0 +1,5 @@
+prospector[with_pyroma]==0.12.7
+pep8==1.7.1
+pyarrow==0.12.1
+pylint==1.8.2
+rstcheck==3.2
\ No newline at end of file
diff --git a/travis/run-large-python-tests.sh b/travis/run-large-python-tests.sh
new file mode 100755
index 0000000000000..db0c5cb354981
--- /dev/null
+++ b/travis/run-large-python-tests.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+set -x
+# Set err=1 if any commands exit with non-zero status as described in
+# https://stackoverflow.com/a/42219754
+err=0
+trap 'err=1' ERR
+
+
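+# NB: thanks to the ERR trap above, every pytest invocation below still runs
+# when an earlier one fails; the trap records the failure and the final
+# `test $err = 0` makes the whole script exit non-zero afterwards.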
+# TODO(czumar): Re-enable container building and associated SageMaker tests once the container
+# build process is no longer hanging
+# - SAGEMAKER_OUT=$(mktemp)
+# - if mlflow sagemaker build-and-push-container --no-push --mlflow-home . > $SAGEMAKER_OUT 2>&1; then
+#     echo "Sagemaker container build succeeded.";
+#   else
+#     echo "Sagemaker container build failed, output:";
+#     cat $SAGEMAKER_OUT;
+#   fi
+# NB: Also add --ignore'd tests to run-small-python-tests.sh
+pytest tests --large --ignore=tests/h2o --ignore=tests/keras \
+    --ignore=tests/pytorch --ignore=tests/pyfunc --ignore=tests/sagemaker --ignore=tests/sklearn \
+    --ignore=tests/spark --ignore=tests/tensorflow --ignore=tests/azureml --ignore=tests/onnx \
+    --ignore=tests/autologging
+# Run ML framework tests in their own Python processes to avoid OOM issues due to per-framework
+# overhead
+pytest --verbose tests/h2o --large
+# TODO(smurching): Re-enable Keras tests once they're no longer flaky
+# - pytest --verbose tests/keras --large
+pytest --verbose tests/onnx --large
+pytest --verbose tests/pytorch --large
+pytest --verbose tests/pyfunc --large
+pytest --verbose tests/sagemaker --large
+pytest --verbose tests/sagemaker/mock --large
+pytest --verbose tests/sklearn --large
+pytest --verbose tests/spark --large
+pytest --verbose tests/tensorflow --large
+pytest --verbose tests/azureml --large
+pytest --verbose tests/models --large
+pytest --verbose tests/autologging --large
+test $err = 0
diff --git a/travis/run-small-python-tests.sh b/travis/run-small-python-tests.sh
new file mode 100755
index 0000000000000..faf1cf64868b9
--- /dev/null
+++ b/travis/run-small-python-tests.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+set -x
+# Set err=1 if any commands exit with non-zero status as described in
+# https://stackoverflow.com/a/42219754
+err=0
+trap 'err=1' ERR
+
+# NB: Also add --ignore'd tests to run-large-python-tests.sh
+pytest --cov=mlflow --verbose --ignore=tests/h2o --ignore=tests/keras \
+    --ignore=tests/pytorch --ignore=tests/pyfunc --ignore=tests/sagemaker --ignore=tests/sklearn \
+    --ignore=tests/spark --ignore=tests/tensorflow --ignore=tests/autologging \
+    --ignore tests/azureml --ignore tests/onnx tests
+
+test $err = 0
diff --git a/travis/small-requirements.txt b/travis/small-requirements.txt
new file mode 100644
index 0000000000000..e1ae3599b1cd7
--- /dev/null
+++ b/travis/small-requirements.txt
@@ -0,0 +1,24 @@
+# Small test reqs
+azure-storage==0.36.0
+google-cloud-storage==1.14.0
+botocore==1.12.84 # pinned for moto
+boto3==1.9.84 # pinned for moto
+mock==2.0.0
+moto==1.3.7
+pandas<=0.23.4
+scikit-learn==0.20.2
+scipy==1.2.1
+pyarrow==0.12.1
+pysftp==0.2.9
+attrdict==2.0.0
+cloudpickle==0.8.0
+pytest==3.2.1
+pytest-cov==2.6.0
+pytest-localserver==0.5.0
+sqlalchemy==1.3.0
+kubernetes==9.0.0
+# test plugin
+tests/resources/mlflow-test-plugin/
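+# NB: a bare directory path in a requirements file is installed as a package,
+# i.e. `pip install tests/resources/mlflow-test-plugin/`, so the local test
+# plugin above is set up alongside the pinned test dependencies.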