diff --git a/README.md b/README.md
index 1e061797..ba9ba968 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Climate Health Anlysis Platform (CHAP)
+# Climate Health Analysis Platform (CHAP)
 CHAP offers a platform for analysing the relationship between climate and health. The platform is designed to be modular and flexible, allowing for easy integration of new models and data sources. The platform is designed to be used by researchers and public health professionals to forecast and assess the impact of climate on health outcomes.
 
 # Installation
diff --git a/setup.py b/setup.py
index 1201e96c..f65a3ead 100644
--- a/setup.py
+++ b/setup.py
@@ -38,6 +38,7 @@
     'earthengine-api',
     'mlflow',
     'gluonts',
+    'xarray'
 ]
 
 test_requirements = ['pytest>=3', "hypothesis"]
diff --git a/tests/external/test_external_models.py b/tests/external/test_external_models.py
index 98e6a7f1..4bb76455 100644
--- a/tests/external/test_external_models.py
+++ b/tests/external/test_external_models.py
@@ -10,8 +10,7 @@
 from climate_health.testing.external_model import sanity_check_external_model
 
 logging.basicConfig(level=logging.INFO)
-from climate_health.external.external_model import (get_model_from_yaml_file, run_command,
-                                                    ExternalCommandLineModel,
+from climate_health.external.external_model import (run_command,
                                                     get_model_from_directory_or_github_url)
 from ..data_fixtures import train_data, train_data_pop, future_climate_data
 from climate_health.util import conda_available, docker_available, pyenv_available
diff --git a/tests/test_external_model_evaluation_acceptance.py b/tests/test_external_model_evaluation_acceptance.py
index 8b4b4f70..d0b70200 100644
--- a/tests/test_external_model_evaluation_acceptance.py
+++ b/tests/test_external_model_evaluation_acceptance.py
@@ -75,16 +75,6 @@ def get_predictions(self, train_data: IsSpatioTemporalDataSet[ClimateHealthTimeS
         return DataSet(new_dict)
 
 
-# @pytest.mark.xfail
-@pytest.mark.slow
-@pytest.mark.skip(reason="Outdated")
-def test_external_model_evaluation(dataset_name, output_filename, load_data_func, external_predictive_model):
-    external_model = external_predictive_model
-    data_set = load_data_func(dataset_name)
-    report = evaluate_model(data_set, external_model)
-    report.save(output_filename)
-
-
 @pytest.mark.skip
 @pytest.mark.parametrize('mode', ['forecast'])
 def test_summary_model_evaluation(dataset_name, output_filename, load_data_func, mode):