[#14,#50] add min/max occurs int support + [#17] fix some tests + fix formatting invalid schema json response
fmigneault committed Aug 22, 2019
1 parent 09ee09e commit cd61545
Showing 9 changed files with 223 additions and 59 deletions.
2 changes: 2 additions & 0 deletions CHANGES.rst
@@ -16,6 +16,7 @@ New Features:
- Add more functional tests (#11, #17)
- Add support of ``label`` synonym as ``title`` for inputs and process description
(`CWL` specifying a ``label`` will set it in `WPS` process) (#31)
- Add support of input ``minOccurs`` and ``maxOccurs`` as ``int`` while maintaining ``str`` support (#14)

Changes:
- Use ``bump2version`` and move all config under ``setup.cfg``
@@ -31,6 +32,7 @@ Fixes:
- Fix case where omitted ``format[s]`` in both `CWL` and `WPS` deploy bodies generated a process description with
complex I/O (file) without required ``formats`` field. Default ``text/plain`` format is now automatically added.
- Fix case where ``format[s]`` lists between `CWL` and `WPS` were incorrectly merged.
- Fix invalid JSON response formatting on failing schema validation of process deployment body.

0.2.2 (2019-05-31)
==================
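As an illustration of the new ``minOccurs``/``maxOccurs`` entry above: after this change the same input may be
deployed with either spelling (minimal sketch; the ``tile`` identifier is hypothetical and not part of this commit):

    # both spellings describe the same input; "unbounded" stays the only accepted non-numeric maxOccurs value
    input_as_int = {"id": "tile", "minOccurs": 1, "maxOccurs": 2}
    input_as_str = {"id": "tile", "minOccurs": "1", "maxOccurs": "unbounded"}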
30 changes: 30 additions & 0 deletions Makefile
@@ -53,6 +53,7 @@ BUILDOUT_FILES := parts eggs develop-eggs bin .installed.cfg .mr.developer.cfg *
.PHONY: all
all: help

# FIXME: use autodoc target comments
.PHONY: help
help:
@echo "Please use \`make <target>' where <target> is one of"
@@ -85,7 +86,11 @@ help:
@echo "Testing targets:"
@echo " test-unit run unit tests (skip long running and online tests)."
@echo "  test-func        run functional tests (online and usage specific)."
@echo " test-online run online tests (running instance required)."
@echo " test-offline run offline tests (not marked as online)."
@echo " test-no-tb14 run all tests except ones marked for 'Testbed-14'."
@echo " test-all run all tests (including long running tests)."
@echo "  test             run custom tests from input specification (make TESTS='<spec>' test)."
@echo " coverage run all tests using coverage analysis."
@echo " pep8 run pep8 code style checks."
@echo "Sphinx targets:"
@@ -335,12 +340,37 @@ test-func:
bash -c "source $(CONDA_HOME)/bin/activate $(CONDA_ENV); \
pytest tests -v -m 'functional' --junitxml $(CURDIR)/tests/results.xml"

.PHONY: test-online
test-online:
@echo "Running online tests (running instance required)..."
bash -c "source $(CONDA_HOME)/bin/activate $(CONDA_ENV); \
pytest tests -v -m 'online' --junitxml $(CURDIR)/tests/results.xml"

.PHONY: test-offline
test-offline:
@echo "Running offline tests (not marked as online)..."
bash -c "source $(CONDA_HOME)/bin/activate $(CONDA_ENV); \
pytest tests -v -m 'not online' --junitxml $(CURDIR)/tests/results.xml"

.PHONY: test-no-tb14
test-no-tb14:
@echo "Running all tests except ones marked for 'Testbed-14'..."
bash -c "source $(CONDA_HOME)/bin/activate $(CONDA_ENV); \
pytest tests -v -m 'not testbed14' --junitxml $(CURDIR)/tests/results.xml"

.PHONY: test-all
test-all:
@echo "Running all tests (including slow and online tests)..."
bash -c "source $(CONDA_HOME)/bin/activate $(CONDA_ENV); \
pytest tests -v --junitxml $(CURDIR)/tests/results.xml"

.PHONY: test
test:
@echo "Running custom tests from input specification..."
@[ "${TESTS}" ] || ( echo ">> 'TESTS' is not set"; exit 1 )
bash -c "source $(CONDA_HOME)/bin/activate $(CONDA_ENV); \
pytest tests -v -m '${TESTS}' --junitxml $(CURDIR)/tests/results.xml"

.PHONY: coverage
coverage:
@echo "Running coverage analysis..."
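The marker expression passed through ``TESTS`` in the new ``test`` target maps directly onto pytest's ``-m``
option; a minimal sketch of an equivalent invocation from Python (marker names assumed to be the ones registered
by this test suite):

    import pytest

    # equivalent to `make TESTS="functional and not online" test`; returns a non-zero exit code on failure
    exit_code = pytest.main(["tests", "-v", "-m", "functional and not online",
                             "--junitxml", "tests/results.xml"])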
2 changes: 2 additions & 0 deletions templates/weaver.ini
@@ -87,11 +87,13 @@ handlers = console
level = ${settings:log-level}
handlers = console
qualname = weaver
propagate = 0

[logger_celery]
level = ${settings:log-level}
handlers = console
qualname = celery
propagate = 0

[handler_console]
class = StreamHandler
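The new ``propagate = 0`` entries keep records emitted on the ``weaver`` and ``celery`` loggers from being handled
a second time by the root logger's handlers; a minimal Python sketch of the same behaviour (handler setup is
assumed, only the logger names come from the config above):

    import logging

    logging.basicConfig(level=logging.INFO)      # root logger gets a console handler
    log = logging.getLogger("weaver")
    log.addHandler(logging.StreamHandler())      # logger-specific console handler
    log.propagate = False                        # equivalent of `propagate = 0` in the INI config
    log.info("emitted once")                     # not re-handled (and duplicated) by the root handler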
5 changes: 3 additions & 2 deletions tests/functional/test_ems_end2end.py
@@ -129,6 +129,7 @@ def setUpClass(cls):
.format(cls.logger_separator_cases, cls.current_case_name(), now(), cls.logger_separator_cases))

# test execution configs
cls.WEAVER_TEST_REQUEST_TIMEOUT = int(os.getenv("WEAVER_TEST_REQUEST_TIMEOUT", 60))
cls.WEAVER_TEST_JOB_ACCEPTED_MAX_TIMEOUT = int(os.getenv("WEAVER_TEST_JOB_ACCEPTED_MAX_TIMEOUT", 30))
cls.WEAVER_TEST_JOB_RUNNING_MAX_TIMEOUT = int(os.getenv("WEAVER_TEST_JOB_RUNNING_MAX_TIMEOUT", 6000))
cls.WEAVER_TEST_JOB_GET_STATUS_INTERVAL = int(os.getenv("WEAVER_TEST_JOB_GET_STATUS_INTERVAL", 5))
@@ -433,7 +434,7 @@ def request(cls, method, url, ignore_errors=False, force_requests=False, log_ena
cls.log(trace)

if with_requests:
kw.update({"verify": False})
kw.update({"verify": False, "timeout": cls.WEAVER_TEST_REQUEST_TIMEOUT})
resp = requests.request(method, url, json=json_body, data=data_body, **kw)

# add some properties similar to `webtest.TestApp`
@@ -550,7 +551,7 @@ def validate_test_server(cls):
servers.append(cls.WEAVER_TEST_WSO2_URL)
servers.append(cls.WEAVER_TEST_MAGPIE_URL)
for server_url in servers:
cls.request("GET", server_url, headers=cls.headers, status=HTTPOk.code, timeout=10)
cls.request("GET", server_url, headers=cls.headers, status=HTTPOk.code)
# verify that EMS configuration requirement is met
resp = cls.request("GET", cls.WEAVER_RESTAPI_URL, headers=cls.headers, status=HTTPOk.code)
cls.assert_test(lambda: resp.json.get("configuration") == WEAVER_CONFIGURATION_EMS,
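The new ``WEAVER_TEST_REQUEST_TIMEOUT`` is forwarded to ``requests`` as the per-request ``timeout`` so that an
unresponsive server fails the test instead of hanging it; a minimal sketch of the behaviour relied upon (URL and
value are placeholders):

    import requests

    try:
        # `timeout` bounds both the connection and the read phases of the request
        resp = requests.request("GET", "https://example.com/ows/proxy", timeout=60)
    except requests.exceptions.Timeout:
        print("server did not answer within 60 seconds")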
103 changes: 102 additions & 1 deletion tests/functional/test_wps_package.py
@@ -21,6 +21,7 @@
from tests import resources
import pytest
import unittest
import colander
import six

EDAM_PLAIN = EDAM_NAMESPACE + ":" + EDAM_MAPPING[CONTENT_TYPE_TEXT_PLAIN]
@@ -684,7 +685,7 @@ def test_resolution_io_min_max_occurs(self):
assert desc["process"]["inputs"][9]["minOccurs"] == "0"
assert desc["process"]["inputs"][9]["maxOccurs"] == "1"
assert desc["process"]["inputs"][10]["id"] == "required_array_min_fixed_from_wps"
# FIXME:
# FIXME: https://github.com/crim-ca/weaver/issues/50
# `maxOccurs=1` not updated to `maxOccurs="unbounded"` as it is evaluated as a single value,
# but it should be considered an array since `minOccurs>1`
# (see: https://github.com/crim-ca/weaver/issues/17)
@@ -739,6 +740,106 @@ def test_resolution_io_min_max_occurs(self):
assert pkg["inputs"][13]["id"] == "optional_array_max_fixed_from_wps"
# assert pkg["inputs"][13]["type"] == "string[]?"

# FIXME: https://github.com/crim-ca/weaver/issues/50
# 'unbounded' value should not override literal 2/'2'
@pytest.mark.xfail(reason="MinOccurs/MaxOccurs values in response should be preserved as defined in deploy body")
def test_valid_io_min_max_occurs_as_str_or_int(self):
"""
Test validates that I/O definitions with ``minOccurs`` and/or ``maxOccurs`` are permitted as both integer
and string definitions in order to support (1, "1", "unbounded") variations.

.. seealso::
- :meth:`test_invalid_io_min_max_occurs_wrong_format`
"""
cwl = {
"cwlVersion": "v1.0",
"class": "CommandLineTool",
"inputs": [
{"id": "io_min_int_max_int", "type": "string"},
{"id": "io_min_int_max_str", "type": "string"},
{"id": "io_min_str_max_int", "type": "string"},
{"id": "io_min_str_max_str", "type": "string"},
{"id": "io_min_int_max_unbounded", "type": "string"},
{"id": "io_min_str_max_unbounded", "type": "string"},
],
"outputs": {"values": {"type": "string"}}
}
body = {
"processDescription": {
"process": {
"id": self._testMethodName,
"title": "some title",
"abstract": "this is a test",
},
"inputs": [
{"id": "io_min_int_max_int", "minOccurs": 1, "maxOccurs": 2},
{"id": "io_min_int_max_str", "minOccurs": 1, "maxOccurs": "2"},
{"id": "io_min_str_max_int", "minOccurs": "1", "maxOccurs": 2},
{"id": "io_min_str_max_str", "minOccurs": "1", "maxOccurs": "2"},
{"id": "io_min_int_max_unbounded", "minOccurs": 1, "maxOccurs": "unbounded"},
{"id": "io_min_str_max_unbounded", "minOccurs": "1", "maxOccurs": "unbounded"},
]
},
"deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication",
"executionUnit": [{"unit": cwl}],
}
try:
desc, _ = self.deploy_process(body)
except colander.Invalid:
self.fail("MinOccurs/MaxOccurs values defined as valid int/str should not raise an invalid schema error")

inputs = body["processDescription"]["inputs"]
assert isinstance(desc["process"]["inputs"], list)
assert len(desc["process"]["inputs"]) == len(inputs)
for i, input in enumerate(inputs):
assert desc["process"]["inputs"][i]["id"] == input["id"]
for field in ["minOccurs", "maxOccurs"]:
assert desc["process"]["inputs"][i][field] in (input[field], str(input[field]))

# FIXME: test not working
# same payloads sent directly to running weaver properly raise invalid schema -> bad request error
# somehow they don't work within this test (not raised)...
@pytest.mark.xfail(reason="MinOccurs/MaxOccurs invalid formats are not raised within this test, but are properly rejected by a running Weaver instance")
def test_invalid_io_min_max_occurs_wrong_format(self):
"""
Test verifies that ``minOccurs`` and/or ``maxOccurs`` definitions in formats other than the allowed ones are
rejected as invalid schemas.

.. seealso::
:meth:`test_valid_io_min_max_occurs_as_str_or_int`
"""
cwl = {
"cwlVersion": "v1.0",
"class": "CommandLineTool",
"inputs": [{}], # updated after
"outputs": {"values": {"type": "string"}}
}
body = {
"processDescription": {
"process": {
"id": self._testMethodName,
"title": "some title",
"abstract": "this is a test",
},
"inputs": [{}] # updated after
},
"deploymentProfileName": "http://www.opengis.net/profiles/eoc/wpsApplication",
"executionUnit": [{"unit": cwl}],
}

# replace by invalid min/max and check that it raises
cwl["inputs"][0] = {"id": "test", "type": {"type": "array", "items": "string"}}
body["processDescription"]["inputs"][0] = {"id": "test", "minOccurs": [1], "maxOccurs": 1}
with self.assertRaises(colander.Invalid):
desc, _ = self.deploy_process(body)
self.fail("Invalid input minOccurs schema definition should have been raised")

cwl["inputs"][0] = {"id": "test", "type": {"type": "array", "items": "string"}}
body["processDescription"]["inputs"][0] = {"id": "test", "minOccurs": 1, "maxOccurs": 3.1416}
with self.assertRaises(HTTPBadRequest):
desc, _ = self.deploy_process(body)
self.fail("Invalid input maxOccurs schema definition should have been raised")

def test_complex_io_from_package(self):
"""
Test validates that complex I/O definitions *only* defined in the `CWL` package as `JSON` within the
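Independently of the tests above, the accepted ``minOccurs``/``maxOccurs`` variations boil down to a small
normalization rule; a hedged sketch of that rule (the ``parse_occurs`` helper is hypothetical, not Weaver's
actual implementation):

    def parse_occurs(value):
        """Normalize an occurs value given as int, numeric str or the literal 'unbounded'."""
        if value == "unbounded":
            return "unbounded"
        if isinstance(value, int) and not isinstance(value, bool):
            return value
        if isinstance(value, str) and value.isdigit():
            return int(value)
        raise ValueError("invalid occurs value: {!r}".format(value))  # e.g. [1] or 3.1416

    assert parse_occurs("1") == 1 and parse_occurs(2) == 2 and parse_occurs("unbounded") == "unbounded"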
51 changes: 30 additions & 21 deletions tests/test_opensearch.py
@@ -24,7 +24,7 @@
COLLECTION_IDS = {
"sentinel2": "EOP:IPT:Sentinel2",
"probav": "EOP:VITO:PROBAV_P_V001",
# "deimos": "DE2_PS3_L1C",
"deimos": "DE2_PS3_L1C",
}


@@ -116,7 +116,7 @@ def make_deque(id_, value):
make_deque(OPENSEARCH_START_DATE, "2018-01-30T00:00:00.000Z"),
make_deque(OPENSEARCH_END_DATE, "2018-01-31T23:59:59.999Z"),
make_deque(OPENSEARCH_AOI, "100.4,15.3,104.6,19.3"),
make_deque("files", "EOP:IPT:Sentinel2"),
make_deque("files", COLLECTION_IDS["sentinel2"]),
make_deque("output_file_type", "GEOTIFF"),
make_deque("output_name", "stack_result.tif"),
]
@@ -134,7 +134,7 @@ def make_deque(id_, value):
)

with mock.patch.object(opensearch.OpenSearchQuery, "query_datasets", return_value=mocked_query):
eo_image_source_info = make_eo_image_source_info("files", "EOP:IPT:Sentinel2")
eo_image_source_info = make_eo_image_source_info("files", COLLECTION_IDS["sentinel2"])
mime_types = {'files': eo_image_source_info['files']['mime_types']}
transformed = opensearch.query_eo_images_from_wps_inputs(inputs, eo_image_source_info, mime_types)

@@ -241,19 +241,17 @@ def test_get_additional_parameters():
assert ("UniqueTOI", ["true"]) in params


@pytest.mark.online
def test_get_template_urls():
def get_template_urls(collection_id):
all_fields = set()
for name, collection_id in COLLECTION_IDS.items():
o = opensearch.OpenSearchQuery(collection_id, osdd_url=OSDD_URL)
template = o.get_template_url()
params = parse_qsl(urlparse(template).query)
param_names = list(sorted(p[0] for p in params))
if all_fields:
# noinspection PyUnresolvedReferences
all_fields = all_fields.intersection(param_names)
else:
all_fields.update(param_names)
o = opensearch.OpenSearchQuery(collection_id, osdd_url=OSDD_URL)
template = o.get_template_url()
params = parse_qsl(urlparse(template).query)
param_names = list(sorted(p[0] for p in params))
if all_fields:
# noinspection PyUnresolvedReferences
all_fields = all_fields.intersection(param_names)
else:
all_fields.update(param_names)

fields_in_all_queries = list(sorted(all_fields))
expected = [
@@ -274,6 +272,17 @@ def test_get_template_urls():
assert not set(expected) - set(fields_in_all_queries)


@pytest.mark.online
def test_get_template_sentinel2():
get_template_urls(COLLECTION_IDS["sentinel2"])


@pytest.mark.xfail(reason="Collection 'probav' dataset series cannot be found.")
@pytest.mark.online
def test_get_template_probav():
get_template_urls(COLLECTION_IDS["probav"])


def inputs_unique_aoi_toi(files_id):
return {
OPENSEARCH_AOI: deque([LiteralInput(OPENSEARCH_AOI, "Area", data_type="string")]),
@@ -318,13 +327,13 @@ def sentinel2_inputs(unique_aoi_toi=True):
else:
inputs = inputs_non_unique_aoi_toi(sentinel_id)

inputs[sentinel_id][0].data = "EOP:IPT:Sentinel2"
inputs[sentinel_id][0].data = COLLECTION_IDS["sentinel2"]
inputs[end_date][0].data = u"2018-01-31T23:59:59.999Z"
inputs[start_date][0].data = u"2018-01-30T00:00:00.000Z"
# inputs[aoi][0].data = u"POLYGON ((100 15, 104 15, 104 19, 100 19, 100 15))"
inputs[aoi][0].data = u"100.0, 15.0, 104.0, 19.0"

eo_image_source_info = make_eo_image_source_info(sentinel_id, "EOP:IPT:Sentinel2")
eo_image_source_info = make_eo_image_source_info(sentinel_id, COLLECTION_IDS["sentinel2"])
return inputs, eo_image_source_info


@@ -336,14 +345,14 @@ def probav_inputs(unique_aoi_toi=True):
else:
inputs = inputs_non_unique_aoi_toi(probav_id)

inputs[probav_id][0].data = "EOP:VITO:PROBAV_P_V001"
inputs[probav_id][0].data = COLLECTION_IDS["probav"]
inputs[end_date][0].data = u"2018-01-31T23:59:59.999Z"
inputs[start_date][0].data = u"2018-01-30T00:00:00.000Z"
# inputs[aoi][0].data = u"POLYGON ((100 15, 104 15, 104 19, 100 19, 100 15))"
inputs[aoi][0].data = u"100.0, 15.0, 104.0, 19.0"

eo_image_source_info = make_eo_image_source_info(
probav_id, "EOP:VITO:PROBAV_P_V001"
probav_id, COLLECTION_IDS["probav"]
)

return inputs, eo_image_source_info
@@ -367,13 +376,13 @@ def deimos_inputs(unique_aoi_toi=True):
end_date, start_date, aoi = query_param_names(unique_aoi_toi, deimos_id)
inputs = inputs_unique_aoi_toi(deimos_id)

inputs[deimos_id][0].data = "DE2_PS3_L1C"
inputs[deimos_id][0].data = COLLECTION_IDS["deimos"]
inputs[start_date][0].data = u"2008-01-01T00:00:00Z"
inputs[end_date][0].data = u"2009-01-01T00:00:00Z"
# inputs[aoi][0].data = u"MULTIPOINT ((-117 32), (-115 34))"
inputs[aoi][0].data = u"-117, 32, -115, 34"

eo_image_source_info = make_eo_image_source_info(deimos_id, "DE2_PS3_L1C")
eo_image_source_info = make_eo_image_source_info(deimos_id, COLLECTION_IDS["deimos"])
return inputs, eo_image_source_info


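A note on the split into ``test_get_template_sentinel2`` and ``test_get_template_probav`` above: the same
per-collection coverage could also be expressed with pytest parametrization; this is only a possible alternative
(it reuses ``get_template_urls`` and ``COLLECTION_IDS`` from the module above), not what this commit does:

    import pytest

    @pytest.mark.online
    @pytest.mark.parametrize("collection", [
        "sentinel2",
        pytest.param("probav", marks=pytest.mark.xfail(reason="Collection 'probav' dataset series cannot be found.")),
    ])
    def test_get_template(collection):
        get_template_urls(COLLECTION_IDS[collection])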