diff --git a/.github/workflows/test_package.yaml b/.github/workflows/test_package.yaml
index e239c4b..e39cee8 100644
--- a/.github/workflows/test_package.yaml
+++ b/.github/workflows/test_package.yaml
@@ -11,6 +11,8 @@ jobs:
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
@@ -34,4 +36,8 @@ jobs:
           SSO_CLIENT_SECRET: ${{ secrets.SSO_CLIENT_SECRET }}
           ENVIRONMENT: ${{ vars.ENVIRONMENT }}
         run: |
-          pytest tests --retries 2
+          pytest tests --cov=runregistry --cov-report xml --retries 2
+      - name: Upload results to Codecov
+        uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/README.md b/README.md
index b768b23..a7c7f69 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,6 @@
 ![Build Status](https://github.com/cms-DQM/runregistry_api_client/actions/workflows/test_package.yaml/badge.svg)
+[![codecov](https://codecov.io/github/cms-DQM/runregistry_api_client/graph/badge.svg?token=IRADJ57684)](https://codecov.io/github/cms-DQM/runregistry_api_client)
+[![PyPI version](https://badge.fury.io/py/runregistry.png)](https://badge.fury.io/py/runregistry)
 
 # Run Registry Client
 
@@ -389,17 +391,17 @@ generated_json = runregistry.create_json(json_logic=json_logic, dataset_name_fil
 You can also manipulate runs via API:
 
 1. Mark run significant:
-```python
-runregistry.make_significant_runs(run=362761)
-```
+   ```python
+   runregistry.make_significant_runs(run=362761)
+   ```
 2. Reset RR attributes and reload data from OMS:
-```python
-runregistry.reset_RR_attributes_and_refresh_runs(run=362761)
-```
+   ```python
+   runregistry.reset_RR_attributes_and_refresh_runs(run=362761)
+   ```
 3. Move runs from one state to another:
-```python
-runregistry.move_runs("OPEN", "SIGNOFF", run=362761)
-```
+   ```python
+   runregistry.move_runs("OPEN", "SIGNOFF", run=362761)
+   ```
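+
+As of this change, these helpers raise an `Exception` on any non-200 response
+and return the parsed JSON bodies as a list instead of raw `requests.Response`
+objects.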
 
 ## Troubleshooting
 
@@ -414,6 +416,7 @@
 python3 -m pip install --upgrade pip build twine
 python3 -m build
 python3 -m twine upload --skip-existing --repository pypi dist/*
 ```
+Instructions from [here](https://packaging.python.org/en/latest/tutorials/packaging-projects/).
 
 ## Running the tests
@@ -451,12 +454,12 @@ No.
 
 ### Should I be using `runregistry_api_client` for getting OMS data?
 
-No*.
+No\*.
 
 Our recommendation is to query Run Registry only for data that RR is responsible for.
-*It's not that you can't, it's just that this puts extra burden on the application, making it slow for everyone.
+\*It's not that you can't, it's just that this puts extra burden on the application, making it slow for everyone.
 
 ### Is the token stored somewhere and reused?
 
-No, almost every function call gets a new token. This is not ideal, and it may be improved in the future.
\ No newline at end of file
+No, almost every function call gets a new token. This is not ideal, and it may be improved in the future.
diff --git a/runregistry/runregistry.py b/runregistry/runregistry.py
index b973bad..4a387b1 100644
--- a/runregistry/runregistry.py
+++ b/runregistry/runregistry.py
@@ -2,7 +2,6 @@
 import time
 import json
 import requests
-import warnings
 from dotenv import load_dotenv
 from cernrequests import get_api_token, get_with_token
 from runregistry.utils import (
@@ -66,6 +65,10 @@ def setup(target):
         api_url = "https://cmsrunregistry.web.cern.ch/api"
         use_cookies = True
         target_application = "webframeworks-paas-cmsrunregistry"
+    else:
+        raise Exception(
+            f'Invalid setup target "{target}". Valid options: "local", "development", "production".'
+        )
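+# Usage sketch: runregistry.setup("development") selects the dev instance; any
+# other target now raises immediately rather than silently leaving api_url
+# unchanged.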
{answer.text}" + ) + answers.append(answer.json()) return answers @@ -520,7 +526,7 @@ def edit_rr_lumisections( WIP edit RR lumisections attributes """ if status not in LUMISECTION_STATES: - raise Exception( + raise ValueError( f"edit_rr_lumisections(): got status '{status}'", f" but allowed statuses are {LUMISECTION_STATES}", ) @@ -542,7 +548,12 @@ def edit_rr_lumisections( "component": component, } ) - return requests.put(url, headers=headers, data=payload) + answer = requests.put(url, headers=headers, data=payload) + if answer.status_code != 200: + raise Exception( + f"Got response {answer.status_code} when editing rr lumisections: {answer.text}" + ) + return answer.json() def move_datasets( @@ -566,10 +577,7 @@ def move_datasets( headers = _get_headers(token=_get_token()) if run: - payload = json.dumps( - {"run_number": run, "dataset_name": dataset_name, "workspace": workspace} - ) - return [requests.post(url, headers=headers, data=payload)] + runs = [run] answers = [] for run_number in runs: @@ -580,8 +588,12 @@ def move_datasets( "workspace": workspace, } ) - answer = requests.post(url, headers=headers, data=payload).json() - answers.append(answer) + answer = requests.post(url, headers=headers, data=payload) + if answer.status_code != 200: + raise Exception( + f"Got response {answer.status_code} when moving datasets: {answer.text}" + ) + answers.append(answer.json()) return answers @@ -603,30 +615,26 @@ def _execute_request_for_single_run(run_number, new_class): ) if not isinstance(new_class, str): - raise Exception('Invalid input for "new_class"') + raise ValueError(f'Invalid input "{new_class}" for "new_class"') answers = [] + # If just one int provided, make it into a list + if isinstance(run_numbers, int): + run_numbers = [run_numbers] + if isinstance(run_numbers, list): for run_number in run_numbers: if not isinstance(run_number, int): - raise Exception( + raise ValueError( "Invalid run number value found in run_numbers. Please provide a list of numbers." ) - answers.append(_execute_request_for_single_run(run_number, new_class)) - elif isinstance(run_numbers, int): - answers.append(_execute_request_for_single_run(run_numbers, new_class)) + answer = _execute_request_for_single_run(run_number, new_class) + if answer.status_code != 200: + raise Exception( + f"Got response {answer.status_code} when changing run class: {answer.text}" + ) + answers.append(answer.json()) else: - raise Exception( + raise ValueError( 'Invalid input for "run_numbers". Please provide a list of numbers.' 
+
+
 # advanced RR operations ==============================================================================
 # Online Table
 def move_runs(from_, to_, run=None, runs=[], **kwargs):
@@ -426,14 +419,17 @@
     headers = _get_headers(token=_get_token())
 
     if run:
-        payload = json.dumps({"run_number": run})
-        return requests.post(url, headers=headers, data=payload)
+        runs = [run]
 
     answers = []
     for run_number in runs:
         payload = json.dumps({"run_number": run_number})
         answer = requests.post(url, headers=headers, data=payload)
-        answers.append(answer)
+        if answer.status_code != 200:
+            raise Exception(
+                f"Got response {answer.status_code} when moving runs: {answer.text}"
+            )
+        answers.append(answer.json())
     return answers
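+# Usage sketch (run numbers taken from the README and tests; requires
+# membership in the operator e-groups, otherwise this raises):
+#   move_runs("OPEN", "SIGNOFF", run=362761)
+#   move_runs("OPEN", "SIGNOFF", runs=[362761, 362874])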
@@ -448,18 +444,20 @@ def make_significant_runs(run=None, runs=[], **kwargs):
         )
     url = "%s/runs/mark_significant" % (api_url)
     headers = _get_headers(token=_get_token())
 
     if run:
-        data = {"run_number": run}
-        return requests.post(url, headers=headers, json=data)
+        runs = [run]
 
     answers = []
     for run_number in runs:
-        data = {"run_number": run}
+        data = {"run_number": run_number}
         answer = requests.post(url, headers=headers, json=data)
-        answers.append(answer)
+        if answer.status_code != 200:
+            raise Exception(
+                f"Got response {answer.status_code} when making runs significant: {answer.text}"
+            )
+        answers.append(answer.json())
     return answers
@@ -478,7 +476,11 @@ def reset_RR_attributes_and_refresh_runs(runs=[], **kwargs):
     for run_number in runs:
         url = "%s/runs/reset_and_refresh_run/%d" % (api_url, run_number)
         answer = requests.post(url, headers=headers)
-        answers.append(answer)
+        if answer.status_code != 200:
+            raise Exception(
+                f"Got response {answer.status_code} when resetting and refreshing runs: {answer.text}"
+            )
+        answers.append(answer.json())
     return answers
@@ -500,7 +502,11 @@ def manually_refresh_components_statuses_for_runs(runs=[], **kwargs):
     for run_number in runs:
         url = "%s/runs/refresh_run/%d" % (api_url, run_number)
         answer = requests.post(url, headers=headers)
-        answers.append(answer)
+        if answer.status_code != 200:
+            raise Exception(
+                f"Got response {answer.status_code} when manually refreshing component statuses: {answer.text}"
+            )
+        answers.append(answer.json())
     return answers
@@ -520,7 +526,7 @@
     WIP edit RR lumisections attributes
     """
     if status not in LUMISECTION_STATES:
-        raise Exception(
+        raise ValueError(
             f"edit_rr_lumisections(): got status '{status}'",
             f" but allowed statuses are {LUMISECTION_STATES}",
         )
@@ -542,7 +548,12 @@
             "component": component,
         }
     )
-    return requests.put(url, headers=headers, data=payload)
+    answer = requests.put(url, headers=headers, data=payload)
+    if answer.status_code != 200:
+        raise Exception(
+            f"Got response {answer.status_code} when editing rr lumisections: {answer.text}"
+        )
+    return answer.json()
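+# Example call (sketch, mirroring tests/test_advanced_rr_operations.py):
+#   edit_rr_lumisections(362874, 0, 1, "castor-castor", "GOOD")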
- """ - answer = runregistry.move_runs("OPEN", "SIGNOFF", run=VALID_RUN_NUMBER) - # Requires permission - assert answer.status_code == 401 +def test_advanced_move_single_run(setup_runregistry): + with pytest.raises(Exception) as e: + # Requires permission + runregistry.move_runs("OPEN", "SIGNOFF", run=VALID_RUN_NUMBER) + assert "User needs to be part of any of the following e-groups" in e.exconly() + + +def test_advanced_move_multi_runs(setup_runregistry): + with pytest.raises(Exception) as e: + # Requires permission + runregistry.move_runs("OPEN", "SIGNOFF", runs=[VALID_RUN_NUMBER]) + assert EGROUPS_ERROR in e.exconly() -def test_move_multi_runs(setup_runregistry): +def test_advanced_move_runs_invalid(setup_runregistry): + with pytest.raises(ValueError): + # Requires permission + runregistry.move_runs("!!!!", "???", runs=[VALID_RUN_NUMBER]) + + +def test_advanced_edit_rr_lumisections_good(setup_runregistry): + with pytest.raises(Exception) as e: + # Requires permission + runregistry.edit_rr_lumisections( + VALID_RUN_NUMBER, 0, 1, "castor-castor", "GOOD" + ) + assert "User needs to be part of any of the following e-groups" in e.exconly() + + +def test_advanced_edit_rr_lumisections_bad(setup_runregistry): + with pytest.raises(ValueError): + # Requires permission + runregistry.edit_rr_lumisections( + VALID_RUN_NUMBER, 0, 1, "castor-castor", "KALHSPERA STHN PAREA ;)" + ) + + +def test_advanced_change_run_class_list(setup_runregistry): + with pytest.raises(Exception) as e: + # Requires permission + runregistry.change_run_class(run_numbers=[VALID_RUN_NUMBER], new_class="test") + assert "User needs to be part of any of the following e-groups" in e.exconly() + + +def test_advanced_change_run_class_int(setup_runregistry): """ - Unfortunately, this function was given a dual signature, and can return - both a single or a list of request responses. 
-
-
-def get_datasets_accepted():
-    """
-    Method for fetching current datasets accepted in Offline Run Registry
-    """
-    url = "{}/datasets_accepted".format(api_url)
-    headers = _get_headers(token=_get_token())
-    if os.getenv("ENVIRONMENT") in ["development", "local"]:
-        print(url)
-    return requests.get(url, headers=headers).json()
diff --git a/tests/test_advanced_rr_operations.py b/tests/test_advanced_rr_operations.py
index 887f400..f449baf 100644
--- a/tests/test_advanced_rr_operations.py
+++ b/tests/test_advanced_rr_operations.py
@@ -5,6 +5,7 @@ logger = logging.getLogger(__name__)
 
 VALID_RUN_NUMBER = 362874
 VALID_DATASET_NAME = "/PromptReco/Commissioning2021/DQM"
+EGROUPS_ERROR = "User needs to be part of any of the following e-groups"
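+# The permission tests below match this message via ExceptionInfo.exconly(),
+# which renders as "ExceptionType: message".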
 
 
 @pytest.fixture
@@ -13,110 +14,201 @@ def setup_runregistry():
     runregistry.setup("development")
 
 
-def test_move_datasets(setup_runregistry):
-    answers = runregistry.move_datasets(
-        from_=runregistry.WAITING_DQM_GUI_CONSTANT,
-        to_="OPEN",
-        dataset_name=VALID_DATASET_NAME,
-        run=VALID_RUN_NUMBER,
-        workspace="global",
-    )
+def test_advanced_move_datasets(setup_runregistry):
+    with pytest.raises(Exception) as e:
+        answers = runregistry.move_datasets(
+            from_=runregistry.WAITING_DQM_GUI_CONSTANT,
+            to_="OPEN",
+            dataset_name=VALID_DATASET_NAME,
+            run=VALID_RUN_NUMBER,
+            workspace="global",
+        )
+    assert EGROUPS_ERROR in e.exconly()
     # TODO: Run also with a token that has permission
-    assert all([answer.status_code == 401 for answer in answers])
-    answers = runregistry.move_datasets(
-        from_="OPEN",
-        to_="SIGNOFF",
-        dataset_name=VALID_DATASET_NAME,
-        run=VALID_RUN_NUMBER,
-        workspace="ctpps",
-    )
-    # Requires permission
-    assert all([answer.status_code == 401 for answer in answers])
+    with pytest.raises(Exception) as e:
+        # Requires permission
+        answers = runregistry.move_datasets(
+            from_="OPEN",
+            to_="SIGNOFF",
+            dataset_name=VALID_DATASET_NAME,
+            run=VALID_RUN_NUMBER,
+            workspace="ctpps",
+        )
+    assert EGROUPS_ERROR in e.exconly()
+
+
+def test_advanced_move_datasets_bad_from(setup_runregistry):
+    with pytest.raises(ValueError):
+        answers = runregistry.move_datasets(
+            from_="NOT_A_STATE",
+            to_="SIGNOFF",
+            dataset_name=VALID_DATASET_NAME,
+            run=VALID_RUN_NUMBER,
+            workspace="global",
+        )
+
+
+def test_advanced_move_datasets_no_run(setup_runregistry):
+    with pytest.raises(ValueError):
+        # Neither run nor runs provided
+        answers = runregistry.move_datasets(
+            from_="OPEN",
+            to_="SIGNOFF",
+            dataset_name=VALID_DATASET_NAME,
+            workspace="global",
+        )
 
 
-def test_make_significant_single_run(setup_runregistry):
+def test_advanced_move_datasets_bad_to(setup_runregistry):
+    with pytest.raises(ValueError):
+        answers = runregistry.move_datasets(
+            from_=runregistry.WAITING_DQM_GUI_CONSTANT,
+            to_="NOT_A_STATE",
+            dataset_name=VALID_DATASET_NAME,
+            run=VALID_RUN_NUMBER,
+            workspace="global",
+        )
+
+
+def test_advanced_make_significant_single_run(setup_runregistry):
     # Get latest run in dev runregistry and make it significant
     run = runregistry.get_runs(limit=1, filter={})[0]
-    answer = runregistry.make_significant_runs(run=run["run_number"])
-    # requires permission
-    assert answer.status_code == 401
+    with pytest.raises(Exception) as e:
+        # requires permission
+        runregistry.make_significant_runs(run=run["run_number"])
+    assert EGROUPS_ERROR in e.exconly()
 
 
-def test_make_significant_multi_runs(setup_runregistry):
+def test_advanced_make_significant_multi_runs(setup_runregistry):
     # Get latest run in dev runregistry and make it significant
     run = runregistry.get_runs(limit=1, filter={})[0]
-    answers = runregistry.make_significant_runs(runs=[run["run_number"]])
-    # requires permission
-    assert all([answer.status_code == 401 for answer in answers])
-
-
-def test_reset_RR_attributes_and_refresh_runs_signed_off(setup_runregistry):
-    answers = runregistry.reset_RR_attributes_and_refresh_runs(runs=VALID_RUN_NUMBER)
-    # Cannot refresh runs which are not open
-    assert all(
-        [
-            answer.status_code == 500 and "Run must be in state OPEN" in answer.text
-            for answer in answers
-        ]
-    )
+    with pytest.raises(Exception) as e:
+        # requires permission
+        runregistry.make_significant_runs(runs=[run["run_number"]])
+    assert EGROUPS_ERROR in e.exconly()
 
 
-def test_manually_refresh_components_statuses_for_runs_open(setup_runregistry):
+def test_advanced_make_significant_no_runs(setup_runregistry):
     run = runregistry.get_runs(limit=1, filter={})[0]
-    answers = runregistry.manually_refresh_components_statuses_for_runs(
-        runs=run["run_number"]
-    )
-    assert all([answer.status_code == 200 for answer in answers])
+    with pytest.raises(ValueError):
+        # Required args missing
+        runregistry.make_significant_runs()
+
+
+def test_advanced_reset_RR_attributes_and_refresh_runs_signed_off(setup_runregistry):
+    with pytest.raises(Exception) as e:
+        # Cannot refresh runs which are not open
+        answers = runregistry.reset_RR_attributes_and_refresh_runs(
+            runs=VALID_RUN_NUMBER
+        )
+    assert "Run must be in state OPEN" in e.exconly()
+
+
+def test_advanced_reset_RR_attributes_and_refresh_runs_no_run(setup_runregistry):
+    with pytest.raises(ValueError):
+        # Missing argument
+        answers = runregistry.reset_RR_attributes_and_refresh_runs()
 
 
-def test_reset_RR_attributes_and_refresh_runs_open(setup_runregistry):
+def test_advanced_reset_RR_attributes_and_refresh_runs_open(setup_runregistry):
     run = runregistry.get_runs(limit=1, filter={})[0]
     answers = runregistry.reset_RR_attributes_and_refresh_runs(runs=run["run_number"])
-    assert all([answer.status_code == 200 for answer in answers])
+    assert isinstance(answers, list)
 
 
-def test_manually_refresh_components_statuses_for_runs_signed_off(setup_runregistry):
+def test_advanced_manually_refresh_components_statuses_for_no_runs(setup_runregistry):
+    with pytest.raises(ValueError):
+        # Missing argument
+        runregistry.manually_refresh_components_statuses_for_runs()
+
+
+def test_advanced_manually_refresh_components_statuses_for_runs_open(setup_runregistry):
+    run = runregistry.get_runs(limit=1, filter={})[0]
+    # Manual refresh does not appear to require special permissions
     answers = runregistry.manually_refresh_components_statuses_for_runs(
-        runs=VALID_RUN_NUMBER
-    )
-    # Cannot refresh runs which are not open
-    assert all(
-        [
-            answer.status_code == 500 and "Run must be in state OPEN" in answer.text
-            for answer in answers
-        ]
+        runs=run["run_number"]
     )
+    assert isinstance(answers, list)
+
+
+def test_advanced_manually_refresh_components_statuses_for_runs_signed_off(
+    setup_runregistry,
+):
+    with pytest.raises(Exception) as e:
+        runregistry.manually_refresh_components_statuses_for_runs(runs=VALID_RUN_NUMBER)
+    assert "Run must be in state OPEN" in e.exconly()
 
 
-def test_move_runs_no_run_arg(setup_runregistry):
+def test_advanced_move_runs_no_run_arg(setup_runregistry):
     with pytest.raises(ValueError):
         # Raises ValueError
         runregistry.move_runs("OPEN", "SIGNOFF")
 
 
-def test_move_single_run(setup_runregistry):
-    """
-    Unfortunately, this function was given a dual signature, and can return
-    both a single or a list of request responses.
-    """
-    answer = runregistry.move_runs("OPEN", "SIGNOFF", run=VALID_RUN_NUMBER)
-    # Requires permission
-    assert answer.status_code == 401
+def test_advanced_move_single_run(setup_runregistry):
+    with pytest.raises(Exception) as e:
+        # Requires permission
+        runregistry.move_runs("OPEN", "SIGNOFF", run=VALID_RUN_NUMBER)
+    assert EGROUPS_ERROR in e.exconly()
+
+
+def test_advanced_move_multi_runs(setup_runregistry):
+    with pytest.raises(Exception) as e:
+        # Requires permission
+        runregistry.move_runs("OPEN", "SIGNOFF", runs=[VALID_RUN_NUMBER])
+    assert EGROUPS_ERROR in e.exconly()
 
 
-def test_move_multi_runs(setup_runregistry):
+def test_advanced_move_runs_invalid(setup_runregistry):
+    with pytest.raises(ValueError):
+        # Invalid states
+        runregistry.move_runs("!!!!", "???", runs=[VALID_RUN_NUMBER])
+
+
+def test_advanced_edit_rr_lumisections_good(setup_runregistry):
+    with pytest.raises(Exception) as e:
+        # Requires permission
+        runregistry.edit_rr_lumisections(
+            VALID_RUN_NUMBER, 0, 1, "castor-castor", "GOOD"
+        )
+    assert EGROUPS_ERROR in e.exconly()
+
+
+def test_advanced_edit_rr_lumisections_bad(setup_runregistry):
+    with pytest.raises(ValueError):
+        # Invalid lumisection status
+        runregistry.edit_rr_lumisections(
+            VALID_RUN_NUMBER, 0, 1, "castor-castor", "NOT_A_VALID_STATUS"
+        )
+
+
+def test_advanced_change_run_class_list(setup_runregistry):
+    with pytest.raises(Exception) as e:
+        # Requires permission
+        runregistry.change_run_class(run_numbers=[VALID_RUN_NUMBER], new_class="test")
+    assert EGROUPS_ERROR in e.exconly()
+
+
+def test_advanced_change_run_class_int(setup_runregistry):
     """
-    Unfortunately, this function was given a dual signature, and can return
-    both a single or a list of request responses.
+    Current behavior is to accept both a list and an int as run_numbers
     """
-    answers = runregistry.move_runs("OPEN", "SIGNOFF", runs=[VALID_RUN_NUMBER])
-    # Requires permission
-    assert all([answer.status_code == 401 for answer in answers])
+    with pytest.raises(Exception) as e:
+        # Still requires permission
+        runregistry.change_run_class(run_numbers=VALID_RUN_NUMBER, new_class="test")
+    assert EGROUPS_ERROR in e.exconly()
 
 
-def test_edit_rr_lumisections(setup_runregistry):
-    answer = runregistry.edit_rr_lumisections(
-        VALID_RUN_NUMBER, 0, 1, "castor-castor", "GOOD"
-    )
-    # Requires permission
-    assert answer.status_code == 401
+def test_advanced_change_run_class_list_with_bad_run_types(setup_runregistry):
+    with pytest.raises(ValueError):
+        runregistry.change_run_class(run_numbers=["3455555"], new_class="test")
+
+
+def test_advanced_change_run_class_bad_run_numbers(setup_runregistry):
+    with pytest.raises(ValueError):
+        runregistry.change_run_class(run_numbers="3455555", new_class="test")
+
+
+def test_advanced_change_run_class_bad_new_class_type(setup_runregistry):
+    with pytest.raises(ValueError):
+        # new_class must be a string
+        runregistry.change_run_class(run_numbers=[VALID_RUN_NUMBER], new_class=1)
diff --git a/tests/test_client.py b/tests/test_client.py
index 93bc5da..aba11b2 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1,6 +1,6 @@
 import json
-import warnings
-
+import pytest
+import runregistry
 from runregistry.runregistry import (
     get_run,
     get_runs,
@@ -12,7 +12,9 @@
     get_lumisection_ranges,
     get_oms_lumisection_ranges,
     get_joint_lumisection_ranges,
-    generate_json,
+    get_cycles,
+    get_lumisection_ranges_by_component,
+    get_datasets_accepted,
     create_json,
     setup,
 )
@@ -26,7 +28,7 @@
 INVALID_DATASET_NAME = "/PromptKikiriko/HELLOCosmics18Z/DQM"
 
 
-def test_get_run():
+def test_client_get_run():
     run_number = VALID_RUN_NUMBER
     run = get_run(run_number=VALID_RUN_NUMBER)
     assert run["run_number"] == VALID_RUN_NUMBER
@@ -36,7 +38,13 @@
     assert not run
 
 
-def test_get_runs():
+def test_client_get_runs_no_filter():
+    with pytest.raises(Exception) as e:
+        runs = get_runs()
+    assert "must pass a filter" in e.exconly()
+
+
+def test_client_get_runs():
     # Gets runs between run number VALID_RUN_RANGE_START and VALID_RUN_RANGE_STOP
     filter_run = {
         "run_number": {
@@ -69,7 +77,7 @@
     assert len(runs) > 0
 
 
-def test_get_runs_with_ignore_filter():
+def test_client_get_runs_with_ignore_filter():
     filter_run = {
         "run_number": {
             "and": [{">": VALID_RUN_RANGE_START}, {"<": VALID_RUN_RANGE_STOP}]
@@ -81,7 +89,7 @@
     assert len(runs) > 0
 
 
-def test_get_datasets_with_ignore_filter():
+def test_client_get_datasets_with_ignore_filter():
     # datasets = get_datasets(filter={
     #     "run_number": {
     #         "and": [{
@@ -121,12 +129,12 @@
     assert len(datasets) > 0
 
 
-# test_get_datasets_with_ignore_filter()
+# test_client_get_datasets_with_ignore_filter()
 
 
-# test_get_runs_with_ignore_filter()
+# test_client_get_runs_with_ignore_filter()
 
 
-def test_get_runs_not_compressed():
+def test_client_get_runs_not_compressed():
     runs = get_runs(
         filter={
             "run_number": {
@@ -156,12 +164,12 @@ def get_runs_with_combined_filter():
     assert len(runs) > 0
 
 
-def test_get_dataset_names_of_run():
+def test_client_get_dataset_names_of_run():
     dataset_names = get_dataset_names_of_run(run_number=VALID_RUN_NUMBER)
     assert len(dataset_names) > 0
 
 
-def test_get_dataset():
+def test_client_get_dataset():
     dataset = get_dataset(run_number=VALID_RUN_NUMBER, dataset_name=VALID_DATASET_NAME)
     assert dataset["run_number"] == VALID_RUN_NUMBER
     assert dataset["name"] == VALID_DATASET_NAME
@@ -171,54 +179,87 @@
     assert not dataset
 
 
-def test_get_datasets():
+def test_client_get_datasets_no_limit():
     datasets = get_datasets(
+        compress_attributes=True,
         filter={
             "run_number": {
                 "and": [{">": VALID_RUN_RANGE_START}, {"<": VALID_RUN_RANGE_STOP}]
             }
-        }
+        },
+    )
+    assert len(datasets) > 0
+    assert "Run" not in datasets[0]
+
+
+def test_client_get_datasets_no_compression():
+    datasets = get_datasets(
+        compress_attributes=False,
+        filter={
+            "run_number": {
+                "and": [{">": VALID_RUN_RANGE_START}, {"<": VALID_RUN_RANGE_STOP}]
+            }
+        },
     )
     assert len(datasets) > 0
+    assert "Run" in datasets[0]
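+# Judging by the two tests above, compress_attributes=True (the default) folds
+# the joined OMS/RR attributes into each dataset dict, while False keeps the
+# nested "Run" object.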
"/PromptReco/Collisions2018D/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018E/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018F/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018G/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018H/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018I/DQM"]} -# ] -# }, -# { ">=": [{ "var": "run.oms.energy" }, 6000] }, -# { "<=": [{ "var": "run.oms.energy" }, 7000] }, -# { ">=": [{ "var": "run.oms.b_field" }, 3.7] }, -# { "in": [ "25ns", { "var": "run.oms.injection_scheme" }] }, -# { "==": [{ "in": [ "WMass", { "var": "run.oms.hlt_key" }] }, false] }, - -# { "==": [{ "var": "lumisection.rr.dt-dt" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.csc-csc" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.l1t-l1tmu" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.l1t-l1tcalo" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.hlt-hlt" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.tracker-pixel" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.tracker-strip" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.tracker-track" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.ecal-ecal" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.ecal-es" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.hcal-hcal" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.muon-muon" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.jetmet-jetmet" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.lumi-lumi" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.dc-lowlumi" }, "BAD"] }, - -# { "==": [{ "var": "lumisection.oms.cms_active" }, true] }, -# { "==": [{ "var": "lumisection.oms.bpix_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.fpix_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.tibtid_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.tecm_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.tecp_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.tob_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.ebm_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.ebp_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.eem_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.eep_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.esm_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.esp_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.hbhea_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.hbheb_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.hbhec_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.hf_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.ho_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.dtm_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.dtp_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.dt0_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.cscm_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.cscp_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.rpc_ready" }, true] }, -# { "==": [{ "var": "lumisection.oms.beam1_present" }, true] }, -# { "==": [{ "var": "lumisection.oms.beam2_present" }, true] }, -# { "==": [{ "var": "lumisection.oms.beam1_stable" }, true] }, -# { "==": [{ "var": "lumisection.oms.beam2_stable" }, true] } -# ] -# } -# """ -# UNSAFE: -# final_json = generate_json(json_logic) -# assert final_json is not None -# json_logic2 = { -# "and": [ -# { -# "or": [ -# {"==": [{"var": 
"dataset.name"}, "/PromptReco/Collisions2018A/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018B/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018C/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018D/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018E/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018F/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018G/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018H/DQM"]}, -# {"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018I/DQM"]} -# ] -# }, -# { ">=": [{ "var": "run.oms.energy" }, 6000] }, -# { "<=": [{ "var": "run.oms.energy" }, 7000] }, -# { ">=": [{ "var": "run.oms.b_field" }, 3.7] }, -# { "in": [ "25ns", { "var": "run.oms.injection_scheme" }] }, -# { "==": [{ "in": [ "WMass", { "var": "run.oms.hlt_key" }] }, False] }, - -# { "==": [{ "var": "lumisection.rr.dt-dt" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.csc-csc" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.l1t-l1tmu" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.l1t-l1tcalo" }, "GOOD"] }, -# { "==": [{ "var": "lumisection.rr.hlt-hlt" }, "GOOD"] }, - -# { "==": [{ "var": "lumisection.oms.bpix_ready" }, True] } -# ] -# } -# final_json2 = generate_json(json_logic2) - -# assert final_json2 is not None - json_logic = { "and": [ {">=": [{"var": "run.oms.energy"}, 6000]}, @@ -384,14 +288,14 @@ def test_generate_json(): } -def test_create_json(): +def test_client_create_json(): json = create_json( json_logic=json_logic, dataset_name_filter="/PromptReco/Collisions2018A/DQM" ) print(json) -def test_custom_filter(): +def test_client_custom_filter(): filter_arg = { "dataset_name": {"like": "%/PromptReco/Cosmics18CRUZET%"}, "run_number": { @@ -404,3 +308,47 @@ def test_custom_filter(): datasets = get_datasets(filter=filter_arg) assert datasets + + +def test_client_setup_random(): + with pytest.raises(Exception) as e: + setup("Olo kaskarikes mas kaneis") + assert "Invalid setup target" in e.exconly() + + +def test_client_setup_production(): + setup("production") + assert "https://cmsrunregistry" in runregistry.runregistry.api_url + + +def test_client_setup_development(): + setup("development") + assert "https://dev-cmsrunregistry" in runregistry.runregistry.api_url + + +def test_client_get_cycles(): + answers = get_cycles() + assert len(answers) > 0 + assert "id_cycle" in answers[0] + assert "cycle_name" in answers[0] + assert "cycle_attributes" in answers[0] + assert "deadline" in answers[0] + assert "datasets" in answers[0] + + +def test_client_get_lumisection_ranges_by_component(): + answer = get_lumisection_ranges_by_component(VALID_RUN_NUMBER) + assert len(answer.keys()) > 0 + assert "dt-dt" in answer + assert "rpc-hv" in answer + assert "tracker-track_private" in answer + + +def test_client_get_datasets_accepted(): + answers = get_datasets_accepted() + assert len(answers) > 0 + assert isinstance(answers[0], dict) + assert "regexp" in answers[0] + assert "id" in answers[0] + assert "name" in answers[0] + assert "class" in answers[0]