Make all methods' return values uniform #11

Merged: 6 commits merged on Oct 24, 2024
8 changes: 7 additions & 1 deletion .github/workflows/test_package.yaml
@@ -11,6 +11,8 @@ jobs:

steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
@@ -34,4 +36,8 @@ jobs:
SSO_CLIENT_SECRET: ${{ secrets.SSO_CLIENT_SECRET }}
ENVIRONMENT: ${{ vars.ENVIRONMENT }}
run: |
pytest tests --retries 2
pytest tests --cov=runregistry --cov-report xml --retries 2
- name: Upload results to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
27 changes: 15 additions & 12 deletions README.md
@@ -1,4 +1,6 @@
![Build Status](https://github.com/cms-DQM/runregistry_api_client/actions/workflows/test_package.yaml/badge.svg)
[![codecov](https://codecov.io/github/cms-DQM/runregistry_api_client/graph/badge.svg?token=IRADJ57684)](https://codecov.io/github/cms-DQM/runregistry_api_client)
[![PyPI version](https://badge.fury.io/py/runregistry.png)](https://badge.fury.io/py/runregistry)

# Run Registry Client

@@ -389,17 +391,17 @@ generated_json = runregistry.create_json(json_logic=json_logic, dataset_name_fil
You can also manipulate runs via API:

1. Mark run significant:
```python
runregistry.make_significant_runs(run=362761)
```
```python
runregistry.make_significant_runs(run=362761)
```
2. Reset RR attributes and reload data from OMS:
```python
runregistry.reset_RR_attributes_and_refresh_runs(run=362761)
```
```python
runregistry.reset_RR_attributes_and_refresh_runs(run=362761)
```
3. Move runs from one state to another:
```python
runregistry.move_runs("OPEN", "SIGNOFF", run=362761)
```
```python
runregistry.move_runs("OPEN", "SIGNOFF", run=362761)
```
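
Below is a minimal sketch of the calling pattern under the uniform return values this PR introduces, assuming (as the runregistry.py diff further down shows) that these helpers now return a list of parsed JSON answers and raise an exception on any non-200 response; the run number is the same illustrative one used above.

```python
# Sketch only: assumes the post-PR behaviour where run-manipulation helpers
# return a list of parsed JSON answers (one per run) and raise on non-200.
import runregistry

try:
    answers = runregistry.move_runs("OPEN", "SIGNOFF", run=362761)
    for answer in answers:
        print(answer)  # parsed JSON answer for each moved run
except Exception as err:
    print(f"Run Registry request failed: {err}")
```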

## Troubleshooting

@@ -414,6 +416,7 @@ python3 -m pip install --upgrade pip build twine
python3 -m build
python3 -m twine upload --skip-existing --repository pypi dist/*
```

Instructions from [here](https://packaging.python.org/en/latest/tutorials/packaging-projects/).

## Running the tests
@@ -451,12 +454,12 @@ No.

### Should I be using `runregistry_api_client` for getting OMS data?

No*.
No\*.

Our recommendation is to query Run Registry only for data that RR is responsible for.

<small>*It's not that you can't, it's just that this puts extra burden on the application, making it slow for everyone.</small>
<small>\*It's not that you can't, it's just that this puts extra burden on the application, making it slow for everyone.</small>

### Is the token stored somewhere and reused?

No, almost every function call gets a new token. This is not ideal, and it may be improved in the future.
No, almost every function call gets a new token. This is not ideal, and it may be improved in the future.
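
For illustration, a hedged sketch of the fresh-token-per-call flow described above, built only from names visible in the runregistry/runregistry.py diff below (_get_token, _get_headers and api_url are module internals, so this is not a supported public API):

```python
# Sketch only: shows the fresh-token-per-request flow; these helpers are
# module-internal and may change, so this is illustrative, not public API.
import requests
from runregistry.runregistry import _get_headers, _get_token, api_url

headers = _get_headers(token=_get_token())   # a new token is fetched for this call
response = requests.get(f"{api_url}/datasets_accepted", headers=headers)
print(response.json())
```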
116 changes: 62 additions & 54 deletions runregistry/runregistry.py
@@ -2,7 +2,6 @@
import time
import json
import requests
import warnings
from dotenv import load_dotenv
from cernrequests import get_api_token, get_with_token
from runregistry.utils import (
@@ -66,6 +65,10 @@
api_url = "https://cmsrunregistry.web.cern.ch/api"
use_cookies = True
target_application = "webframeworks-paas-cmsrunregistry"
else:
raise Exception(
f'Invalid setup target "{target}". Valid options: "local", "development", "production".'
)
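
A minimal usage sketch for the target selection that this new else branch guards; setup() as the public entry point is an assumption taken from the package README and is not shown in this hunk.

```python
# Sketch only: "setup" is assumed to be the public entry point that runs the
# target handling above; an unknown target now raises instead of being ignored.
import runregistry

runregistry.setup("production")    # cmsrunregistry.web.cern.ch
try:
    runregistry.setup("staging")   # not a valid target
except Exception as err:
    print(err)  # Invalid setup target "staging". Valid options: ...
```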


def _get_headers(token: str = ""):
@@ -345,25 +348,6 @@
return _get_lumisection_helper(url, run_number, dataset_name, **kwargs)


# DO NOT USE Using compiler (not-safe):
def generate_json(json_logic, **kwargs):
"""
DO NOT USE, USE THE ONE BELOW (create_json)...
It receives a json logic configuration and returns a json with lumisections which pass the filter
"""
warnings.warn(
"The generate_json is unsafe and will be deprecated. Please use create_json instead",
PendingDeprecationWarning,
)
if not isinstance(json_logic, str):
json_logic = json.dumps(json_logic)
url = "{}/json_creation/generate".format(api_url)
headers = _get_headers(token=_get_token())
payload = json.dumps({"json_logic": json_logic})
response = requests.post(url, headers=headers, data=payload).json()
return response["final_json"]


# Using json portal (safe):
def create_json(json_logic, dataset_name_filter, **kwargs):
"""
@@ -406,6 +390,15 @@
)


def get_datasets_accepted():
"""
Method for fetching current datasets accepted in Offline Run Registry
"""
url = "{}/datasets_accepted".format(api_url)
headers = _get_headers(token=_get_token())
return requests.get(url, headers=headers).json()
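
A short usage sketch for the relocated get_datasets_accepted() helper; the structure of the returned entries is whatever the /datasets_accepted endpoint serves and is not specified in this diff.

```python
# Sketch only: get_datasets_accepted() returns the parsed JSON from
# /datasets_accepted; a list of entries is assumed here for printing.
import runregistry

accepted = runregistry.get_datasets_accepted()
print(f"{len(accepted)} accepted dataset entries")
for entry in accepted[:3]:   # peek at the first few entries
    print(entry)
```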


# advanced RR operations ==============================================================================
# Online Table
def move_runs(from_, to_, run=None, runs=[], **kwargs):
@@ -426,14 +419,17 @@
headers = _get_headers(token=_get_token())

if run:
payload = json.dumps({"run_number": run})
return requests.post(url, headers=headers, data=payload)
runs = [run]

answers = []
for run_number in runs:
payload = json.dumps({"run_number": run_number})
answer = requests.post(url, headers=headers, data=payload)
answers.append(answer)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when moving datasets: {answer.text}"
)
answers.append(answer.json())

Codecov (codecov/patch) warning on line 432 in runregistry/runregistry.py: added line #L432 was not covered by tests.

return answers

@@ -448,18 +444,20 @@
)

url = "%s/runs/mark_significant" % (api_url)

headers = _get_headers(token=_get_token())

if run:
data = {"run_number": run}
return requests.post(url, headers=headers, json=data)
runs = [run]

answers = []
for run_number in runs:
data = {"run_number": run}
answer = requests.post(url, headers=headers, json=data)
answers.append(answer)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when making runs significant: {answer.text}"
)
answers.append(answer.json())

Codecov (codecov/patch) warning on line 460 in runregistry/runregistry.py: added line #L460 was not covered by tests.

return answers
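
For completeness, a sketch of the multi-run form the loop above now handles; the runs keyword is inferred from the run/runs pattern visible in move_runs, and the run numbers are illustrative.

```python
# Sketch only: mark several runs significant in one call; each element of the
# returned list is the parsed JSON answer for one run (runs= is inferred).
import runregistry

answers = runregistry.make_significant_runs(runs=[362761, 362762])
for answer in answers:
    print(answer)
```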

@@ -478,7 +476,11 @@
for run_number in runs:
url = "%s/runs/reset_and_refresh_run/%d" % (api_url, run_number)
answer = requests.post(url, headers=headers)
answers.append(answer)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when resetting and refreshing runs: {answer.text}"
)
answers.append(answer.json())

return answers

@@ -500,7 +502,11 @@
for run_number in runs:
url = "%s/runs/refresh_run/%d" % (api_url, run_number)
answer = requests.post(url, headers=headers)
answers.append(answer)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when manually refreshing component statuses: {answer.text}"
)
answers.append(answer.json())

return answers

@@ -520,7 +526,7 @@
WIP edit RR lumisections attributes
"""
if status not in LUMISECTION_STATES:
raise Exception(
raise ValueError(
f"edit_rr_lumisections(): got status '{status}'",
f" but allowed statuses are {LUMISECTION_STATES}",
)
@@ -542,7 +548,12 @@
"component": component,
}
)
return requests.put(url, headers=headers, data=payload)
answer = requests.put(url, headers=headers, data=payload)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when editing rr lumisections: {answer.text}"
)
return answer.json()

Codecov (codecov/patch) warning on line 556 in runregistry/runregistry.py: added line #L556 was not covered by tests.


def move_datasets(
@@ -566,10 +577,7 @@
headers = _get_headers(token=_get_token())

if run:
payload = json.dumps(
{"run_number": run, "dataset_name": dataset_name, "workspace": workspace}
)
return [requests.post(url, headers=headers, data=payload)]
runs = [run]

answers = []
for run_number in runs:
@@ -580,8 +588,12 @@
"workspace": workspace,
}
)
answer = requests.post(url, headers=headers, data=payload).json()
answers.append(answer)
answer = requests.post(url, headers=headers, data=payload)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when moving datasets: {answer.text}"
)
answers.append(answer.json())

Codecov (codecov/patch) warning on line 596 in runregistry/runregistry.py: added line #L596 was not covered by tests.

return answers

@@ -603,30 +615,26 @@
)

if not isinstance(new_class, str):
raise Exception('Invalid input for "new_class"')
raise ValueError(f'Invalid input "{new_class}" for "new_class"')
answers = []
# If just one int provided, make it into a list
if isinstance(run_numbers, int):
run_numbers = [run_numbers]

if isinstance(run_numbers, list):
for run_number in run_numbers:
if not isinstance(run_number, int):
raise Exception(
raise ValueError(
"Invalid run number value found in run_numbers. Please provide a list of numbers."
)
answers.append(_execute_request_for_single_run(run_number, new_class))
elif isinstance(run_numbers, int):
answers.append(_execute_request_for_single_run(run_numbers, new_class))
answer = _execute_request_for_single_run(run_number, new_class)
if answer.status_code != 200:
raise Exception(
f"Got response {answer.status_code} when changing run class: {answer.text}"
)
answers.append(answer.json())

Codecov (codecov/patch) warning on line 635 in runregistry/runregistry.py: added line #L635 was not covered by tests.
else:
raise Exception(
raise ValueError(
'Invalid input for "run_numbers". Please provide a list of numbers.'
)
return answers


def get_datasets_accepted():
"""
Method for fetching current datasets accepted in Offline Run Registry
"""
url = "{}/datasets_accepted".format(api_url)
headers = _get_headers(token=_get_token())
if os.getenv("ENVIRONMENT") in ["development", "local"]:
print(url)
return requests.get(url, headers=headers).json()