
Commit 402a617

Merge pull request #707 from Labelbox/develop
3.27.1
2 parents 0f19dbd + 0f59fd7 commit 402a617

File tree: 9 files changed, +79 -70 lines


CHANGELOG.md
Lines changed: 4 additions & 0 deletions

@@ -1,5 +1,9 @@
 # Changelog
 
+# Version 3.27.1 (2022-09-16)
+### Changed
+* Removed `client.get_data_rows_for_global_keys` until further notice
+
 # Version 3.27.0 (2022-09-12)
 ### Added
 * Global Keys for data rows

CONTRIB.md
Lines changed: 11 additions & 2 deletions

@@ -25,6 +25,14 @@ following packages/modules:
 * Approved PRs are merged to the `develop` branch.
 * The `develop` branch is merged to `master` on each release.
 
+## Commits
+
+Before making a commit, to automatically adhere to our formatting standards,
+install and activate [pre-commit](https://pre-commit.com/).
+
+After the above, running `git commit ...` will attempt to fix formatting. If
+there was formatting needed, you will need to re-add and re-commit before pushing.
+
 ## Testing
 
 Currently, the SDK functionality is tested using integration tests. These tests
@@ -36,7 +44,8 @@ To execute tests you will need to provide an API key for the server you're using
 for testing (staging by default) in the `LABELBOX_TEST_API_KEY` environment
 variable. For more info see [Labelbox API key docs](https://labelbox.helpdocs.io/docs/api/getting-started).
 
-To pass tests, code must be formatted using the following command:
+To pass tests, code must be formatted. If pre-commit was not installed,
+you will need to use the following command:
 
 ```shell
 yapf tests labelbox -i --verbose --recursive --parallel --style "google"
@@ -54,4 +63,4 @@ Each release should follow the following steps:
 6. This will kick off a Github Actions workflow that will:
     - Build the library in the [standard way](https://packaging.python.org/tutorials/packaging-projects/#generating-distribution-archives)
     - Upload the distribution archives in the [standard way](https://packaging.python.org/tutorials/packaging-projects/#uploading-the-distribution-archives)
-      with credentials for the `labelbox` PyPI user.
+      with credentials for the `labelbox` PyPI user.
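For contributors who skip pre-commit, the same "google"-style check that the `yapf` shell command above enforces can also be reproduced from Python. A minimal sketch, assuming `yapf` is installed and using its public `FormatCode` helper (the file path is just an example):

```python
# Sketch: check whether a file already satisfies the project's "google"
# yapf style, mirroring what the shell command in CONTRIB.md enforces.
from yapf.yapflib.yapf_api import FormatCode

with open("labelbox/client.py") as f:
    source = f.read()

# FormatCode returns the reformatted text and a flag indicating
# whether any change was needed.
formatted, changed = FormatCode(source, style_config="google")
print("needs formatting" if changed else "already formatted")
```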

docs/source/conf.py
Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@
 copyright = '2021, Labelbox'
 author = 'Labelbox'
 
-release = '3.27.0'
+release = '3.27.1'
 
 # -- General configuration ---------------------------------------------------
 
examples/basics/basics.ipynb
Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@
     "The quick version is basically just\n",
     "1. `!pip install labelbox`\n",
     "2. `export LABELBOX_API_KEY=\"<your_api_key>\"`\n",
-    "* Get this from the UI under (Account -> API -> Create API Key)\n",
+    "* Get this from the UI under (Workspace settings -> API -> Create API Key)\n",
     "* You can also set the api_key below in the notebook.\n",
     "\n",
     "This only works for cloud deployments.\n",

examples/basics/batches.ipynb
Lines changed: 1 addition & 1 deletion

@@ -239,7 +239,7 @@
    },
    "outputs": [],
    "source": [
-    "sample = random.sample(data_rows, 5)"
+    "sample = random.sample(data_row_ids, 5)"
    ]
   },
   {
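This one-line fix points `random.sample` at the list of ID strings the notebook actually builds (`data_row_ids`) rather than `data_rows`. A self-contained sketch of the corrected cell, with hypothetical placeholder IDs standing in for real data row IDs:

```python
import random
import uuid

# Placeholder stand-ins for the notebook's data_row_ids list.
data_row_ids = [str(uuid.uuid4()) for _ in range(25)]

# random.sample picks 5 distinct IDs without replacement,
# e.g. to assemble a small batch for a project.
sample = random.sample(data_row_ids, 5)
print(sample)
```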

labelbox/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -1,5 +1,5 @@
 name = "labelbox"
-__version__ = "3.27.0"
+__version__ = "3.27.1"
 
 from labelbox.client import Client
 from labelbox.schema.project import Project

labelbox/client.py
Lines changed: 0 additions & 42 deletions

@@ -1192,45 +1192,3 @@ def _format_failed_rows(rows: List[str],
                     "Timed out waiting for get_data_rows_for_global_keys job to complete."
                 )
             time.sleep(sleep_time)
-
-    def get_data_rows_for_global_keys(
-            self,
-            global_keys: List[str],
-            timeout_seconds=60) -> Dict[str, Union[str, List[Any]]]:
-        """
-        Gets data rows for a list of global keys.
-
-        Args:
-            A list of global keys
-        Returns:
-            Dictionary containing 'status', 'results' and 'errors'.
-
-            'Status' contains the outcome of this job. It can be one of
-            'Success', 'Partial Success', or 'Failure'.
-
-            'Results' contains a list of `DataRow` instances successfully fetched. It may
-            not necessarily contain all data rows requested.
-
-            'Errors' contains a list of global_keys that could not be fetched, along
-            with the failure reason
-        Examples:
-            >>> job_result = client.get_data_rows_for_global_keys(["key1","key2"])
-            >>> print(job_result['status'])
-            Partial Success
-            >>> print(job_result['results'])
-            [<DataRow ID: cl7tvvybc00icka6ggipyh8tj>, <DataRow ID: cl7tvvyfp00igka6gblrw2idc>]
-            >>> print(job_result['errors'])
-            [{'global_key': 'asdf', 'error': 'Data Row not found'}]
-        """
-        job_result = self.get_data_row_ids_for_global_keys(
-            global_keys, timeout_seconds)
-
-        # Query for data row by data_row_id to ensure we get all fields populated in DataRow instances
-        data_rows = []
-        for data_row_id in job_result['results']:
-            # TODO: Need to optimize this to run over a collection of data_row_ids
-            data_rows.append(self.get_data_row(data_row_id))
-
-        job_result['results'] = data_rows
-
-        return job_result
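Although this convenience wrapper is withdrawn, the diff leaves `get_data_row_ids_for_global_keys` and `get_data_row` in place, so callers can reproduce the old behavior themselves. A hedged client-side sketch under that assumption (this helper name is hypothetical, not an official replacement):

```python
from typing import Any, Dict, List, Union


def data_rows_for_global_keys(client, global_keys: List[str],
                              timeout_seconds: int = 60
                             ) -> Dict[str, Union[str, List[Any]]]:
    """Client-side stand-in for the removed convenience method.

    Resolves global keys to data row IDs, then fetches each DataRow
    individually, exactly as the deleted wrapper did.
    """
    job_result = client.get_data_row_ids_for_global_keys(
        global_keys, timeout_seconds)
    # One get_data_row call per ID: fine for small lists, slow for large
    # ones (the deleted code carried the same TODO about batching).
    job_result['results'] = [
        client.get_data_row(data_row_id)
        for data_row_id in job_result['results']
    ]
    return job_result
```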

labelbox/schema/model_run.py
Lines changed: 56 additions & 22 deletions

@@ -132,49 +132,83 @@ def upsert_predictions_and_send_to_project(
             project_id: str,
             priority: Optional[int] = 5,
     ) -> 'MEAPredictionImport':  # type: ignore
-        """ Upload predictions and create a batch import to project.
+        """
+        Provides a convenient way to execute the following steps in a single function call:
+            1. Upload predictions to a Model
+            2. Create a batch from data rows that had predictions associated with them
+            3. Attach the batch to a project
+            4. Add those same predictions to the project as MAL annotations
+
+        Note that partial successes are possible.
+        If it is important that all stages are successful then check the status of each individual task
+        with task.errors. E.g.
+
+        >>> mea_import_job, batch, mal_import_job = upsert_predictions_and_send_to_project(name, predictions, project_id)
+        >>> # handle mea import job successfully created (check for job failure or partial failures)
+        >>> print(mea_import_job.status, mea_import_job.errors)
+        >>> if batch is None:
+        >>>    # Handle batch creation failure
+        >>> if mal_import_job is None:
+        >>>    # Handle mal_import_job creation failure
+        >>> else:
+        >>>    # handle mal import job successfully created (check for job failure or partial failures)
+        >>>    print(mal_import_job.status, mal_import_job.errors)
+
         Args:
             name (str): name of the AnnotationImport job as well as the name of the batch import
             predictions (Iterable):
                 iterable of annotation rows
             project_id (str): id of the project to import into
             priority (int): priority of the job
         Returns:
-            (MEAPredictionImport, Batch, MEAToMALPredictionImport)
+            Tuple[MEAPredictionImport, Batch, MEAToMALPredictionImport]
+            If any of these steps fail the return value will be None.
+
         """
         kwargs = dict(client=self.client, model_run_id=self.uid, name=name)
         project = self.client.get_project(project_id)
         import_job = self.add_predictions(name, predictions)
         prediction_statuses = import_job.statuses
-        mea_to_mal_data_rows_set = set([
-            row['dataRow']['id']
-            for row in prediction_statuses
-            if row['status'] == 'SUCCESS'
-        ])
         mea_to_mal_data_rows = list(
-            mea_to_mal_data_rows_set)[:DATAROWS_IMPORT_LIMIT]
-
-        if len(mea_to_mal_data_rows) >= DATAROWS_IMPORT_LIMIT:
+            set([
+                row['dataRow']['id']
+                for row in prediction_statuses
+                if row['status'] == 'SUCCESS'
+            ]))
+
+        if not mea_to_mal_data_rows:
+            # 0 successful model predictions imported
+            return import_job, None, None
 
+        elif len(mea_to_mal_data_rows) >= DATAROWS_IMPORT_LIMIT:
+            mea_to_mal_data_rows = mea_to_mal_data_rows[:DATAROWS_IMPORT_LIMIT]
             logger.warning(
-                f"Got {len(mea_to_mal_data_rows_set)} data rows to import, trimmed down to {DATAROWS_IMPORT_LIMIT} data rows"
+                f"Exceeded max data row limit {len(mea_to_mal_data_rows)}, trimmed down to {DATAROWS_IMPORT_LIMIT} data rows."
             )
-        if len(mea_to_mal_data_rows) == 0:
-            return import_job, None, None
 
         try:
             batch = project.create_batch(name, mea_to_mal_data_rows, priority)
-            try:
-                mal_prediction_import = Entity.MEAToMALPredictionImport.create_for_model_run_data_rows(
-                    data_row_ids=mea_to_mal_data_rows,
-                    project_id=project_id,
-                    **kwargs)
-                return import_job, batch, mal_prediction_import
-            except:
-                return import_job, batch, None
-        except:
+        except Exception as e:
+            logger.warning(f"Failed to create batch. Message : {e}.")
+            # Unable to create batch
             return import_job, None, None
 
+        try:
+            mal_prediction_import = Entity.MEAToMALPredictionImport.create_for_model_run_data_rows(
+                data_row_ids=mea_to_mal_data_rows,
+                project_id=project_id,
+                **kwargs)
+            mal_prediction_import.wait_until_done()
+        except Exception as e:
+            logger.warning(
+                f"Failed to create MEA to MAL prediction import. Message : {e}."
+            )
+            # Unable to create mea to mal prediction import
+            return import_job, batch, None
+
+        return import_job, batch, mal_prediction_import
+
     def add_predictions(
             self,
             name: str,
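The rewritten docstring spells out the partial-success contract: later return values are `None` when an earlier stage failed. A short caller-side sketch of that pattern, assuming a `model_run`, a `predictions` iterable, and a `project_id` are already in hand (the job name is a placeholder):

```python
# Each stage can fail independently; check the tuple left to right.
mea_import_job, batch, mal_import_job = model_run.upsert_predictions_and_send_to_project(
    name="nightly-upload",  # placeholder job/batch name
    predictions=predictions,
    project_id=project_id)

# Stage 1: prediction upload outcome (may be a partial failure).
print(mea_import_job.status, mea_import_job.errors)

if batch is None:
    print("batch creation failed; see warnings in the log")
elif mal_import_job is None:
    print("MEA-to-MAL import failed; the batch was still created")
else:
    print(mal_import_job.status, mal_import_job.errors)
```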

tests/integration/test_global_keys.py
Lines changed: 4 additions & 0 deletions

@@ -147,6 +147,8 @@ def test_global_key_with_whitespaces_validation(client, dataset, image_url):
         ['Invalid global key', 'Invalid global key', 'Invalid global key'])
 
 
+@pytest.mark.skip(reason='get_data_rows_for_global_keys not included in '
+                  'the initial release of global_keys')
 def test_get_data_rows_for_global_keys(client, dataset, image_url):
     gk_1 = str(uuid.uuid4())
     gk_2 = str(uuid.uuid4())
@@ -174,6 +176,8 @@ def test_get_data_rows_for_global_keys(client, dataset, image_url):
     assert res['results'] == [dr_1, dr_2]
 
 
+@pytest.mark.skip(reason='get_data_rows_for_global_keys not included in '
+                  'the initial release of global_keys')
 def test_get_data_rows_for_invalid_global_keys(client, dataset, image_url):
     gk_1 = str(uuid.uuid4())
     gk_2 = str(uuid.uuid4())
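The skip decorator keeps these tests in the suite while the API is withdrawn, so they can be re-enabled by deleting two lines once the method returns. A minimal self-contained illustration of the same pytest mechanism, with a hypothetical test body:

```python
import pytest


@pytest.mark.skip(reason='get_data_rows_for_global_keys not included in '
                  'the initial release of global_keys')
def test_placeholder():
    # Never executed; pytest reports it as "skipped" with the reason above.
    assert False
```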
