Add support for Python 3.13
* Upgrade pre-commit dependencies
replaceafill authored Jan 13, 2025
1 parent 0ebb512 commit 255cca0
Showing 15 changed files with 121 additions and 123 deletions.
10 changes: 6 additions & 4 deletions .github/workflows/test.yml
@@ -10,7 +10,7 @@ on:
 jobs:
   test-virtualenv:
     name: "Test Python ${{ matrix.python-version }} (virtualenv)"
-    runs-on: "ubuntu-22.04"
+    runs-on: "ubuntu-24.04"
     strategy:
       fail-fast: false
       matrix:
@@ -19,6 +19,7 @@ jobs:
           "3.10",
           "3.11",
           "3.12",
+          "3.13",
         ]
     steps:
       - name: "Check out repository"
@@ -40,7 +41,7 @@ jobs:
           ./simplebrowsertest.py
   test-docker:
     name: "Test Python ${{ matrix.python-version }} (docker)"
-    runs-on: "ubuntu-22.04"
+    runs-on: "ubuntu-24.04"
     strategy:
       fail-fast: false
       matrix:
@@ -49,6 +50,7 @@ jobs:
           "3.10",
           "3.11",
           "3.12",
+          "3.13",
         ]
     steps:
       - name: "Check out repository"
@@ -75,14 +77,14 @@ jobs:
           /home/artefactual/acceptance-tests/simplebrowsertest.py
   lint:
     name: "Lint"
-    runs-on: "ubuntu-22.04"
+    runs-on: "ubuntu-24.04"
     steps:
       - name: "Check out repository"
         uses: "actions/checkout@v4"
       - name: "Set up Python"
         uses: "actions/setup-python@v5"
         with:
-          python-version: "3.9"
+          python-version: "3.x"
           cache: "pip"
           cache-dependency-path: |
             requirements.txt
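
Each string in the matrix above becomes its own CI job, with actions/setup-python provisioning the requested interpreter; the lint job's "3.x" spec instead resolves to the newest available Python 3 release. As a minimal sketch, a job could sanity-check that it received the interpreter the matrix asked for, assuming the workflow exported the matrix value as PYTHON_VERSION (a hypothetical variable, not defined in this workflow):

# Hypothetical sanity check, not part of this repository. Assumes a step
# exported PYTHON_VERSION from the matrix entry, e.g. "3.13".
import os
import sys

expected = os.environ.get("PYTHON_VERSION", "3.13")
actual = f"{sys.version_info.major}.{sys.version_info.minor}"
assert actual == expected, (
    f"CI requested Python {expected} but the job is running {actual}"
)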
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -4,7 +4,7 @@ repos:
     hooks:
       - id: end-of-file-fixer
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.5
+    rev: v0.9.1
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
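
The ruff bump from v0.8.5 to v0.9.1 is what drives nearly every Python change below: the newer formatter joins implicitly concatenated strings that fit on one line, and it wraps a long assert message in parentheses rather than splitting the asserted expression. A minimal before/after sketch of the pattern (the placeholder value is hypothetical; the real instances follow in the diffs):

# Sketch of the reformatting ruff v0.9 applies throughout this commit; the
# path is a placeholder so the snippet runs.
import os

aip_dir_path = "/tmp"

# Old formatter output: the asserted expression is split to make room for the
# message, which may itself be an implicitly concatenated string:
#
#     assert os.path.isdir(
#         aip_dir_path
#     ), "Failed to create dir" f" {aip_dir_path}"
#
# New formatter output: the expression stays on one line and the joined
# message is parenthesized:
assert os.path.isdir(aip_dir_path), (
    f"Failed to create dir {aip_dir_path}"
)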
6 changes: 2 additions & 4 deletions amuser/am_browser_preservation_planning_ability.py
@@ -146,7 +146,7 @@ def save_policy_check_command(self, policy_command, description):
         ``policy_command``.
         """
         logger.info(
-            "Creating an FPR policy check command with description" ' "%s".',
+            'Creating an FPR policy check command with description "%s".',
             description,
         )
         self.navigate(self.get_create_command_url())
@@ -157,9 +157,7 @@ def save_policy_check_command(self, policy_command, description):
                 option.click()
                 break
         self.driver.find_element(By.ID, "id_description").send_keys(description)
-        js_script = (
-            'document.getElementById("id_command").value =' f" `{policy_command}`;"
-        )
+        js_script = f'document.getElementById("id_command").value = `{policy_command}`;'
         self.driver.execute_script(js_script)
         self.driver.find_element(By.ID, "id_script_type").send_keys("Python")
         self.driver.find_element(By.ID, "id_command_usage").send_keys("Validation")
25 changes: 11 additions & 14 deletions amuser/am_mets_ability.py
@@ -59,9 +59,9 @@ def validate_mets_for_pids(mets_doc, accession_no=None):
             if entity["name"] == "objects":
                 continue
             # All entities have an id, i.e., DMDID or ADMID
-            assert entity.get(
-                "id"
-            ), "Unable to find a DMDID/ADMID for entity {}".format(entity["path"])
+            assert entity.get("id"), (
+                "Unable to find a DMDID/ADMID for entity {}".format(entity["path"])
+            )
             purls = []
             # All entities should have the following types of identifier
             for idfr_type in ("UUID", "hdl", "URI"):
@@ -70,21 +70,21 @@ def validate_mets_for_pids(mets_doc, accession_no=None):
                 except IndexError:
                     idfr = None
                 assert idfr, (
-                    "Unable to find an identifier of type {} for entity" " {}".format(
+                    "Unable to find an identifier of type {} for entity {}".format(
                         idfr_type, entity["path"]
                     )
                 )
                 if idfr_type == "UUID":
                     assert utils.is_uuid(idfr), f"Identifier {idfr} is not a UUID"
                 elif idfr_type == "hdl":
-                    assert utils.is_hdl(
-                        idfr, entity["type"], accession_no
-                    ), f"Identifier {idfr} is not a hdl"
+                    assert utils.is_hdl(idfr, entity["type"], accession_no), (
+                        f"Identifier {idfr} is not a hdl"
+                    )
                 else:
                     purls.append(idfr)
-        assert utils.all_urls_resolve(
-            purls
-        ), "At least one PURL does not resolve in\n {}".format("\n ".join(purls))
+        assert utils.all_urls_resolve(purls), (
+            "At least one PURL does not resolve in\n {}".format("\n ".join(purls))
+        )

     @staticmethod
     def assert_empty_dir_documented_identified(mets_doc, empty_dir_rel_path):
@@ -146,10 +146,7 @@ def _add_entity_identifiers(entity, doc, ns):
         elif e_type == "file":
             amd_sec_el = doc.xpath(f"mets:amdSec[@ID='{e_id}']", namespaces=ns)[0]
             obj_idfr_els = amd_sec_el.findall(
-                ".//mets:mdWrap/"
-                "mets:xmlData/"
-                "premis:object/"
-                "premis:objectIdentifier",
+                ".//mets:mdWrap/mets:xmlData/premis:object/premis:objectIdentifier",
                 ns,
             )
             for obj_idfr_el in obj_idfr_els:
6 changes: 3 additions & 3 deletions amuser/amuser.py
@@ -85,9 +85,9 @@ def decompress_aip(self, aip_path, cwd=None):
         p = subprocess.Popen(cmd, cwd=cwd, stdout=devnull, stderr=subprocess.STDOUT)
         p.wait()
         assert p.returncode == 0
-        assert os.path.isdir(
-            aip_dir_path
-        ), f"Failed to create dir {aip_dir_path} from compressed AIP at {aip_path}"
+        assert os.path.isdir(aip_dir_path), (
+            f"Failed to create dir {aip_dir_path} from compressed AIP at {aip_path}"
+        )
         return aip_dir_path


6 changes: 4 additions & 2 deletions amuser/constants.py
@@ -189,8 +189,10 @@ def varvn(varname, vn):
     "Reminder: add metadata if desired": ("Add final metadata",),
     "Remove cache files": ("Remove cache files",),
     "Remove empty manual normalization directories": ("Process metadata directory",),
-    "Remove files without linking information (failed normalization"
-    " artifacts etc.)": ("Process submission documentation", "Normalize"),
+    "Remove files without linking information (failed normalization artifacts etc.)": (
+        "Process submission documentation",
+        "Normalize",
+    ),
     "Remove hidden files and directories": ("Verify transfer compliance",),
     "Remove the processing directory": ("Store AIP",),
     "Remove unneeded files": ("Verify transfer compliance",),
36 changes: 17 additions & 19 deletions features/steps/aip_encryption_steps.py
@@ -58,8 +58,7 @@ def step_impl(context):


 @given(
-    "there is a standard GPG-encrypted Transfer Backlog location in the"
-    " storage service"
+    "there is a standard GPG-encrypted Transfer Backlog location in the storage service"
 )
 def step_impl(context):
     context.execute_steps(
@@ -240,9 +239,9 @@ def step_impl(context):
     """
     the_aip_uuid = utils.get_uuid_val(context, "sip")
     search_results = context.scenario.aip_search_results
-    assert (
-        len(search_results) == 2
-    ), f"We expected 2 search results but there are {len(search_results)} in {str(search_results)}"
+    assert len(search_results) == 2, (
+        f"We expected 2 search results but there are {len(search_results)} in {str(search_results)}"
+    )
     the_aips = [dct for dct in search_results if dct["uuid"] == the_aip_uuid]
     not_the_aips = [dct for dct in search_results if dct["uuid"] != the_aip_uuid]
     assert len(the_aips) == 1
@@ -253,14 +252,14 @@ def step_impl(context):
     assert replica["replica_of"] == the_aip["uuid"]
     expected_replica_actions = {"Pointer File", "Download", "Request Deletion"}
     replica_actions = {x.strip() for x in replica["actions"].split("|")}
-    assert (
-        replica_actions == expected_replica_actions
-    ), f"We expected the replica actions to be {expected_replica_actions} but in fact they were {replica_actions}"
+    assert replica_actions == expected_replica_actions, (
+        f"We expected the replica actions to be {expected_replica_actions} but in fact they were {replica_actions}"
+    )
     expected_aip_actions = {"Pointer File", "Download", "Request Deletion", "Re-ingest"}
     aip_actions = {x.strip() for x in the_aip["actions"].split("|")}
-    assert (
-        aip_actions == expected_aip_actions
-    ), f"We expected the AIP actions to be {expected_aip_actions} but in fact they were {aip_actions}"
+    assert aip_actions == expected_aip_actions, (
+        f"We expected the AIP actions to be {expected_aip_actions} but in fact they were {aip_actions}"
+    )
     context.scenario.master_aip_uuid = the_aip_uuid
     context.scenario.replica_aip_uuid = replica_uuid

@@ -447,8 +446,7 @@ def step_impl(context, key_name):


 @then(
-    "the user fails to import the GPG key {key_name} because it requires a"
-    " passphrase"
+    "the user fails to import the GPG key {key_name} because it requires a passphrase"
 )
 def step_impl(context, key_name):
     assert context.scenario.import_gpg_key_result == (
@@ -536,9 +534,9 @@ def step_impl(context):

 @then("the user is prevented from deleting the key because {reason}")
 def step_impl(context, reason):
-    assert (
-        context.scenario.delete_gpg_key_success is False
-    ), f"GPG deletion success is something other than False: {context.scenario.delete_gpg_key_success}"
+    assert context.scenario.delete_gpg_key_success is False, (
+        f"GPG deletion success is something other than False: {context.scenario.delete_gpg_key_success}"
+    )
     if reason == "it is attached to a space":
         assert context.scenario.delete_gpg_key_msg.startswith("GPG key")
         assert context.scenario.delete_gpg_key_msg.endswith(
@@ -553,9 +551,9 @@ def step_impl(context, reason):

 @then("the user succeeds in deleting the GPG key")
 def step_impl(context):
-    assert (
-        context.scenario.delete_gpg_key_success is True
-    ), context.scenario.delete_gpg_key_msg
+    assert context.scenario.delete_gpg_key_success is True, (
+        context.scenario.delete_gpg_key_msg
+    )
     assert context.scenario.delete_gpg_key_msg.endswith("successfully deleted.")


64 changes: 32 additions & 32 deletions features/steps/black_box_steps.py
@@ -793,15 +793,15 @@ def step_impl(context):
 )
 def step_impl(context):
     expected_dmdsec_status = ("original-superseded", "update")
-    assert context.current_transfer[
-        "metadata_csv_files"
-    ], "Could not extract the rows of the original metadata.csv in {}".format(
-        context.current_transfer["extracted_aip_dir"]
+    assert context.current_transfer["metadata_csv_files"], (
+        "Could not extract the rows of the original metadata.csv in {}".format(
+            context.current_transfer["extracted_aip_dir"]
+        )
     )
-    assert context.current_transfer[
-        "reingest_metadata_csv_files"
-    ], "Could not extract the rows of the reingested metadata.csv in {}".format(
-        context.current_transfer["reingest_extracted_aip_dir"]
+    assert context.current_transfer["reingest_metadata_csv_files"], (
+        "Could not extract the rows of the reingested metadata.csv in {}".format(
+            context.current_transfer["reingest_extracted_aip_dir"]
+        )
     )
     reingest_mets = metsrw.METSDocument.fromfile(
         context.current_transfer["reingest_aip_mets_location"]
@@ -930,15 +930,15 @@ def step_impl(context, dmdsec_status):
 def step_impl(context):
     expected_dmdsec_status = ("original-superseded", "update")
     expected_mdwrap_mdtype = ("OTHER",)
-    assert context.current_transfer[
-        "source_metadata_files"
-    ], "Could not extract the rows of the original source-metadata.csv in {}".format(
-        context.current_transfer["extracted_aip_dir"]
+    assert context.current_transfer["source_metadata_files"], (
+        "Could not extract the rows of the original source-metadata.csv in {}".format(
+            context.current_transfer["extracted_aip_dir"]
+        )
     )
-    assert context.current_transfer[
-        "reingest_source_metadata_files"
-    ], "Could not extract the rows of the reingested source-metadata.csv in {}".format(
-        context.current_transfer["reingest_extracted_aip_dir"]
+    assert context.current_transfer["reingest_source_metadata_files"], (
+        "Could not extract the rows of the reingested source-metadata.csv in {}".format(
+            context.current_transfer["reingest_extracted_aip_dir"]
+        )
     )
     reingest_mets = metsrw.METSDocument.fromfile(
         context.current_transfer["reingest_aip_mets_location"]
@@ -1023,15 +1023,15 @@ def step_impl(context):
 def step_impl(context, dmdsec_status):
     expected_dmdsec_status = (dmdsec_status,)
     expected_mdwrap_mdtype = ("OTHER",)
-    assert context.current_transfer[
-        "source_metadata_files"
-    ], "Could not extract the rows of the original source-metadata.csv in {}".format(
-        context.current_transfer["extracted_aip_dir"]
+    assert context.current_transfer["source_metadata_files"], (
+        "Could not extract the rows of the original source-metadata.csv in {}".format(
+            context.current_transfer["extracted_aip_dir"]
+        )
     )
-    assert context.current_transfer[
-        "reingest_source_metadata_files"
-    ], "Could not extract the rows of the reingested source-metadata.csv in {}".format(
-        context.current_transfer["reingest_extracted_aip_dir"]
+    assert context.current_transfer["reingest_source_metadata_files"], (
+        "Could not extract the rows of the reingested source-metadata.csv in {}".format(
+            context.current_transfer["reingest_extracted_aip_dir"]
+        )
     )
     reingest_mets = metsrw.METSDocument.fromfile(
         context.current_transfer["reingest_aip_mets_location"]
@@ -1105,18 +1105,18 @@ def step_impl(context, dmdsec_status):
 def step_impl(context, dmdsec_status):
     expected_dmdsec_status = (dmdsec_status,)
     expected_mdwrap_mdtype = ("OTHER",)
-    assert context.current_transfer[
-        "source_metadata_files"
-    ], "Could not extract the rows of the original source-metadata.csv in {}".format(
-        context.current_transfer["extracted_aip_dir"]
+    assert context.current_transfer["source_metadata_files"], (
+        "Could not extract the rows of the original source-metadata.csv in {}".format(
+            context.current_transfer["extracted_aip_dir"]
+        )
     )
     original_source_metadata_by_type_id = {
         row["type_id"]: row for row in context.current_transfer["source_metadata_files"]
     }
-    assert context.current_transfer[
-        "reingest_source_metadata_files"
-    ], "Could not extract the rows of the reingested source-metadata.csv in {}".format(
-        context.current_transfer["reingest_extracted_aip_dir"]
+    assert context.current_transfer["reingest_source_metadata_files"], (
+        "Could not extract the rows of the reingested source-metadata.csv in {}".format(
+            context.current_transfer["reingest_extracted_aip_dir"]
+        )
     )
     reingest_mets = metsrw.METSDocument.fromfile(
         context.current_transfer["reingest_aip_mets_location"]
8 changes: 4 additions & 4 deletions features/steps/indexless_steps.py
@@ -216,7 +216,7 @@ def step_impl(context):
     assert "Send to backlog" not in context.scenario.decision_options


-@then('the "Index AIP" micro-service output indicates that no indexing has' " occurred")
+@then('the "Index AIP" micro-service output indicates that no indexing has occurred')
 def step_impl(context):
     unit_type = utils.get_normalized_unit_type("ingest")
     uuid_val = utils.get_uuid_val(context, unit_type)
@@ -285,9 +285,9 @@ def step_impl(context, tab_name):
     displayed_tabs = [t.lower() for t in context.am_user.browser.get_displayed_tabs()]
     tab_name = tab_name.replace(" tab", "").strip().lower()
     assert displayed_tabs
-    assert (
-        "transfer" in displayed_tabs
-    ), f'"transfer" is not in {str(displayed_tabs)}'  # sanity check
+    assert "transfer" in displayed_tabs, (
+        f'"transfer" is not in {str(displayed_tabs)}'
+    )  # sanity check
     assert tab_name not in displayed_tabs


4 changes: 2 additions & 2 deletions features/steps/mediaconch_steps.py
@@ -284,7 +284,7 @@ def step_impl(context, contains, policy_file):
     if contains in ("contains", "does contain"):
         assert os.path.isfile(original_policy_path)
         assert os.path.isfile(aip_policy_path), (
-            "There is no MediaConch policy file in the AIP at" f" {aip_policy_path}!"
+            f"There is no MediaConch policy file in the AIP at {aip_policy_path}!"
         )
         with open(original_policy_path) as filei:
             original_policy = filei.read().strip()
@@ -325,7 +325,7 @@ def step_impl(context, contains, policy_file):
     if contains in ("contains", "does contain"):
         assert os.path.isfile(original_policy_path)
         assert os.path.isfile(aip_policy_path), (
-            "There is no MediaConch policy file in the AIP at" f" {aip_policy_path}!"
+            f"There is no MediaConch policy file in the AIP at {aip_policy_path}!"
         )
         with open(original_policy_path) as filei:
             original_policy = filei.read().strip()
6 changes: 3 additions & 3 deletions features/steps/mets_steps.py
@@ -84,9 +84,9 @@ def step_impl(context, quant):
     mets = utils.get_mets_from_scenario(context, api=True)
     mets_dmd_sec_els = mets.findall(".//mets:dmdSec", context.am_user.mets.mets_nsmap)
     try:
-        assert len(mets_dmd_sec_els) == int(
-            quant
-        ), f"Expected {int(quant)} dmdSec element(s), got {len(mets_dmd_sec_els)}"
+        assert len(mets_dmd_sec_els) == int(quant), (
+            f"Expected {int(quant)} dmdSec element(s), got {len(mets_dmd_sec_els)}"
+        )
     except ValueError:
         raise utils.ArchivematicaStepsError(
             f"Unable to recognize the quantifier {quant} when checking for dmdSec"
