diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1dfb27..faa116c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
 # CHANGELOG
 
+## version 3.1.0
+- Update software versions:
+  - `dms_variants` to 1.6.0
+  - `neutcurve` to 2.1.0
+  - `altair` to 5.3
+  - `python` to 3.12
+
 ## version 3.0.0
 - In `curvefit_params` in the YAML configuration, now `fixslope` should be specified in addition to `fixtop` and `fixbottom`. In addition, all three of these can be set to constrained ranges rather than just totally free or fixed values. Alongside this change, the slopes of curve fits are now reported in key output files. Addresses [this issue](https://github.com/jbloomlab/neutcurve/issues/53) and [this issue](https://github.com/jbloomlab/seqneut-pipeline/issues/32).
 - This is a **backward-incompatible change** in the configuration YAML: now you must specify `fixslope` under `curvefit_params`.
diff --git a/environment.yml b/environment.yml
index 022262c..810c494 100644
--- a/environment.yml
+++ b/environment.yml
@@ -15,11 +15,11 @@ dependencies:
   - papermill=2.5
   - pip
   - pyarrow
-  - python=3.11
+  - python=3.12
   - ruamel.yaml=0.18.6
   - snakefmt
-  - snakemake=8.10.0
+  - snakemake=8.10
   - ruff
   - pip:
-    - dms_variants==1.5.0
-    - neutcurve==2.0.1
+    - dms_variants==1.6.0
+    - neutcurve==2.1.0
diff --git a/funcs.smk b/funcs.smk
index e06732a..cfc1211 100644
--- a/funcs.smk
+++ b/funcs.smk
@@ -16,7 +16,7 @@ def process_miscellaneous_plates(misc_plates_d):
     for plate, plate_dict in misc_plates_d.items():
         misc_plates[plate] = {}
         if not req_keys.issubset(plate_dict):
-            raise ValueError(f"miscellaneous_plate {plate} lacks {req_keys=}")
+            raise ValueError(f"miscellaneous_plate {plate} lacks {req_keys =}")
         misc_plates[plate]["viral_library"] = plate_dict["viral_library"]
         misc_plates[plate]["neut_standard_set"] = plate_dict["neut_standard_set"]
         samples = pd.read_csv(plate_dict["samples_csv"])
@@ -48,29 +48,29 @@ def process_plate(plate, plate_params):
         "curvefit_qc",
     }
     if not req_plate_params.issubset(plate_params):
-        raise ValueError(f"{plate=} {plate_params=} lacks {req_plate_params=}")
+        raise ValueError(f"{plate =} {plate_params =} lacks {req_plate_params =}")
     if plate_params["viral_library"] not in viral_libraries:
         raise ValueError(
-            f"{plate=} {plate_params['viral_library']=} not in {viral_libraries=}"
+            f"{plate =} {plate_params['viral_library'] =} not in {viral_libraries =}"
         )
     if plate_params["neut_standard_set"] not in neut_standard_sets:
         raise ValueError(
-            f"{plate=} {plate_params['neut_standard_set']=} not in {neut_standard_sets=}"
+            f"{plate =} {plate_params['neut_standard_set'] =} not in {neut_standard_sets =}"
         )
 
     plate_d = copy.deepcopy(plate_params)
     plate_d["group"] = str(plate_d["group"])
     plate_d["date"] = str(plate_d["date"])
     if not re.fullmatch("\d{4}\-\d{2}\-\d{2}", str(plate_d["date"])):
-        raise ValueError(f"{plate=} {plate_d['date']=} not in YYYY-MM-DD format")
+        raise ValueError(f"{plate =} {plate_d['date'] =} not in YYYY-MM-DD format")
 
     # Process samples_csv to create the sample data frame
     req_sample_cols = ["well", "serum", "dilution_factor", "replicate", "fastq"]
     samples_df = pd.read_csv(plate_params["samples_csv"], comment="#")
     if not set(req_sample_cols).issubset(samples_df.columns):
-        raise ValueError(f"{plate=} {samples_df.columns=} lacks {req_sample_cols=}")
+        raise ValueError(f"{plate =} {samples_df.columns =} lacks {req_sample_cols =}")
     if samples_df["serum"].isnull().any():
-        raise ValueError(f"{plate=} 'samples_csv' has null values in 'serum' column")
+        raise ValueError(f"{plate =} 'samples_csv' has null values in 'serum' column")
 
     # try to turn columns of ints and NAs into Int64 to avoid ints appearing as floats
     for col in ["replicate", "dilution_factor"]:
@@ -113,7 +113,7 @@ def process_plate(plate, plate_params):
         plate_replicate=lambda x: x.apply(
             lambda row: (
                 plate
-                + ("" if row["one_serum_replicate"] else f"{-row['replicate']}")
+                + ("" if row["one_serum_replicate"] else f"{- row['replicate']}")
             ),
             axis=1,
         ),
@@ -136,17 +136,17 @@ def process_plate(plate, plate_params):
         .drop(columns="duplicates")
     )
     if len(dup_rows):
-        raise ValueError(f"{plate=} has duplicated serum / replicates:\n{dup_rows}")
+        raise ValueError(f"{plate =} has duplicated serum / replicates:\n{dup_rows}")
 
     # make sure dilution_factor is valid
     if not (
         (samples_df["dilution_factor"] >= 1) | (samples_df["serum"] == "none")
     ).all():
-        raise ValueError(f"{plate=} has dilution factors not >= 1 for non-none serum")
+        raise ValueError(f"{plate =} has dilution factors not >= 1 for non-none serum")
 
     # make sure there is at least one "none" sample
     if "none" not in set(samples_df["serum"]):
-        raise ValueError(f"{plate=} has no samples with serum set to 'none'")
+        raise ValueError(f"{plate =} has no samples with serum set to 'none'")
 
     # make sure fastqs are unique
     dup_fastqs = (
@@ -157,7 +157,7 @@ def process_plate(plate, plate_params):
         .drop(columns="duplicates")
     )
     if len(dup_fastqs):
-        raise ValueError(f"{plate=} has duplicate FASTQs:\n{dup_fastqs}")
+        raise ValueError(f"{plate =} has duplicate FASTQs:\n{dup_fastqs}")
 
     plate_d["samples"] = samples_df
 
diff --git a/seqneut-pipeline.smk b/seqneut-pipeline.smk
index 9f12dc1..bcccf14 100644
--- a/seqneut-pipeline.smk
+++ b/seqneut-pipeline.smk
@@ -39,7 +39,7 @@ plates = {
 groups = sorted(set(plate_params["group"] for plate_params in plates.values()))
 groups_cannot_contain = ["|", "_"]  # wildcard problems if group contains these
 if any(s in group for s in groups_cannot_contain for group in groups):
-    raise ValueError(f"found {groups_cannot_contain=} character in {groups=}")
+    raise ValueError(f"found {groups_cannot_contain =} character in {groups =}")
 
 
 wildcard_constraints:
@@ -47,7 +47,7 @@
 
 
 if not set(config["sera_override_defaults"]).issubset(groups):
-    raise ValueError(f"{config['sera_override_defaults']=} keyed by invalid groups")
+    raise ValueError(f"{config['sera_override_defaults'] =} keyed by invalid groups")
 
 
 samples = pd.concat(
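For users updating configurations for the `fixslope` requirement described in the version 3.0.0 changelog entry above, here is a minimal sketch of what a `curvefit_params` block can look like. The key names come from the changelog; the values are hypothetical and assume the convention that a two-element list is a constraint range, a scalar is a fixed value, and `null` leaves the parameter free:

```yaml
curvefit_params:  # hypothetical illustrative values
  fixtop: [0.75, 1.0]  # constrain the curve top to this range
  fixbottom: 0  # fix the curve bottom exactly at 0
  fixslope: null  # leave the slope free; the key itself must now be present
```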
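A note on the f-string edits in `funcs.smk` and `seqneut-pipeline.smk` above: Python's `=` debug specifier reproduces any text inside the braces verbatim before the value's repr, so adding a space before `=` changes the rendered error message. A standalone illustration (variable name hypothetical):

```python
plate = "plate1"

# The "=" specifier echoes the expression text, including any whitespace
# inside the braces, followed by the repr of the value.
print(f"{plate=}")   # prints: plate='plate1'
print(f"{plate =}")  # prints: plate ='plate1'
```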