From b5f42c8b15fac3a56578df82744f7e2982607d4e Mon Sep 17 00:00:00 2001 From: JamesBagley <34780918+JamesBagley@users.noreply.github.com> Date: Thu, 11 Mar 2021 18:01:56 -0500 Subject: [PATCH] Added readme & more refactoring/renaming --- .spyproject/config/backups/codestyle.ini.bak | 8 + .spyproject/config/backups/encoding.ini.bak | 6 + .spyproject/config/backups/vcs.ini.bak | 7 + .spyproject/config/backups/workspace.ini.bak | 10 + Biomek/media_gradient_maker.py | 58 ----- Genomics/gene_search.py | 225 ------------------ README.md | 41 ++++ .../empty_strain_map.xlsx | Bin .../BD accuri parser.py | 41 +--- microtiter/Curve_maker.py | 50 ++-- .../plate_reader_tools.cpython-37.pyc | Bin 8290 -> 8572 bytes microtiter/plate_reader_tools.py | 100 ++++---- 12 files changed, 172 insertions(+), 374 deletions(-) create mode 100644 .spyproject/config/backups/codestyle.ini.bak create mode 100644 .spyproject/config/backups/encoding.ini.bak create mode 100644 .spyproject/config/backups/vcs.ini.bak create mode 100644 .spyproject/config/backups/workspace.ini.bak delete mode 100644 Biomek/media_gradient_maker.py delete mode 100644 Genomics/gene_search.py rename {microtiter => example_file_structures}/empty_strain_map.xlsx (100%) rename Genomics/ploidy.py => flow_cytometry/BD accuri parser.py (78%) diff --git a/.spyproject/config/backups/codestyle.ini.bak b/.spyproject/config/backups/codestyle.ini.bak new file mode 100644 index 0000000..0f54b4c --- /dev/null +++ b/.spyproject/config/backups/codestyle.ini.bak @@ -0,0 +1,8 @@ +[codestyle] +indentation = True +edge_line = True +edge_line_columns = 79 + +[main] +version = 0.2.0 + diff --git a/.spyproject/config/backups/encoding.ini.bak b/.spyproject/config/backups/encoding.ini.bak new file mode 100644 index 0000000..a17aced --- /dev/null +++ b/.spyproject/config/backups/encoding.ini.bak @@ -0,0 +1,6 @@ +[encoding] +text_encoding = utf-8 + +[main] +version = 0.2.0 + diff --git a/.spyproject/config/backups/vcs.ini.bak b/.spyproject/config/backups/vcs.ini.bak new file mode 100644 index 0000000..fd66eae --- /dev/null +++ b/.spyproject/config/backups/vcs.ini.bak @@ -0,0 +1,7 @@ +[vcs] +use_version_control = False +version_control_system = + +[main] +version = 0.2.0 + diff --git a/.spyproject/config/backups/workspace.ini.bak b/.spyproject/config/backups/workspace.ini.bak new file mode 100644 index 0000000..d2c8c74 --- /dev/null +++ b/.spyproject/config/backups/workspace.ini.bak @@ -0,0 +1,10 @@ +[workspace] +restore_data_on_startup = True +save_data_on_exit = True +save_history = True +save_non_project_files = False + +[main] +version = 0.2.0 +recent_files = ['..\\..\\..\\..\\.spyder-py3\\temp.py', 'microtiter\\curve_maker.py', 'microtiter\\plate_reader_tools.py', '..\\..\\..\\..\\miniconda3\\lib\\site-packages\\spyder_kernels\\customize\\spyderpdb.py'] + diff --git a/Biomek/media_gradient_maker.py b/Biomek/media_gradient_maker.py deleted file mode 100644 index 23b5b0c..0000000 --- a/Biomek/media_gradient_maker.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Fri May 25 15:54:17 2018 - -@author: James -""" - -import pandas as pd -from microtiter import plate_reader_tools -import sys -import random - -class Plate: - def __init__(self, name): - self.name = name - self.wells = {} - - def new_well(self, name, components): - if not type(components) == dict: - return(TypeError('components must be of type dict')) - - -class Media_Well: - def __init__(self, name, number): - self.name = name - self.number = number - -FILE = 
r'C:\Users\Owner\Documents\Concordia\1000 yeast strains\Biomek instructions\hydrolysate test 2 map.xlsx' - -plate_maps_excel = pd.ExcelFile(FILE) - -plates = plate_reader_tools.find_plates(plate_maps_excel) - -for plate in plates: - if 2 in plate.shape: - sys.exit('plate wrong shape') - -for plate in plates: - plate.set_index(plate.iloc[0:, 0], inplace=True) - plate.drop(['<>'], axis='rows', inplace=True) - plate.drop([0], axis='columns', inplace=True) - - -concentrations = [] -for i, plate in enumerate(plates): - print(plate) - concentrations.append(input( - 'plate {} looks like this, what is the source concentration in g/L?\ - \n'.format(i+1))) - -melted_plates = [] -for plate in plates: - melted_plates.append(plate.T.melt()) - -melted_lists = [] -for melted_plate in melted_plates: - for well in melted_plate.index: - value = float(melted_plate.iloc[well]['value']) diff --git a/Genomics/gene_search.py b/Genomics/gene_search.py deleted file mode 100644 index 1646888..0000000 --- a/Genomics/gene_search.py +++ /dev/null @@ -1,225 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Wed Sep 30 08:33:49 2020 - -@author: jbag2 -""" -import pandas as pd -import plotly.express as px -import numpy as np -from os import listdir -from pathlib import Path -from Bio.Seq import Seq -from collections import namedtuple -from Bio.SeqIO import parse, write -from Bio.SeqRecord import SeqRecord - - -ROOT = Path(r'C:\Users\jbag2\OneDrive - Concordia University - Canada\Genomes\Genomes and GFFs') -fastas, gffs = [], [] -for file in [ROOT.joinpath(x) for x in listdir(ROOT)]: - #print(file) - if file.suffix == '.fasta': - fastas.append(file) - elif file.suffix == '.gff': - gffs.append (file) - -fastas = {x.parts[-1].split(';')[0].strip(): str(x) for x in fastas} -gffs = {x.parts[-1].split('.')[0].split(' ')[-1].strip(): str(x) for x in gffs} - -g_and_g_map = pd.DataFrame(fastas.values(), - index=fastas.keys(), - columns=['fasta']).join( - pd.DataFrame(gffs.values(), - index=gffs.keys(), - columns=['gff'])) - -def find_genes(gene_names): - cds_names, strains = [], [] - results = dict() - for gene in gene_names: - cds_name, strain = correct_name(gene) - cds_names.append(cds_name) - strains.append(strain) - - if all([strains[0] == strain for strain in strains]): - - strain = strains[0] - gff, genome = load_genome(strain) - - sequences = {gene:find_gene(gene, genome, gff) for gene in cds_names} - results = sequences - - else: - for working_strain in set(strains): - if working_strain == '04Q': - continue - working_set = [] - for gene, strain in zip(cds_names, strains): - if strain != working_strain: - continue - else: - working_set.append(gene) - gff, genome = load_genome(working_strain) - sequences = {gene:find_gene(gene, genome, gff) for gene in working_set} - results[working_strain] = sequences - - return(results) - - - -def find_gene(gene_name, genome, gff): - - exons = find_CDS_coords(gene_name, gff) - exon_sequences = [] - for key, exon in exons.items(): - coords, strand, frame, contig = exon - fasta_coords = [int(coord) + genome[contig].start_char for coord in coords] - direction = {'+':+1, '-':-1}[strand] - sequence = genome.fasta.replace('\n', '')[fasta_coords[0]-1:fasta_coords[1]] - if direction == -1: - sequence = str(Seq(sequence[::-1]).complement()) - #exon_sequences.append(sequence[frame:]) - exon_sequences.append(sequence) - sequence = ''.join(exon_sequences) - return sequence - - -def correct_name(name): - breakpoints = ['|','c','g'] - locs = [name.find(x) for x in breakpoints] - subs = [name[:locs[0]], 
name[locs[1]+1:locs[2]], name[locs[2]+1:]] - strain = subs[0] - contig = subs[1] - gene = subs[2] - - return f'{contig.zfill(6)}F|arrow.g{gene}', strain.strip('JB_') - -def find_CDS_coords(name, gff): - found = 0 - exons = {} - target_attribute = f'ID={name}.t1.cds' - with open(gff) as file: - for line in file: - if line[0]=='#': - continue - entries = line.split('\t') - attributes = entries[8].split(';') - if target_attribute in attributes: - if not found: - found += 1 - coords, strand, frame, contig = (entries[3], entries[4]), entries[6], int(entries[7]), entries[0] - exons[found] = coords, strand, frame, contig - else: - #multiple exons - found += 1 - #print(f'Found {found} instances of {name} in {gff}') - coords, strand, frame, contig = (entries[3], entries[4]), entries[6], int(entries[7]), entries[0] - exons[found] = coords, strand, frame, contig - - if found: - return exons - else: - print(f'No instances of "{target_attribute}" found in {gff}') - - -def load_fasta(path): - - Contig_details = namedtuple('Contig', ['name', 'start_row', 'end_row', - 'start_char', 'end_char']) - class Genome(): - # Genome class stores contig headers as keys and returns full contig - # when indexed by a key - - def __init__(self, path): - contig_names = [] - contig_starts = [] - contig_char_starts = [] - contig_ends = [] - contig_char_ends = [] - char_count = 0 - with open(path) as file : - for line_num, line in enumerate(file): - if line[0] != '>': - contig_ends[-1] = line_num - char_count += len(line.strip('\n')) - contig_char_ends[-1] = char_count - continue - else: - contig_names.append(line[1:].split(' ')[0].strip('\n')) - contig_starts.append(line_num+1) - contig_ends.append(line_num) - char_count += len(line.strip('\n')) - contig_char_starts.append(char_count) - contig_char_ends.append(char_count) - - self.contigs = \ - {contig_names[i]: - Contig_details(contig_names[i], contig_starts[i], - contig_ends[i], contig_char_starts[i], - contig_char_ends[i]) - for i in range(len(contig_names))} - self.path = path - - def __getitem__(self, item): - return self.contigs[item] - - @property - def fasta(self): - return(open(self.path, 'r').read()) - - return(Genome(path)) - -def load_genome(assembly_name): - locs = g_and_g_map.loc[assembly_name].values - gff = locs[1] - genome = load_fasta(locs[0]) - - return (gff, genome) - -def results_to_fasta(results, name): - from Bio.Seq import Seq - from Bio.SeqRecord import SeqRecord - from Bio.SeqIO import write - - reformatted = [] - for strain in results.keys(): - for gene in results[strain].keys(): - reformatted.append(['|'.join([strain, gene]), results[strain][gene]]) - out = [SeqRecord(Seq(sequence), id=full_name) for full_name, sequence in reformatted] - - write(out, f'{name}.fasta', 'fasta') - -def from_aybrah(FOG, amino_acid=True): - aybrah = pd.read_excel(r'C:\Users\jbag2\OneDrive - Concordia University - Canada\Aybrah annotation dataset (2).xlsx') - aybrah = aybrah.set_index('FOG') - SOI = 'JB_02W,JB_04R,JB_02M,pku_NG7,JB_02G,pfe_madison,JB_02L,JB_02Q,JB_019,JB_01V,JB_01X,pnor_UCD,sce,opm,ppa,dbx,yli'.split(',') - GOI = aybrah.loc[FOG].loc[SOI] - - JB = 'JB_02W,JB_04R,JB_02M,JB_02G,JB_02L,JB_02Q,JB_019,JB_01V,JB_01X'.split(',') - AYB = 'sce,opm,ppa,dbx,yli'.split(',') - unimplemented = set(GOI.index).difference(set(AYB)).difference(set(JB)) - print(f'strains {", ".join(unimplemented)} NOT YET IMPLEMENTED') - - JB_genes = find_genes(GOI.loc[JB].str.split(';', expand=True).melt().dropna().value.values) - ayb_fasta = 
parse(r'C:\Users\jbag2\OmicsBoxWorkspace\yeast_proteome_aybrah.faa', 'fasta')
-    ayb_goi = GOI.loc[AYB].str.split(';', expand=True).melt().dropna().value.values
-    ayb_out = []
-    for seq in ayb_fasta:
-        if seq.id in ayb_goi:
-            ayb_out.append([seq.id, seq.seq])
-    JB_out = []
-    for strain in JB_genes.keys():
-        try:
-            for gene in JB_genes[strain].keys():
-                if amino_acid:
-                    JB_out.append(['|'.join([strain, gene]), Seq(JB_genes[strain][gene]).translate()])
-                else:
-                    JB_out.append(['|'.join([strain, gene]), Seq(JB_genes[strain][gene])])
-
-        except(AttributeError):
-            continue
-
-    out = JB_out + ayb_out
-    return out
-    
\ No newline at end of file
diff --git a/README.md b/README.md
index 1cdad99..11f0e45 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,43 @@
 # James_Foundry_Tools
 
+#### biomek
+
+-- main.py
+    Top-level interface for running backdilutions on the Biomek. The Equal_OD_dilutions function takes the output from a plate reader, along with a dilution factor, target OD, target volume & other variables, and writes a csv file formatted for the Biomek's "Transfer from file" step to the "output files" folder.
+    
+    Prints a warning if a well's OD is too low and drops that well from the experiment
+
+-- dilution_calculator.py
+    Handles the calculations related to diluting cultures for main.py
+
+-- biomek_file_writer.py
+    General-purpose csv writer formatted for the Biomek; automatically breaks transfers that exceed the maximum allowed volume into multiple smaller transfers, and can save to .csv or return the csv-formatted text to the console
+
+
+#### flow_cytometry
+
+-- BD accuri parser.py
+    Built on the open-source project "FlowIO"; reads the .fcs files output by the flow cytometer and combines them with a strain map to provide easily graphable and retrievable results
+    Can make multistrain figures comparing any combination of two measurements across an arbitrary number of strains; takes custom grid dimensions for side-by-side comparison or generates them automatically
+    
+    Can also retrieve results for a single strain for custom figures.
+
+#### microtiter
+
+-- plate_reader_tools.py
+    Contains an array of basic tools used throughout the package, including:
+        Reading one-shot plate reader outputs
+        Reading growth-curve data from Sunrise readers
+        Conversion between matrix format and list format
+        Conversion between well coordinates (A1-H12) and numbers (1-96)
+        Reading strain & treatment maps for other modules
+
+-- curve_maker.py
+    Contains higher-level tools for analyzing growth curves.
+    curve_maker accepts a strain map path and a Sunrise path and outputs a long-format dataframe with Time, well, name & OD columns. Ideal for combining the output of multiple experiments into a single dataframe
+    curve_viewer receives the output of curve_maker (works better with raw_time set to False) and produces a line plot using the seaborn.relplot function;
+        accepts an optional context variable with "notebook", "paper", "talk", and "poster" as options,
+        accepts an optional legend_name and an optional list of names from the strain map to plot
+    mu_max receives the output of curve_maker and produces the maximum specific growth rate (µmax) of each well, retaining identifier information
+    mu_max_plot provides a high-level function for plotting µmax results, designed specifically for multiple strains & treatments compared against a control strain
+
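Note: a minimal usage sketch of the growth-curve pipeline the README above documents. The file paths are hypothetical, the import path is assumed from the repository layout, and the keyword names follow the signatures in the Curve_maker.py diff further down:

    from microtiter.Curve_maker import curve_maker, curve_viewer, mu_max

    # Label a Sunrise export with a strain map; per the README, raw_time=False
    # gives a Time column in hours, which curve_viewer and mu_max expect.
    curves = curve_maker('sunrise_export.xlsx',
                         strain_map_path='strain_map.xlsx',
                         raw_time=False)
    curve_viewer(curves, context='talk', legend_name='strain')
    rates = mu_max(curves, blank='BLK')   # per-well maximum growth rates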
diff --git a/microtiter/empty_strain_map.xlsx b/example_file_structures/empty_strain_map.xlsx
similarity index 100%
rename from microtiter/empty_strain_map.xlsx
rename to example_file_structures/empty_strain_map.xlsx
diff --git a/Genomics/ploidy.py b/flow_cytometry/BD accuri parser.py
similarity index 78%
rename from Genomics/ploidy.py
rename to flow_cytometry/BD accuri parser.py
index 05a9821..bf33163 100644
--- a/Genomics/ploidy.py
+++ b/flow_cytometry/BD accuri parser.py
@@ -1,8 +1,4 @@
-<<<<<<< HEAD
- # -*- coding: utf-8 -*-
-=======
 # -*- coding: utf-8 -*-
->>>>>>> refactor-&-merge
 """
 Created on Tue Feb 18 16:58:48 2020
 
@@ -15,8 +11,8 @@
 from pandas import DataFrame, concat
 import numpy as np
 import matplotlib.pyplot as plt
+from math import sqrt, ceil
 
-<<<<<<< HEAD
 def getwell(well, folder):
     if len(well)<3:
         well = well[0]+'0'+well[1]
@@ -24,15 +20,6 @@ def getwell(well, folder):
 
 def getstrain(strain, strain_map):
     S_map = read_infinateM200_output(strain_map)
-=======
-def getwell(well):
-    if len(well)<3:
-        well = well[0]+'0'+well[1]
-    return 'D:\\Downloads\\ISM 2020-02-14 Ploidy\\'+well+'.fcs'
-
-def getstrain(strain):
-    S_map = read_infinateM200_output(r"D:\Downloads\2020-02-04 Strain Map Top45 PLOIDY.xlsx")
->>>>>>> refactor-&-merge
     S_map = dict(zip(S_map.wells.values(), S_map.wells.keys()))
     return(S_map[strain])
 
@@ -43,19 +30,18 @@ def getflowdata(strain):
 
     return(DataFrame(data=data, columns=columns))
 
-<<<<<<< HEAD
-def multistrain(strains, folder, X='FSC-A', Y='FL2-A', xmax=3000000, ymax=30000):
-    fig, axes = plt.subplots(2, 2)
-    plot_loc = {0:[0,0], 1:[0,1], 2:[1,0], 3:[1,1]}
+def multistrain(strains, folder, X='FSC-A', Y='FL2-A', xmax=3000000, ymax=30000, dim=None):
+    if dim:
+        fig, axes = plt.subplots(*dim)
+    else:
+        dim = [ceil(sqrt(len(strains))), ceil(sqrt(len(strains)))]
+        fig, axes = plt.subplots(*dim)
+
+    plot_loc = dict()
+    for i in range(dim[0]*dim[1]):
+        plot_loc[i] = [i//dim[1], i%dim[1]]
     for i, strain in enumerate(strains):
         data = getflowdata(strain, folder)
-=======
-def multistrain(strains, X='FSC-A', Y='FL2-A', xmax=3000000, ymax=30000):
-    fig, axes = plt.subplots(2, 2)
-    plot_loc = {0:[0,0], 1:[0,1], 2:[1,0], 3:[1,1]}
-    for i, strain in enumerate(strains):
-        data = getflowdata(strain)
->>>>>>> refactor-&-merge
 
         if len(X.split('/'))>1:
             Xs = X.split('/')
@@ -97,13 +83,8 @@ def multistrain(strains, X='FSC-A', Y='FL2-A', xmax=3000000, ymax=30000):
         ax.set_ylabel(Y)
     plt.show()
 
-<<<<<<< HEAD
 def onestrain(strain, folder, X='FSC-A', Y='FL2-A', xmax=3000000, ymax=30000, plot=True):
     data = getflowdata(strain, folder)
-=======
-def onestrain(strain, X='FSC-A', Y='FL2-A', xmax=3000000, ymax=30000, plot=True):
-    data = getflowdata(strain)
->>>>>>> refactor-&-merge
 
     if len(X.split('/'))>1:
         Xs = X.split('/')
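Note: the dim argument added to multistrain above replaces the fixed 2 x 2 grid with an auto-sized one. A standalone sketch of the same layout rule, with a hypothetical strain list:

    from math import ceil, sqrt

    strains = ['S01', 'S02', 'S03', 'S04', 'S05']
    side = ceil(sqrt(len(strains)))          # smallest square that fits: 3
    dim = [side, side]
    # Row/column slot of the i-th subplot, mirroring plot_loc in the diff.
    plot_loc = {i: [i // dim[1], i % dim[1]] for i in range(dim[0] * dim[1])}
    assert plot_loc[4] == [1, 1]             # the 5th strain plots at row 1, col 1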
diff --git a/microtiter/Curve_maker.py b/microtiter/Curve_maker.py
index a0c2376..bde9cfe 100644
--- a/microtiter/Curve_maker.py
+++ b/microtiter/Curve_maker.py
@@ -9,13 +9,14 @@
 import seaborn as sns
 import numpy as np
 import matplotlib.pyplot as plt
+import matplotlib.ticker as ticker
 
 def curve_maker(sunrise_path,
-                strain_map_path=r"../microtiter/empty_strain_map.xlsx",
+                strain_map_path=r"../example_file_structures/empty_strain_map.xlsx",
                 raw_time=False):
     sunrise = tools.read_sunrise(sunrise_path)
-    strain_map = tools.read_infinateM200_output(strain_map_path)
+    strain_map = tools.read_treatment_map(strain_map_path)
     sunrise.index.name = 'Time'
     strain_map = strain_map.dataframe.reset_index()
     strain_map.columns = ['well','name']
@@ -31,16 +32,19 @@ def curve_maker(sunrise_path,
     labeled.Time = labeled.Time.apply(lambda x: x.total_seconds()/3600)
     return(labeled)
 
-def curve_viewer(sheet, context='talk', hue='name', order=False):
+def curve_viewer(sheet, context='talk', legend_name='name', names=False):
     sns.set_context(context)
-    sheet = sheet.rename(columns={'name': hue})
-    if order:
-        sns.relplot(data = sheet, x='Time', y='OD595', kind = 'line',
-                    hue = 'name', hue_order=order, aspect=2)
+    sheet = sheet.rename(columns={'name': legend_name, 'Time': 'Time (hours)'})
+    if names:
+        plot = sns.relplot(data = sheet, x='Time (hours)', y='OD595', kind = 'line',
+                    hue = legend_name, hue_order=names, aspect=2)
     else:
-        sns.relplot(data = sheet, x='Time', y='OD595', kind = 'line',
-                    hue = 'name', aspect=2)
-def mu_max(curves, norm_eqs=None):
+        plot = sns.relplot(data = sheet, x='Time (hours)', y='OD595', kind = 'line',
+                    hue = legend_name, aspect=2)
+    plot.ax.xaxis.set_major_locator(ticker.MultipleLocator(6))
+    return(plot)
+
+def mu_max(curves, norm_eqs=None, time_range=('2.5 hours', '15 hours'), blank='BLK'):
     if norm_eqs:
         for strain, norm_eq in norm_eqs.items():
             try:
@@ -52,15 +56,24 @@
     #curves.rename(columns={'index': 'indexer'})
     window_size = 12
     curves.Time = pd.TimedeltaIndex(curves.Time, unit='h').round('T')
-    data = (curves.set_index(['Time', 'name', 'well']).unstack([1,2]).resample('5T').mean()\
-            .rolling(window_size).apply(lambda x: np.log(x[-1]/x[0])).OD595['2.5 hours':'15 hours']\
-            .max()/(window_size/12)).drop('BLK').reset_index()
-    data = data.assign(Strain = data.name.apply(lambda x: x.split(' ')[0]),
-                       Treatment = data.name.apply(lambda x: ' '.join(x.split(' ')[1:])))
-    #data.pH = pd.to_numeric(data.pH, errors='ignore')
-    data = data.rename(columns={0: 'Max growth rate'})
-    sns.barplot(data=data, y='Treatment', x='Max growth rate', hue='Strain')
-    return data
+    data = curves.set_index(['Time', 'name', 'well']).unstack([1,2]).resample('5T').mean()
+    # Blank-subtract using the wells named by the `blank` argument.
+    blank_val = data['OD595'][blank].mean()
+    
+    try:
+        blank_val = blank_val.mean()
+    except AttributeError:
+        pass
+    data = data - blank_val
+    rolling = data.rolling(window_size)
+    growth_rates = (rolling.apply(lambda x: np.log(x[-1]/x[0])).OD595[time_range[0]:time_range[-1]]\
+                    .max()/(window_size/12)).reset_index()
+    # growth_rates = growth_rates.assign(Strain = growth_rates.name.apply(lambda x: str(x).split(' ')[0]),
+    #                    Treatment = growth_rates.name.apply(lambda x: ' '.join(str(x).split(' ')[1:])))
+    # #data.pH = pd.to_numeric(data.pH, errors='ignore')
+    # data = growth_rates.rename(columns={0: 'Max growth rate'})
+    # sns.barplot(data=growth_rates, y='Treatment', x='Max growth rate', hue='Strain')
+    return growth_rates
 
 def mu_max_plot(growth_rates, strains, comparison='Cen'):
     #Strain = namedtuple('strain', ['key', 'name', 'marker', 'marker_size'])
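Note: in the reworked mu_max above, curves are blank-subtracted, resampled to 5-minute steps, and scanned with a 12-sample rolling window; ln(OD_end/OD_start) over each window is a specific growth rate, and dividing by window_size/12 normalizes it to per-hour units. A standalone sketch on synthetic data growing at mu = 0.4 per hour:

    import numpy as np
    import pandas as pd

    t = pd.timedelta_range('0 hours', '15 hours', freq='5T')
    od = pd.Series(0.05 * np.exp(0.4 * (t / pd.Timedelta('1 hour'))), index=t)
    window_size = 12                         # 12 samples x 5 min
    rates = od.rolling(window_size).apply(lambda x: np.log(x[-1] / x[0]), raw=True)
    mu = rates['2.5 hours':'15 hours'].max() / (window_size / 12)
    # mu comes out near 0.37 rather than 0.40: a 12-sample window spans only
    # eleven 5-minute steps (55 min), so the estimate runs slightly low.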
diff --git a/microtiter/__pycache__/plate_reader_tools.cpython-37.pyc b/microtiter/__pycache__/plate_reader_tools.cpython-37.pyc
index 7898d54850d352df7da10d718bafe8ba2974eb18..b8b3aa99251cdbbbaecdcaa12556fa6576aa10d9 100644
GIT binary patch
[base85 deltas (1582 and 1404 bytes) omitted: binary diff of a compiled .pyc artifact]

diff --git a/microtiter/plate_reader_tools.py b/microtiter/plate_reader_tools.py
index e3d6598..25db828 100644
--- a/microtiter/plate_reader_tools.py
+++ b/microtiter/plate_reader_tools.py
@@ -202,48 +202,16 @@ def structured_find_plates(excel):
     return plates
 
-
 def read_treatment_map(target):
-    if target == 'testing':
-        target = r'C:\Users\Owner\Concordia\Lab_Automation\example_files\treatment_map.xlsx'
-    file = pd.read_excel(target, 0, header=None, dtype=str)
-    treatment_map = find_plate(file)
-
-    MODE = matrix_or_list(treatment_map)
-
-    print('Mode =', MODE)
-
-    if MODE == 'MATRIX':
-        treatment_map_dict = {}
-        treatment_map.set_index(treatment_map[0], inplace=True)
-        treatment_map.drop(['<>'], axis='rows', inplace=True)
-        treatment_map.drop([0], axis='columns', inplace=True)
-        counter = 0
-        for row in treatment_map.index.values:
-            for col in treatment_map.columns.values:
-
-                index = (str(row) + str(col))
-
-                try:
-                    value = float(treatment_map[col][row])
-                except ValueError:
-                    value = treatment_map[col][row]
-
-                treatment_map_dict[counter] = {'Value': value,
-                                               'Well': index}
-                counter += 1
-
-        treatment_map = pd.DataFrame.from_dict(treatment_map_dict).T
-        treatment_map.set_index(treatment_map['Well'], inplace=True)
-        treatment_map.drop(['Well'], axis='columns', inplace=True)
-
-    if MODE == 'LIST':
-        treatment_map = treatment_map.set_index(treatment_map[0])
-        treatment_map.columns = treatment_map.iloc[0]
-        treatment_map = treatment_map.drop(['Well'])
-        treatment_map = treatment_map.drop(['Well'], axis=1)
-    return treatment_map
-
+    file = pd.read_excel(io=target, header=None, dtype=str)
+    plate = find_plate(file)
+    plate = read_plate(plate)
+    
+    plate_obj = plate_96()
+    for item in plate.iteritems():
+        plate_obj.wells[item[0]] = item[1]
+        
+    return plate_obj
 
 def matrix_or_list(dataframe):
     if dataframe.shape[1] > 2:
@@ -303,4 +271,52 @@ def as_matrix(data, function=False, rows=list('ABCDEFGH'),
         else:
             row.append(well)
         matrix.append(row)
-    return matrix
\ No newline at end of file
+    return matrix
+
+
+
+
+
+
+#OBSOLETE
+def _read_treatment_map(target):
+    if target == 'testing':
+        target = r'C:\Users\Owner\Concordia\Lab_Automation\example_files\treatment_map.xlsx'
+    file = pd.read_excel(target, 0, header=None, dtype=str)
+    treatment_map = find_plate(file)
+
+    MODE = matrix_or_list(treatment_map)
+
+    print('Mode =', MODE)
+
+    if MODE == 'MATRIX':
+        treatment_map_dict = {}
+        treatment_map.set_index(treatment_map[0], inplace=True)
+        treatment_map.drop(['<>'], axis='rows', inplace=True)
+        treatment_map.drop([0], axis='columns', inplace=True)
+        counter = 0
+        for row in treatment_map.index.values:
+            for col in treatment_map.columns.values:
+
+                index = (str(row) + str(col))
+
+                try:
+                    value = float(treatment_map[col][row])
+                except ValueError:
+                    value = treatment_map[col][row]
+
+                treatment_map_dict[counter] = {'Value': value,
+                                               'Well': index}
+                counter += 1
+
+        treatment_map = pd.DataFrame.from_dict(treatment_map_dict).T
+        treatment_map.set_index(treatment_map['Well'], inplace=True)
+        treatment_map.drop(['Well'], axis='columns', inplace=True)
+
+    if MODE == 'LIST':
+        treatment_map = treatment_map.set_index(treatment_map[0])
+        treatment_map.columns = treatment_map.iloc[0]
+        treatment_map = treatment_map.drop(['Well'])
+        treatment_map = treatment_map.drop(['Well'], axis=1)
+    return treatment_map
+
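Note: the rewritten read_treatment_map now returns a plate_96 object (defined elsewhere in plate_reader_tools) rather than a DataFrame. A hedged usage sketch; the path is hypothetical, the import path is assumed, and the idea that wells is keyed by coordinates such as 'A1' follows from the wells[item[0]] loop above:

    import plate_reader_tools as tools   # import path assumed

    tmap = tools.read_treatment_map('treatment_map.xlsx')
    # wells maps well coordinates to treatment values per the loop above;
    # curve_maker consumes the same object through its .dataframe attribute.
    print(tmap.wells.get('A1'))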