Skip to content

Commit

Permalink
Merge pull request #135 from LondonBiofoundry/well-location-in-csv
Browse files Browse the repository at this point in the history
implements new echo instruction pipeline
  • Loading branch information
hainesm6 authored Jun 18, 2021
2 parents d50d917 + d190e24 commit 7ecf225
Show file tree
Hide file tree
Showing 4 changed files with 147 additions and 14 deletions.
28 changes: 26 additions & 2 deletions basicsynbio/cam/csv_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,26 @@
import csv


def export_csvs(basic_build: BasicBuild, path: str = None):
def export_csvs(
basic_build: BasicBuild,
path: str = None,
clip_plate_mapping: dict[str, list[str]] = None,
assembly_plate_mapping: dict[str, str] = None,
):
"""Writes information about each clip_data and assembly to
two dependent CSV files in the same folder the command
is executed.
Args:
path (optional): path to zipped folder of csv files. If none defaults to
working directory with a time stamped name, output csvs is created.
        clip_plate_mapping (optional): A dictionary with keys containing clip
            indices and values containing lists of well locations where the
            clips are stored.
        assembly_plate_mapping (optional): A dictionary with keys containing
            assembly indices and values containing the assembly's plate index
            followed by a dash then the well location, for example {1:'0-A1',...}
Returns:
str: filepath of created zip containing the CSV files
Expand All @@ -35,6 +47,7 @@ def export_csvs(basic_build: BasicBuild, path: str = None):
"Suffix ID",
"Total assemblies",
"Assembly indexes",
"Clip plate mapping",
]
thewriter = csv.DictWriter(f, fieldnames=fieldnames)
thewriter.writeheader()
Expand All @@ -55,10 +68,18 @@ def export_csvs(basic_build: BasicBuild, path: str = None):
basic_build.basic_assemblies.index(assembly) + 1
for assembly in clip_data[1]
],
"Clip plate mapping": clip_plate_mapping[str(index + 1)]
if clip_plate_mapping
else "N/A",
}
)
with open(Path.cwd() / "assemblies.csv", "w", newline="") as f:
fieldnames = ["Assembly Index", "Assembly ID", "Clip indexes"]
fieldnames = [
"Assembly Index",
"Assembly ID",
"Clip indexes",
"Assembly plate mapping",
]
thewriter = csv.DictWriter(f, fieldnames=fieldnames)
thewriter.writeheader()
for index, assembly in enumerate(basic_build.basic_assemblies):
Expand All @@ -70,6 +91,9 @@ def export_csvs(basic_build: BasicBuild, path: str = None):
basic_build.unique_clips.index(clip_reaction) + 1
for clip_reaction in assembly._clip_reactions
],
"Assembly plate mapping": assembly_plate_mapping[str(index + 1)]
if assembly_plate_mapping
else "N/A",
}
)
with zipfile.ZipFile(zip_path, "w") as my_zip:
Expand Down
67 changes: 56 additions & 11 deletions basicsynbio/cam/echo_instructions.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from .main import BasicBuild
from .csv_export import export_csvs
import zipfile
import os
import pandas as pd
Expand All @@ -9,6 +10,9 @@
import csv
from functools import reduce
from platemap import Plate, assign_source_wells, find_well, remove_volume
from typing import Literal
from collections import defaultdict


CLIP_VOLUME = 500
BUFFER_VOLUME = 500
Expand All @@ -21,6 +25,9 @@ def export_echo_assembly(
buffer_well: str = "A1",
water_well: str = "B1",
alternate_well: bool = False,
assemblies_per_clip: int = 28,
clips_plate_size: Literal[6, 24, 96, 384, 1536] = 384,
assemblies_plate_size: Literal[6, 24, 96, 384, 1536] = 96,
) -> None:
"""Writes automation scripts for a echo liquid handler to build assemblies from clips.
Expand All @@ -30,6 +37,10 @@ def export_echo_assembly(
buffer_well (optional): location in 6 well plate of assembly buffer.
water_well (optional): location in 6 well plate of dH20.
alternate_well (optional): specifies whether alternating wells are to be used in the input 384 well plate.
        assemblies_per_clip (optional): number of assemblies each purified clip reaction can support.
        clips_plate_size (optional): specifies the size of the clips plate. Defaults to 384.
        assemblies_plate_size (optional): specifies the size of the assemblies plates. Defaults to 96.
Returns:
str: Path of zip file containing echo automation scripts
Expand All @@ -49,39 +60,60 @@ def export_echo_assembly(
"Assembly Buffer Well location needs to be within the 6 well plate, between A1 - B3"
)

source_plate = Plate(size=384, well_volume=40000, deadspace=20000)
destination_plate = Plate(size=96)
calculated_well_volume = assemblies_per_clip * CLIP_VOLUME
source_plate = Plate(
size=clips_plate_size, well_volume=calculated_well_volume, deadspace=0
)
destination_plate = Plate(size=assemblies_plate_size)

try:
assign_source_wells(
source_plate,
reduce(
lambda a, b: {**a, **b},
list(
map(
lambda x: {x[0]: len(x[1][1] * CLIP_VOLUME)},
lambda x: {x[0] + 1: len(x[1][1] * CLIP_VOLUME)},
enumerate(basic_build.clips_data.items()),
)
),
),
alternate_wells=alternate_well,
)

except:
        raise ValueError(
            """Too many clips in the build to be handled by a single 384
            source plate; consider your alternate_well setting."""
        )

dd = defaultdict(list)

for d in list(
map(
lambda well_item: {well_item[1][1]["id"]: well_item[1][0]},
enumerate(
filter(lambda x: x[1]["total_volume"], source_plate.contents.items())
),
)
):
for key, value in d.items():
dd[str(key)].append(value)

clip_sourceplate_mapping = dict(dd)
assembly_outputplate_mapping = {}

if path == None:
now = datetime.now()
zip_path = (
Path.cwd() / f"Echo_Instructions_{now.strftime('%d-%m-%Y_%H.%M.%S')}.zip"
)
else:
zip_path = path
for index, set_of_96_assemblies in enumerate(
for index, set_of_full_assemblies in enumerate(
list(
basic_build.basic_assemblies[x : x + 96]
for x in range(0, len(basic_build.basic_assemblies), 96)
basic_build.basic_assemblies[x : x + assemblies_plate_size]
for x in range(0, len(basic_build.basic_assemblies), assemblies_plate_size)
)
):
with open(
Expand All @@ -94,14 +126,17 @@ def export_echo_assembly(
thewriter_clips.writeheader()
thewriter_water_buffer = csv.DictWriter(f2, fieldnames=fieldnames)
thewriter_water_buffer.writeheader()
for index, assembly in enumerate(set_of_96_assemblies):
for assembly_index, assembly in enumerate(set_of_full_assemblies):
assembly_outputplate_mapping[
str((index * assemblies_plate_size) + (assembly_index + 1))
] = (str(index) + "-" + str(destination_plate.wells[assembly_index]))
for clip in [
basic_build.unique_clips.index(clip_reaction)
basic_build.unique_clips.index(clip_reaction) + 1
for clip_reaction in assembly._clip_reactions
]:
thewriter_clips.writerow(
{
"Destination Well": destination_plate.wells[index],
"Destination Well": destination_plate.wells[assembly_index],
"Source Well": find_well(source_plate, clip, CLIP_VOLUME),
"Transfer Volume": CLIP_VOLUME,
}
Expand All @@ -113,14 +148,14 @@ def export_echo_assembly(
)
thewriter_water_buffer.writerow(
{
"Destination Well": destination_plate.wells[index],
"Destination Well": destination_plate.wells[assembly_index],
"Source Well": buffer_well,
"Transfer Volume": BUFFER_VOLUME,
}
)
thewriter_water_buffer.writerow(
{
"Destination Well": destination_plate.wells[index],
"Destination Well": destination_plate.wells[assembly_index],
"Source Well": water_well,
"Transfer Volume": TOTAL_VOLUME
- BUFFER_VOLUME
Expand All @@ -133,7 +168,17 @@ def export_echo_assembly(
),
}
)
csv_zip = export_csvs(
basic_build, None, clip_sourceplate_mapping, assembly_outputplate_mapping
)
with zipfile.ZipFile(csv_zip, "r") as zip_ref:
zip_ref.extractall()
with zipfile.ZipFile(zip_path, "w") as my_zip:
my_zip.write("clips.csv")
my_zip.write("assemblies.csv")
os.remove("clips.csv")
os.remove("assemblies.csv")
os.remove(csv_zip)
for file in os.listdir(Path.cwd()):
if file.startswith("echo_") and file.endswith(".csv"):
my_zip.write(file)
Expand Down
2 changes: 1 addition & 1 deletion basicsynbio/cam/pdf_instructions.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def pdf_instructions(
basic_build: BasicBuild object the pdf lab instructions are written for.
path (optional): path to zipped folder of csv files. If none defaults to
working directory with a time stamped name, output csvs is created.
assemblies_per_clip (optional): amount of clips to be used in each assembly.
assemblies_per_clip (optional): number of assemblies each purified clip reaction can support.
Returns:
str: filepath of created pdf
Expand Down
64 changes: 64 additions & 0 deletions tests/test_function_echo.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ def test_echo_instructions_small_build(small_build_example):
)
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "assemblies.csv")
os.rmdir(Path.cwd() / "ECHO_CSVS")
expected_clips = [
["A1", "A1", 500],
Expand Down Expand Up @@ -80,6 +82,8 @@ def test_echo_instructions_small_build_useAllWell_False(small_build_example):
)
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "assemblies.csv")
os.rmdir(Path.cwd() / "ECHO_CSVS")
expected_clips = [
["A1", "A1", 500],
Expand Down Expand Up @@ -142,6 +146,8 @@ def test_multiple_files_made_more_than_96_assemblies(promoter_assemblies_build):
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_2.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_2.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "assemblies.csv")
os.rmdir(Path.cwd() / "ECHO_CSVS")
expected_water_buffer_2 = [
["A1", "A1", 500],
Expand Down Expand Up @@ -334,6 +340,64 @@ def test_multiple_files_made_more_than_96_assemblies(promoter_assemblies_build):
assert expected_water_buffer_2 == echo_water_buffer_2.to_numpy().tolist()


def test_multiple_files_made_more_than_96_assemblies_clips_assignment(
promoter_assemblies_build,
):
import zipfile
import os
import pandas as pd
import numpy as np
from pathlib import Path

echozippath = bsb.export_echo_assembly(promoter_assemblies_build)
with zipfile.ZipFile(echozippath, "r") as zip_ref:
try:
zip_ref.extractall("ECHO_CSVS")
finally:
zip_ref.close()
os.remove(echozippath)
echo_clips_csv = pd.read_csv(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_2.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_2.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "assemblies.csv")
os.rmdir(Path.cwd() / "ECHO_CSVS")
echo_clips_first_clip_wells = "['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1']"
assert echo_clips_first_clip_wells == echo_clips_csv.to_numpy().tolist()[0][-1]


def test_multiple_files_made_more_than_96_assemblies_clips_assignment_with_alternate_wells(
promoter_assemblies_build,
):
import zipfile
import os
import pandas as pd
import numpy as np
from pathlib import Path

echozippath = bsb.export_echo_assembly(
promoter_assemblies_build, alternate_well=True, clips_plate_size=1536
)
with zipfile.ZipFile(echozippath, "r") as zip_ref:
try:
zip_ref.extractall("ECHO_CSVS")
finally:
zip_ref.close()
os.remove(echozippath)
echo_clips_csv = pd.read_csv(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_1.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_clips_2.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "echo_water_buffer_2.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "clips.csv")
os.remove(Path.cwd() / "ECHO_CSVS" / "assemblies.csv")
os.rmdir(Path.cwd() / "ECHO_CSVS")
echo_clips_first_clip_wells = "['A1', 'C1', 'E1', 'G1', 'I1', 'K1', 'M1']"
assert echo_clips_first_clip_wells == echo_clips_csv.to_numpy().tolist()[0][-1]


def test_echo_instructions_too_many_clips(promoter_assemblies_build_more_than_384):
import zipfile
import os
Expand Down

0 comments on commit 7ecf225

Please sign in to comment.