This repository was archived by the owner on Feb 1, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathrun.py
803 lines (626 loc) · 25.3 KB
/
run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
# imports
from GDapp.models import add_analyzed_image, add_gold_particle_coordinates, add_histogram_image, add_output_file, EMImage
import os
from skimage import io # library for python to help access pictures
import numpy as np # help do math in python
import matplotlib.pyplot as plt
import random
import imageio
from PIL import Image
import sys
from skimage.util.shape import view_as_windows, view_as_blocks
import imutils
import os
import cv2
import glob
import shutil
import re
import pandas as pd
import pathlib
import time
import errno
import os
import stat
import shutil
from django.conf import settings
def get_artifact_status(model):
    '''
    Look up whether the output of GoldDigger needs a black rectangle to cover
    an artifact in the upper right corner of each tile, and how tall that
    rectangle must be.
    Parameters:
        model (string): Name of trained model.
    Returns:
        Integer row index below which the artifact region is blacked out,
        or None if the model produces no artifact.
    '''
    # Known models with an upper-right artifact, mapped to the artifact height.
    artifact_rows = {
        '87kGoldDigger': 35,
        'greenonly_0422': 18,
    }
    return artifact_rows.get(model)
def clear_out_old_files(model):
    '''
    Delete and recreate every working directory so a new run starts empty.
    Parameters:
        model: Name of trained model (selects the PIX2PIX results directory).
    '''
    # Same directories, same order, as the per-directory rmtree/makedirs pairs.
    work_dirs = (
        'media/Output',
        'media/Output_Appended',
        'media/Output_Appended/test',
        'media/PIX2PIX/results/{0}/test_latest/images'.format(model),
        'media/Output_ToStitch',
        'media/Output_Final',
    )
    for directory in work_dirs:
        shutil.rmtree(directory, ignore_errors=True)
        os.makedirs(directory)
def crop_mask(mask, height256, width256):
    '''
    Crop the mask file to the same size as the cropped input image and save
    the result next to the other final outputs.
    Parameters:
        mask: File name of the mask image inside media/Mask.
        height256: Height of cropped image (original height rounded down
            to the nearest multiple of 256).
        width256: Width of cropped image (original width rounded down
            to the nearest multiple of 256).
    Returns:
        The mask image cropped to height256 x width256 with at most 3 channels
        (same dimensions as the cropped input image).
    '''
    mask_path = pathlib.Path('media/Mask', mask)
    cropped = io.imread(mask_path)[:height256, :width256, :3]
    imageio.imwrite('media/Output_Final/CroppedMask.png', cropped)
    print("completed crop_mask function")
    return cropped
def get_dimensions(img_new):
    '''
    Compute the largest height/width divisible by 256 that fits in the image.
    Parameters:
        img_new: Image (ndarray) to run through the GD network.
    Returns:
        height256: Height of img_new rounded down to a multiple of 256 (pixels).
        width256: Width of img_new rounded down to a multiple of 256 (pixels).
        height: Number of whole 256-pixel rows.
        width: Number of whole 256-pixel columns.
    '''
    rows, cols = img_new.shape[0], img_new.shape[1]
    height = rows // 256
    width = cols // 256
    return height * 256, width * 256, height, width
def create_small_image(current_progress, total_progress, front_end_updater, img_size, img_new_w, i, j, r):
    '''
    Save one 256x256 crop of the image as media/Output/<r>.png and report progress.
    Parameters:
        current_progress: Number of tiles processed so far.
        total_progress: Total number of tiles to process.
        front_end_updater: FrontEndUpdater class object for updating the front end.
        img_size: (height, width, channels) of one tile, e.g. (256, 256, 3).
        img_new_w: Block view of the image, indexed [row, col, :, :].
        i: Block row index.
        j: Block column index.
        r: Running tile counter, used as the output file name.
    Returns:
        current_progress: Incremented tile count.
        r: Incremented tile counter for the next call.
    '''
    current_progress += 1
    front_end_updater.update_progress(current_progress / total_progress * 100, 1)
    tile = np.zeros((img_size[0], img_size[1], 3))
    tile[:, :, :] = img_new_w[i, j, :, :]
    imageio.imwrite('media/Output/' + str(r) + '.png', tile)
    return current_progress, r + 1
# look in INPUT folder, crop photo and save crop to OUTPUT folder
def load_data_make_jpeg(image, mask, model, front_end_updater, imageName=''):
    '''
    Crops image to size that is a multiple of 256 x 256 pixels and breaks
    image into 256 x 256 pixel squares saved under media/Output.
    Parameters:
        image: File name of the input image inside media/Input.
        mask: File name of the mask inside media/Mask, or None/'' when no mask is used.
        model: Name of the trained model (embedded in the saved cropped-image name).
        front_end_updater: FrontEndUpdater object for progress/status messages.
        imageName: Stem of the image name, embedded in output file names.
    Returns:
        file_list: pathlib.Path to the input image.
        width: Width of the image in whole 256-pixel blocks.
        height: Height of the image in whole 256-pixel blocks.
        img_mask: Cropped mask image, or None when no mask was supplied.
    '''
    file_list = pathlib.Path('media/Input', image)
    print(file_list)
    # Single-element loop; width/height/img_mask assigned inside are used by
    # the return after the loop.
    for entry in [file_list]:
        front_end_updater.post_message('loading image')
        front_end_updater.update_progress(10, 1)
        img_size = (256, 256, 3)
        img_new = io.imread(entry)
        height256, width256, height, width = get_dimensions(img_new)
        # Crop to dimensions divisible by 256 and drop any alpha channel.
        img_new = img_new[:height256, :width256, :3]
        img_mask = None
        if mask is not None and mask!='':
            img_mask = crop_mask(mask, height256, width256)
        front_end_updater.update_progress(50, 1)
        # Split into non-overlapping 256x256x3 blocks.
        img_new_w = view_as_blocks(img_new, img_size)
        img_new_w = np.uint8(img_new_w)
        imageio.imwrite('media/Output_Final/' +
                        f'Cropped-{imageName}-with-{model}' + '.png', img_new)
        r = 0
        total_progress = img_new_w.shape[0] * img_new_w.shape[1]
        current_progress = 0
        front_end_updater.post_message('cutting up image')
        # the cutting up image step is what gives all the "Lossy conversion" warnings in celery terminal
        front_end_updater.update_progress(90, 1)
        for i in range(img_new_w.shape[0]):
            for j in range(img_new_w.shape[1]):
                current_progress, r = create_small_image(current_progress, total_progress, front_end_updater, img_size, img_new_w, i, j, r)
    return file_list, width, height, img_mask
# Cut append white image to every cropped 256x256 image
def combine_white(white, folderA, front_end_updater):
    '''
    Append a white image to the right of every cropped 256x256 image so the
    PIX2PIX network has a blank B half to write over ([A | white] pairs),
    saving the results into media/Output_Appended/test/.
    Parameters:
        white: Blank 256x256 image (ndarray).
        folderA: Folder containing the 256x256 crops of the original image.
        front_end_updater: FrontEndUpdater class object for updating the front end.
    '''
    # NOTE: the previous version os.chdir()'d into folderA and back. chdir is
    # process-global (unsafe under celery workers/threads) and was not restored
    # if an exception escaped mid-loop, so paths are now built explicitly.
    print(os.getcwd())
    out_dir = os.path.join(folderA, os.pardir, 'Output_Appended', 'test')
    entries = os.listdir(folderA)
    total_progress = len(entries)
    current_progress = 0
    for file in entries:
        front_end_updater.update_progress(
            current_progress/total_progress * 100, 1)
        current_progress += 1
        imA = io.imread(os.path.join(folderA, file))
        newimage = np.concatenate((imA, white), axis=1)
        imageio.imwrite(os.path.join(out_dir, file), newimage)
    print(os.getcwd())
def save_to_output_folder(file_list, model):
    '''
    Move all 256x256 PIX2PIX output images into media/Output_ToStitch.
    Parameters:
        file_list: List of paths to 256x256 PIX2PIX output images.
        model: Name of trained model (selects the PIX2PIX results directory).
    '''
    dirA = 'media/PIX2PIX/results/{0}/test_latest/images/'.format(model)
    dirB = 'media/Output_ToStitch/'
    for entry in file_list:
        # os.path.basename instead of entry.split('/')[-1] so Windows-style
        # glob results (backslash separators) are handled correctly too.
        name = os.path.basename(entry)
        shutil.move(os.path.join(dirA, name), os.path.join(dirB, name))
# STICH TOGETHER
def stitch_row(n, master, folderstart, art_idx, widthdiv256):
    '''
    Stitch widthdiv256 consecutive 256x256 output tiles (starting at index n
    in master) into one row as wide as the original cropped image.
    Parameters:
        n: Index into master of the first tile of this row.
        master: Ordered list of tile file names.
        folderstart: Directory containing the tiles (e.g. 'media/Output_ToStitch/').
        art_idx: Height of the upper-right artifact to black out, or None.
        widthdiv256: Number of tiles per row.
    Returns:
        One image row: the tiles concatenated horizontally.
    '''
    tiles = []
    for idx in range(n, n + widthdiv256):
        tile = np.array(imageio.imread(folderstart + master[idx]))
        if art_idx is not None:
            # Cover the model's upper-right corner artifact with black.
            tile[0:art_idx, 220:256, :] = 0
        tiles.append(tile)
    return np.concatenate(tiles, axis=1)
def stitch_image(folderstart, widthdiv256, heighttimeswidth, art_idx):
    '''
    Reassemble all 256x256 PIX2PIX output tiles in folderstart (hardcoded by
    the caller as 'media/Output_ToStitch/') into a full analyzed version of
    the original cropped image.
    Parameters:
        folderstart: Directory containing the '<n>_fake_B.png' tiles.
        widthdiv256: Number of tiles per row.
        heighttimeswidth: Total number of tiles (rows * columns).
        art_idx: Height of the upper-right artifact to black out, or None.
    Returns:
        picture: The stitched full-size image.
        file_list: Numeric tile name stems, sorted in stitch order.
    '''
    files = os.listdir(folderstart)
    file_list = []
    for file in files:
        # Leading digits of each file name give the tile index.
        # Raw string: '\D' without r'' is an invalid escape (SyntaxWarning
        # on Python 3.12+).
        split_name = re.split(r'\D', file)
        file_list.append(split_name[0])
    file_list.sort(key=float)
    master = [name + '_fake_B.png' for name in file_list]
    # First row, then append each subsequent row below it.
    picture = stitch_row(0, master, folderstart, art_idx, widthdiv256)
    for n in range(widthdiv256, heighttimeswidth, widthdiv256):
        next_row = stitch_row(n, master, folderstart, art_idx, widthdiv256)
        picture = np.concatenate((picture, next_row), axis=0)
    return picture, file_list
# find centers of green squares
def find_centers(cnts, img_original):
    '''
    Compute the centers of the green squares the network drew to mark
    particle locations, keeping only centers that sit on a dark pixel of the
    original image.
    Parameters:
        cnts: Contours of the detected green regions.
        img_original: Original (cropped) image as an ndarray.
    Returns:
        seedlistx: X coordinates of accepted centers.
        seedlisty: Y coordinates of accepted centers.
    '''
    seedlistx = []
    seedlisty = []
    for contour in cnts:
        moments = cv2.moments(contour)
        # Guard against a zero area (m00) to avoid division by zero.
        denom = moments["m00"] if moments["m00"] != 0 else 1
        cX = int(moments["m10"] / denom)
        cY = int(moments["m01"] / denom)
        if (cX, cY) != (0, 0) and img_original[cY, cX, 0] < 120:
            seedlistx.append(cX)
            seedlisty.append(cY)
    return seedlistx, seedlisty
# From Diego:
# 1. Finds green square and then the center of that (x,y)
# 2. Then I perform a flood fill on that (x,y) on the original image
# 3. So it fills out the entire dark particle
# 4. Then I find the contour of that mask and the xy of that new circle
# 5. I do this so inconsistencies in the green mask dont affect the area of the gold particle
# Basically it just uses the green masks to find a seed point to start flood filling. This makes sure that the mask is the exact size of the gold particle
def count_green_dots(model, imageName='', thresh_sens=4):
    '''
    This function extracts gold particle contours from the PIX2PIX output image.
    The green marks only seed a flood fill on the original image, so the final
    contour matches the true particle, not the (possibly sloppy) green square.
    Parameters:
        model: Name of trained model (part of the cropped-image file name).
        imageName: Stem of the input image name (part of the file name).
        thresh_sens: Flood-fill tolerance (loDiff/upDiff per channel).
    Returns:
        cnts: Contours of the flood-filled particle regions.
    '''
    img = cv2.imread('media/Output_Final/OutputStitched.png')
    img_original = cv2.imread(f'media/Output_Final/Cropped-{imageName}-with-{model}.png')
    img_original = np.uint8(img_original)
    h, w = img_original.shape[:2]
    # cv2.floodFill requires a mask 2 pixels larger than the image on each axis.
    flood_mask = np.zeros((h + 2, w + 2), dtype=np.uint8)
    # BGR window that counts as "green" in the network output.
    lower_green = np.array([0, 200, 0])
    upper_green = np.array([55, 255, 55])
    mask = cv2.inRange(img, lower_green, upper_green)
    # Morphological open (erode then dilate) to drop speckle noise.
    kernel = np.ones((5, 5), np.uint8)
    e = cv2.erode(mask, kernel, iterations=1)
    d = cv2.dilate(e, kernel, iterations=1)
    cnts = cv2.findContours(d, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    seedlistx, seedlisty = find_centers(cnts, img_original)  # (1) calculating the centers
    # (2) floodfill from each seed; results accumulate in flood_mask only
    # (FLOODFILL_MASK_ONLY), written with value 255 (255 << 8).
    listlen = len(seedlistx)
    floodflags = 4
    floodflags |= cv2.FLOODFILL_MASK_ONLY
    floodflags |= (255 << 8)
    for i in range(listlen):
        num, im, mask, rect = cv2.floodFill(img_original, flood_mask, (seedlistx[i], seedlisty[i]), 1, (thresh_sens,) * 3, (thresh_sens,) * 3,
                                            floodflags)
    print(np.mean(img_original))
    # Trim the 2-pixel border back off before contour extraction.
    flood_mask = flood_mask[:h, :w]
    cnts = cv2.findContours(
        flood_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    return cnts
def check_if_coordinate_is_in_mask(x, y, mask):
    '''
    Decide whether a coordinate should be counted, based on the mask image.
    White mask pixels mark excluded regions; anything else counts.
    Parameters:
        x: Row index into the mask.
        y: Column index into the mask.
        mask: Mask image (ndarray), or None / empty when no mask is in use.
    Returns:
        True when the coordinate should be counted, False when it falls on a
        white (excluded) mask pixel.
    '''
    # No mask supplied (None or empty string/array): count everything.
    if mask is None or len(mask) == 0:
        return True
    white = np.array((255, 255, 255))
    return not np.array_equal(mask[x, y], white)
def get_contour_centers(cnts, img_mask):
    '''
    Compute the center and area of every particle contour.
    Parameters:
        cnts: Contours of the flood-filled particle regions.
        img_mask: Cropped mask image, or None when no mask is in use.
    Returns:
        all_coordinates: DataFrame (X, Y, Area) of every non-origin contour center.
        coords_in_mask: DataFrame (X, Y, Area) restricted to centers inside the
            mask region and with area below 500 px (drops extreme outliers).
    '''
    # Rows are collected in plain lists and the DataFrames built once at the
    # end: DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    all_rows = []
    mask_rows = []
    for c in cnts:
        # Compute the center of the contour from its moments; guard m00 == 0
        # to avoid division by zero.
        M = cv2.moments(c)
        denom = M["m00"] if M["m00"] != 0 else 1
        cX = int(M["m10"] / denom)
        cY = int(M["m01"] / denom)
        if not (cX == 0 and cY == 0):
            area = cv2.contourArea(c)
            all_rows.append({'X': cX, 'Y': cY, 'Area': area})
            # Note (row, col) order: the mask is indexed [Y, X].
            if check_if_coordinate_is_in_mask(cY, cX, img_mask):
                # Drop crazy outliers that would distort the histogram later.
                if area < 500:
                    mask_rows.append({'X': cX, 'Y': cY, 'Area': area})
    all_coordinates = pd.DataFrame(all_rows, columns=['X', 'Y', 'Area'])
    coords_in_mask = pd.DataFrame(mask_rows, columns=['X', 'Y', 'Area'])
    return all_coordinates, coords_in_mask
def sort_from_thresholds(coords_in_mask, particle_group_count, thresholds_list_string):
    '''
    Sort coordinates into up to 3 groups based on particle-area cutoffs.
    Parameters:
        coords_in_mask: DataFrame with 'X', 'Y' and 'Area' columns.
        particle_group_count: Number of area groups requested (1, 2 or 3).
        thresholds_list_string: Comma-separated ints; consecutive pairs give
            the exclusive (low, high) area bounds of each group.
    Returns:
        results1, results2, results3: DataFrames of 'X'/'Y' coordinates per
            group (unused groups come back empty).
    '''
    thresholds_list = [int(x) for x in thresholds_list_string.split(",")]
    print("thresholds_list:")
    print(thresholds_list)
    # Rows collected in lists and turned into DataFrames once at the end:
    # DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    groups = ([], [], [])
    if particle_group_count in (1, 2, 3):
        for index, row in coords_in_mask.iterrows():
            # First matching (low, high) window wins, mirroring the old
            # if/elif chain.
            for g in range(particle_group_count):
                low, high = thresholds_list[2 * g], thresholds_list[2 * g + 1]
                if low < row['Area'] < high:
                    groups[g].append({'X': row['X'], 'Y': row['Y']})
                    break
    results1, results2, results3 = (pd.DataFrame(g, columns=['X', 'Y']) for g in groups)
    return results1, results2, results3
def update_progress(progress_recorder, step, total_steps, message):
    '''
    Forward a progress update to the recorder, if one is attached.
    Parameters:
        progress_recorder: Progress recorder object, or None to do nothing.
        step: Current step number.
        total_steps: Total number of steps.
        message: Message shown with the progress bar.
    '''
    if progress_recorder is None:
        return
    progress_recorder.set_progress(step, total_steps, message)
def save_preview_figure(all_coordinates, imageName, model, front_end_updater):
    '''
    Annotate the cropped image with each particle's area value and attach the
    result to the EMImage object as the analyzed preview.
    Parameters:
        all_coordinates: DataFrame with 'X', 'Y' and 'Area' columns.
        imageName: Stem of the input image name (part of the file name).
        model: Name of trained model (part of the file name).
        front_end_updater: FrontEndUpdater; its pk identifies the EMImage row.
    '''
    annotated = cv2.imread(f'media/Output_Final/Cropped-{imageName}-with-{model}.png')
    for _, particle in all_coordinates.iterrows():
        position = (int(particle['X']), int(particle['Y']))
        cv2.putText(annotated, str(particle['Area']), position,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    preview_file_path = 'media/Output_Final/preview.png'
    imageio.imwrite(preview_file_path, annotated)
    add_analyzed_image(front_end_updater.pk, preview_file_path)
def save_histogram(coordinates, front_end_updater):
    '''
    Plot a histogram of particle areas and attach it to the EMImage object.
    Parameters:
        coordinates: DataFrame with an 'Area' column.
        front_end_updater: FrontEndUpdater; its pk identifies the EMImage row.
    '''
    hist_path = 'media/Output_Final/preview_histogram.png'
    plt.figure(2)
    plt.hist(coordinates.Area.values, bins=100)
    plt.title('Particle Area Histogram')
    plt.xlabel('Size (px)')
    plt.ylabel('Count')
    # Remove any histogram left over from a previous run before saving.
    if os.path.exists(hist_path):
        os.remove(hist_path)
    plt.savefig(hist_path, bbox_inches='tight')
    add_histogram_image(front_end_updater.pk, hist_path)
def save_coordinates(coordinates, name, front_end_updater):
    '''
    Write a coordinate DataFrame to media/Output_Final/<name>.csv, replacing
    any file left over from a previous run.
    Parameters:
        coordinates: DataFrame of coordinates to save.
        name: Base file name (without extension).
        front_end_updater: Unused; kept for signature consistency with the
            other save_* helpers.
    '''
    target = 'media/Output_Final/' + name + '.csv'
    if os.path.exists(target):
        os.remove(target)
    coordinates.to_csv(target, index=False)
def save_all_results(coordinates, coordinates1, coordinates2, coordinates3, model, front_end_updater, imageName=''):
    '''
    This function saves the output files to the EMImage object.
    Parameters:
        coordinates: DataFrame of all in-mask particle coordinates (X, Y, Area).
        coordinates1: DataFrame of group-1 coordinates (X, Y).
        coordinates2: DataFrame of group-2 coordinates (X, Y).
        coordinates3: DataFrame of group-3 coordinates (X, Y).
        model: Name of trained model (embedded in the output file names).
        front_end_updater: FrontEndUpdater; its pk identifies the EMImage row.
        imageName: Stem of the input image name (embedded in file names).
    '''
    # Timestamped master coordinate CSV saved under MEDIA_ROOT/results.
    sub_path = 'results'
    results_path = os.path.join(settings.MEDIA_ROOT, sub_path)
    if not os.path.isdir(results_path):
        os.makedirs(results_path)
    timestr = time.strftime("%Y%m%d%H%M%S")
    coordinates_path_relative = os.path.join(
        sub_path, 'coordinates' + timestr + '.csv')
    coordinates_path_absolute = os.path.join(
        settings.MEDIA_ROOT, coordinates_path_relative)
    coordinates.to_csv(coordinates_path_absolute, index=None,
                       header=True)
    add_gold_particle_coordinates(
        front_end_updater.pk, coordinates_path_absolute)
    # Per-group CSVs plus the annotated preview image and area histogram.
    save_coordinates(coordinates1, f'coordsGroup1-{imageName}-with-{model}', front_end_updater)
    save_coordinates(coordinates2, f'coordsGroup2-{imageName}-with-{model}', front_end_updater)
    save_coordinates(coordinates3, f'coordsGroup3-{imageName}-with-{model}', front_end_updater)
    save_preview_figure(coordinates, imageName, model, front_end_updater)
    save_histogram(coordinates, front_end_updater)
#def run_gold_digger(model, input_image_list, particle_group_count, thresholds_list_string, thresh_sens=4, mask=None, front_end_updater=None):
#run_gold_digger(obj.trained_model, image_path, obj.particle_groups, obj.threshold_string, thresh_sens=obj.thresh_sens, mask=mask_path, front_end_updater=front_end_updater)
def run_gold_digger(image_path, obj, mask=None, front_end_updater=None):
    '''
    Run the full GoldDigger pipeline on one image: prepare directories, cut
    the image into 256x256 tiles, run PIX2PIX, stitch the output back
    together, extract particle coordinates, sort them into area groups, and
    save all results (CSVs, preview, histogram, zip archive).
    Parameters:
        image_path: Path/name of the input image (used to derive the image name).
        obj: EMImage instance carrying run settings (trained_model,
            particle_groups, threshold_string, thresh_sens); its status field
            is updated after every stage so the front end can show progress.
        mask: Optional mask file name restricting which coordinates count.
        front_end_updater: FrontEndUpdater used for progress reporting.
    '''
    model = obj.trained_model
    input_image_list = image_path
    particle_group_count = obj.particle_groups
    thresholds_list_string = obj.threshold_string
    thresh_sens = obj.thresh_sens
    obj.status = "Inside run_gold_digger function"
    obj.save()
    # Each stage runs in its own try block so a failure records which stage
    # broke. 'except Exception' replaces the old bare 'except:', which also
    # swallowed SystemExit/KeyboardInterrupt (a problem under celery).
    try:
        print(f'Running with {model}')
        imageName = pathlib.Path(input_image_list).stem
        print(f'Image name: {imageName}')
        front_end_updater.update(1, "starting")
        art_idx = get_artifact_status(model)
        clear_out_old_files(model)
        obj.status = "Prepared files to run"
        obj.save()
    except Exception:
        obj.status = "Error in get_artifact_status or clear_out_old_files function"
        obj.save()
        return
    try:
        front_end_updater.update(2, "loading and cutting up image")
        file_list, width, height, img_mask = load_data_make_jpeg(
            input_image_list, mask, model, front_end_updater, imageName=imageName)
        obj.status = "Image cut into 256x256 windows"
        obj.save()
    except Exception:
        obj.status = "Error in load_data_make_jpeg"
        obj.save()
        return
    try:
        front_end_updater.update(4, "combining with white background")
        white = io.imread('media/White/white.png')
        combine_white(white, 'media/Output', front_end_updater)
        obj.status = "Image prepared for PIX2PIX"
        obj.save()
    except Exception:
        obj.status = "Error in combine_white function"
        obj.save()
        return
    try:
        front_end_updater.update(5, "running PIX2PIX...")
        os.system(
            'python3 media/PIX2PIX/test.py --dataroot media/Output_Appended/ --name {0} --model pix2pix --direction AtoB --num_test 1000000 --checkpoints_dir media/PIX2PIX/checkpoints/ --results_dir media/PIX2PIX/results/'.format(
                model))
        print("RAN PIX2PIX")
        obj.status = "Ran PIX2PIX"
        obj.save()
    except Exception:
        obj.status = "Error when running PIX2PIX"
        obj.save()
        return
    try:
        front_end_updater.update(6, "Finished. stitching files together...")
        file_list = glob.glob(
            'media/PIX2PIX/results/{0}/test_latest/images/*_fake_B.png'.format(model))
        print("---BEFORE STITCH---")
        widthdiv256 = width
        heighttimeswidth = width * height
        folderstart = 'media/Output_ToStitch/'
        save_to_output_folder(file_list, model)
        picture, file_list = stitch_image(
            folderstart, widthdiv256, heighttimeswidth, art_idx)
        imageio.imwrite('media/Output_Final/OutputStitched.png', picture)
        obj.status = "Stitched PIX2PIX output image together"
        obj.save()
    except Exception:
        obj.status = "Error in save_to_output_folder or stitch_image function"
        obj.save()
        return
    try:
        front_end_updater.update(7, "Identifying green dots")
        cnts = count_green_dots(model, imageName=imageName, thresh_sens=thresh_sens)
        all_coordinates, coords_in_mask = get_contour_centers(cnts, img_mask)
        obj.status = "Identified particle coordinates"
        obj.save()
    except Exception:
        obj.status = "Error in count_green_dots or get_contour_centers"
        obj.save()
        return
    try:
        print("image name: " + input_image_list)
        print(pathlib.Path(input_image_list).stem)
        results1, results2, results3 = sort_from_thresholds(coords_in_mask,
                                                            particle_group_count, thresholds_list_string)
        obj.status = "Sorted particle coordinates into area groups"
        obj.save()
    except Exception:
        obj.status = "Error in sort_from_thresholds function"
        obj.save()
        return
    save_all_results(coords_in_mask, results1, results2, results3, model, front_end_updater, imageName=imageName)
    # Re-fetch the row so status updates made elsewhere aren't clobbered.
    obj = EMImage.objects.get(pk=front_end_updater.pk)
    obj.status = "Results saved"
    obj.save()
    print("SUCCESS!!")
    front_end_updater.update(8, "Saving files")
    shutil.make_archive(f'media/Output-{imageName}-with-{model}', 'zip', 'media/Output_Final')
    add_output_file(front_end_updater.pk, f'media/Output-{imageName}-with-{model}.zip')
    obj = EMImage.objects.get(pk=front_end_updater.pk)
    obj.status = "Output zip file created"
    obj.save()
    print('CREATED ZIP FILE')
    front_end_updater.update(9, f"All done with {imageName}")
    front_end_updater.analysis_done(imageName=imageName)
    obj.status = "Successfully completed run"
    obj.save()