diff --git a/EM_Imaging.py b/EM_Imaging.py new file mode 100644 index 0000000..de630ba --- /dev/null +++ b/EM_Imaging.py @@ -0,0 +1,1037 @@ +# /!\ WARNING /!\ : tileSize 5 does not exist /!\ + +import win32com.client +from win32com.client import VARIANT +import pythoncom + +import os, time, sys, shutil, pickle, Tkinter, tkFileDialog, subprocess +import logging, colorlog # colorlog is not yet standard +# import logging # if colorlog not available + +from operator import itemgetter +from datetime import datetime + +import numpy as np +from numpy import sin, pi, cos, arctan, tan, sqrt + +import matplotlib.pyplot as plt +import matplotlib.patches as patches + +import skimage +from skimage import feature +from skimage.data import camera +from skimage.filters import roberts, sobel, scharr, prewitt +from skimage.filters.rank import median +from skimage.morphology import disk + +from Tkinter import * +import tkMessageBox + +import winsound + +# /!\ Warning : the merlin is flipped on the x axis. All stage variables in this script are in real coordinates. Only when I read from and write to the Merlin I flip the x axis. 
+ +##################### +### I/O Functions ### +def mkdir_p(path): + try: + os.mkdir(path) + logger.debug('Folder created: ' + path) + except Exception, e: + if e[0] == 20047 or e[0] == 183: + # IJ.log('Nothing done: folder already existing: ' + path) + pass + else: + logger.error('Exception during folder creation :', exc_info=True) + raise + return path + +def getDirectory(text, startingFolder = None): + if startingFolder: + direc = os.path.join(tkFileDialog.askdirectory(title = text, initialdir = startingFolder), '') + else: + direc = os.path.join(tkFileDialog.askdirectory(title = text), '') + logger.debug('Directory chosen by user: ' + direc) + return direc + +def getPath(text, startingFolder = None): + if startingFolder: + path = os.path.join(tkFileDialog.askopenfilename(title = text, initialdir = startingFolder), '') + else: + path = os.path.join(tkFileDialog.askopenfilename(title = text), '') + logger.debug('Path chosen by user: ' + path) + return path + +def getText(text): + userText = raw_input(text) + return userText + + +def readPoints(path): + x,y = [], [] + with open(path, 'r') as f: + lines = f.readlines() + for point in lines: + x.append(int(point.split('\t')[0] )) + y.append(int(point.split('\t')[1] )) + return np.array([x,y]) + +def readSectionCoordinates(path): + with open(path, 'r') as f: + lines = f.readlines() + sections = [] + for line in lines: +# print line + points = line.replace('\n', '').split('\t') + print points + if points[-1] == '': + points.pop() + section = [ [int(float(point.split(',')[0])), int(float(point.split(',')[1]))] for point in points ] + sections.append(section) + return sections + +def initLogger(path): + fileFormatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt = '%d-%m-%Y %H:%M:%S') + fileHandler = logging.FileHandler(path) + fileHandler.setFormatter(fileFormatter) + fileHandler.setLevel(logging.DEBUG) # should I also save an .INFO log ? 
no: if someone wants to check a log, he probably wants to see the .debug one ... + + colorFormatter = colorlog.ColoredFormatter('%(log_color)s%(asctime)s %(levelname)s %(message)s', datefmt = '%d-%m-%Y %H:%M:%S') + streamHandler = colorlog.StreamHandler() + streamHandler.setFormatter(colorFormatter) + + logger = logging.getLogger(__name__) + # clean the logger in case the script is run again in the same console + handlers = logger.handlers[:] + for handler in handlers: + handler.close() + logger.removeHandler(handler) + + logger.setLevel(logging.DEBUG) + logger.propagate = False + + logger.addHandler(fileHandler) + logger.addHandler(streamHandler) + + return logger + +def logMerlinParameters(): + params = {} + with open(logPath, 'a') as f: + for parameter in allMerlinParameters: + if parameter == 'AP_STAGE_AT_X': # /!\ Flipping x axis during read + params[parameter] = - a.Get(parameter)[1] + else: + params[parameter] = a.Get(parameter)[1] + f.write(parameter + ' = ' + str(params[parameter]) + '\n') + return params + +def durationToPrint(d): + return str(round(d/60., 1)) + ' min = ' + str(round(d/3600., 1)) + ' hours = ' + str(round(d/(3600.*24), 1)) + ' days' + +##################### +### GUI Functions ### +class App: + global wafer + def __init__(self, master): + self.frame = Frame(master) + self.frame.pack() + + self.button1 = Button(self.frame, text='Acquire wafer', command = self.acquireWaferButtonAction) + self.button1.pack(side=LEFT) + + self.button11 = Button(self.frame, text='Acquire *sub*wafer', command = self.acquireSubWaferButtonAction) + self.button11.pack(side=LEFT) + + self.button2 = Button(self.frame, text='Add mosaic here', command = addMosaicHere) + self.button2.pack(side=LEFT) + + self.button3 = Button(self.frame, text='Add landmark', command = addLandmark) + self.button3.pack(side=LEFT) + + self.button4 = Button(self.frame, text='Load wafer', command = loadWafer) + self.button4.pack(side=LEFT) + + self.button7 = Button(self.frame, text='Save 
wafer', command = saveWafer) + self.button7.pack(side=LEFT) + + self.button5 = Button(self.frame, text='Load sections and landmarks from pipeline', command = loadSectionsAndLandmarksFromPipeline) + self.button5.pack(side=LEFT) + + self.button6 = Button(self.frame, text='Turn high tension off', command = turnHighTensionOff) + self.button6.pack(side=LEFT) + + self.buttonQuit = Button(self.frame, text='Quit', command = root.destroy) + self.buttonQuit.pack(side=LEFT) + + def acquireWaferButtonAction(self): + # self.frame.quit() # no I should start an independent thread that scans the wafer and close this GUI + turnOff = tkMessageBox.askquestion("Question", "Turn off high tension after acquisition ?") + acquireWafer() + if turnOff == 'yes': + turnHighTensionOff() + + def acquireSubWaferButtonAction(self): + sectionIndicesToAcquire = map(int, getText('What sections should be scanned (e.g., "1,3,5,7,9") ?').split(',')) + turnOff = tkMessageBox.askquestion("Question", "Turn off high tension after acquisition ?") + acquireWafer(userDefinedSectionsToAcquire = sectionIndicesToAcquire) + if turnOff == 'yes': + turnHighTensionOff() + + +def addLandmark(): + stageXY = getStageXY() + wafer.targetLandmarks.append([stageXY[0], stageXY[1], getWD()]) + nValidatedLandmarks = len(wafer.targetLandmarks) + if nValidatedLandmarks == len(sourceLandmarks.T): # all target landmarks have been identified + logger.info('Good. 
All landmarks have been calibrated.') + + targetSections = [] + targetAngles = [] + + for sourceSectionTissueCoordinates in sourceSections: + if hasattr(wafer, 'sourceROIDescription'): # transform the sourceRoi to the ROI in the sourceSection using the transform sourceSectionRoi -> sourceSectionCoordinates + sourceSectionTissueCoordinates = affineT(np.array(wafer.sourceROIDescription[0]).T, np.array(sourceSectionTissueCoordinates).T, np.array(wafer.sourceROIDescription[1]).T).T + + targetTissueCoordinates = affineT(sourceLandmarks, np.array(wafer.targetLandmarks).T[:2], np.array(sourceSectionTissueCoordinates).T) + + # np.array([x_target_points, y_target_points]) + # x1,y1,x2,y2 + + # the angle is simply the angle of the second line in the template. Should be enhanced ... + targetAngle = getAngle([targetTissueCoordinates[0][2], targetTissueCoordinates[1][2], targetTissueCoordinates[0][3], targetTissueCoordinates[1][3]]) + + targetAngle = ((- targetAngle)*180/float(pi) + 90)%360 # the second line is at the bottom and horizontal + # targetAngle = ((- targetAngle)*180/float(pi) + 0)%360 # the second line is at the bottom and horizontal + + targetAngles.append(targetAngle) + + targetTissue = getCenter(targetTissueCoordinates) + targetTissueCenterZ = focusThePoints(np.array(wafer.targetLandmarks).T, np.array([targetTissue]).T)[-1][0] + targetSections.append([targetTissue[0], targetTissue[1], targetTissueCenterZ]) + + for idSection, targetSection in enumerate(targetSections): + section = Section(idSection, [targetSection[0], targetSection[1]], targetAngles[idSection], mp, sp, targetSection[2], wafer.folderWaferSave) + wafer.sections.append(section) + elif nValidatedLandmarks > 1: + logger.info('There are still ' + str(len(sourceLandmarks) - nValidatedLandmarks ) + ' landmarks to calibrate. 
The stage has been moved to the next landmark to be calibrated') + moveStage(*affineT(sourceLandmarks, np.array(wafer.targetLandmarks).T[:2], sourceLandmarks).T[nValidatedLandmarks]) + else: + logger.info('Please go manually to the second landmark.') + +def addMosaicHere(): + wafer.addCurrentPosition() + +def acquireWafer(userDefinedSectionsToAcquire = None): + wafer.acquire(userDefinedSectionsToAcquire = userDefinedSectionsToAcquire) + +def loadWafer(): + global wafer + waferPath = getPath('Select the wafer pickle file', startingFolder = folderSave) + f = open(os.path.normpath(waferPath), 'r') + wafer = pickle.load(f) + f.close() + +def saveWafer(): + wafer.save() + +def turnHighTensionOff(): + a.Execute('CMD_EHT_OFF') + logger.info('High tension turned off') + +def loadSectionsAndLandmarksFromPipeline(): + global sourceSections, sourceTissueMagDescription, sourceLandmarks + pipelineFolder = getDirectory('Select the folder containing the sections and landmarks from the pipeline', startingFolder = folderSave) + + sourceSectionsPath = os.path.join(pipelineFolder, 'source_sections_tissue.txt.') + sourceSections = readSectionCoordinates(sourceSectionsPath) + + sourceTissueMagDescriptionPath = os.path.join(pipelineFolder, 'source_tissue_mag_description.txt.') + sourceTissueMagDescription = readSectionCoordinates(sourceTissueMagDescriptionPath) + + sourceLandmarksPath = os.path.join(pipelineFolder, 'source_landmarks.txt.') + sourceLandmarks = readPoints(sourceLandmarksPath) + + sourceROIDescriptionPath = os.path.join(pipelineFolder, 'source_ROI_description.txt') + if os.path.isfile(sourceROIDescriptionPath): + wafer.sourceROIDescription = readSectionCoordinates(sourceROIDescriptionPath) + else: + logger.info('There is no source_ROI_description. 
The center of the tissue will be used as the center of the ROI.') + + +########################### +### Geometric functions ### +def applyAffineT(points,coefs): + x,y = np.array(points) + x_out = coefs[1]*x - coefs[0]*y + coefs[2] + y_out = coefs[1]*y + coefs[0]*x + coefs[3] + return np.array([x_out,y_out]) + +def rotate(points, angle): + angleRadian = angle * pi / 180. + coefs = [sin(angleRadian), cos(angleRadian), 0, 0] + return applyAffineT(points,coefs) + +def translate(points, v): + coefs = [0, 1, v[0], v[1]] + return applyAffineT(points,coefs) + +def affineT(sourceLandmarks, targetLandmarks, sourcePoints): + # separating the x and y into separate variables + x_sourceLandmarks, y_sourceLandmarks = np.array(sourceLandmarks).T[:len(targetLandmarks.T)].T # sourceLandmarks trimmed to the number of existing targetlandmarks + x_targetLandmarks, y_targetLandmarks = targetLandmarks + x_sourcePoints, y_sourcePoints = sourcePoints + + # Solving the affine transform + A_data = [] + for i in range(len(x_sourceLandmarks)): + A_data.append( [-y_sourceLandmarks[i], x_sourceLandmarks[i], 1, 0]) + A_data.append( [x_sourceLandmarks[i], y_sourceLandmarks[i], 0, 1]) + b_data = [] + for i in range(len(x_targetLandmarks)): + b_data.append(x_targetLandmarks[i]) + b_data.append(y_targetLandmarks[i]) + A = np.matrix( A_data ) + b = np.matrix( b_data ).T + c = np.linalg.lstsq(A, b)[0].T #solving happens here + c = np.array(c)[0] +# print('Absolute errors in target coordinates : (xError, yError)') +# for i in range(len(x_sourceLandmarks)): + #print ("%f, %f" % ( + # np.abs(c[1]*x_sourceLandmarks[i] - c[0]*y_sourceLandmarks[i] + c[2] - x_targetLandmarks[i]), + # np.abs(c[1]*y_sourceLandmarks[i] + c[0]*x_sourceLandmarks[i] + c[3] - y_targetLandmarks[i]))) + + #computing the accuracy + x_target_computed_landmarks, y_target_computed_landmarks = applyAffineT(sourceLandmarks, c) + accuracy = 0 + for i in range(len(x_targetLandmarks)): + accuracy = accuracy + np.sqrt( np.square( 
x_targetLandmarks[i] - x_target_computed_landmarks[i] ) + np.square( y_targetLandmarks[i] - y_target_computed_landmarks[i] ) ) + accuracy = accuracy/float(len(x_sourceLandmarks) + 1) +# print 'The mean accuracy in target coordinates is', accuracy + + #computing the target points + x_target_points, y_target_points = applyAffineT(sourcePoints,c) + return np.array([x_target_points, y_target_points]) + +def getCenter(corners): + center = np.array(map(np.mean, corners)) + return center + +def getAngle(line): + line = np.array(line) + diff = line[0:2] - line[2:4] + theta = np.arctan2(diff[1], diff[0]) + return theta + +def getZ(x,y,abc): #Fitted plane function + return float(abc[0]*x + abc[1]*y + abc[2]) + +def focusThePoints(focusedPoints, pointsToFocus): + x_pointsToFocus, y_pointsToFocus = pointsToFocus[0], pointsToFocus[1] # works even if pointsToFocus has no z coordinates + x_focusedPoints, y_focusedPoints, z_focusedPoints = focusedPoints + + # remove outliers + idInliers = getInlierIndices(z_focusedPoints) + logger.debug('There are ' + str(idInliers.size) + ' inliers in ' + str(map(lambda x:round(x, 2), z_focusedPoints*1e6)) + ' um' ) + if idInliers.size == 3: + logger.warning('Warning - One autofocus point has been removed for interpolative plane calculation') + x_focusedPoints, y_focusedPoints, z_focusedPoints = focusedPoints.T[idInliers].T + elif idInliers.size < 3: + logger.warning('WARNING - There are only ' + str(idInliers.size) + ' inliers for the interpolative plane calculation. 
A strategy should be developed to address such an event.') + + A = np.column_stack([x_focusedPoints, y_focusedPoints, np.ones_like(x_focusedPoints)]) + abc,residuals,rank,s = np.linalg.lstsq(A, z_focusedPoints) + z_pointsToFocus = map(lambda a: getZ (a[0],a[1],abc), np.array([x_pointsToFocus.transpose(), y_pointsToFocus.transpose()]).transpose()) + + # calculating the accuracy + z_check = np.array(map(lambda a: getZ (a[0],a[1],abc), np.array([x_focusedPoints.transpose(), y_focusedPoints.transpose()]).transpose())) + diff = z_check - z_focusedPoints + meanDiff = np.mean(np.sqrt(diff * diff)) + logger.debug('The plane difference is ' + str(diff*1e6) + ' um') + logger.info('The mean distance of focus points to the plane is ' + str(round(meanDiff*1e6, 3)) + ' um') + + return np.array([x_pointsToFocus, y_pointsToFocus, z_pointsToFocus]) + +def transformCoordinates(coordinates, center, angle): + return (translate(rotate(coordinates.T, angle), center)).T + +def pointsToXY(l): # probably useless, use simply a.T for numpy arrays + return np.array([[p[0] for p in l],[p[1] for p in l]]) + +def XYtoPoints(XY): # probably useless, use simply a.T for numpy arrays + l = [] + for x, y in zip(XY[0], XY[1]): + l.append([x,y]) + return np.array(l) + +def getInlierIndices(data, m = 8.): + d = np.abs(data - np.median(data)) + mdev = np.median(d) + s = d/mdev if mdev else 0. 
+ print 'd', d + print 's', s + return np.where(s3600: # turn off high tension if scan took more than 1 hour + logger.critical('Turning the beam off because the scan took more than 1 hour') + a.Execute('CMD_EHT_OFF') + +class Section(object): + def __init__(self, *args): + self.index = args[0] + self.center = args[1] + self.angle = args[2] + self.mp = args[3] # MosaicParameters + self.sp = args[4] # scanningParameters + self.startingWD = args[5] # given by the interpolative plane + self.folderWaferSave = args[6] + self.imagingCoordinates = {} + self.imagingCoordinates['tiles'] = transformCoordinates(self.mp.templateTileCoordinates[0], self.center, -self.angle) + self.imagingCoordinates['autofocus'] = transformCoordinates(self.mp.templateTileCoordinates[1], self.center, -self.angle) + + self.params = None + self.focusedPoints = [] + self.stigs = [] + self.acquireStarted = False + self.acquireFinished = False + self.currentWD = self.startingWD + self.startingStig = None + self.startingTile = 0 # for after interruptions + self.folderSectionSave = os.path.join(self.folderWaferSave, 'section_' + str(self.index).zfill(4)) + self.startingTime = -1 + self.finishingTime = -1 + + def acquire(self): + self.startingTime = time.time() + self.params = logMerlinParameters() + if (not self.acquireStarted): # to handle the case when the section is reset manually by setting acquireStarted=False + self.startingTile = 0 + self.acquireStarted = True + mkdir_p(self.folderSectionSave) + self.moveToSection() + self.computeWDPlaneAndGetStig() + self.scanTiles() + self.finishingTime = time.time() + self.acquireFinished = True + logger.debug('Section ' + str(self.index) + ' acquired. It has taken ' + str((self.finishingTime - self.startingTime)/60.) + ' min.' ) + + def getRoughFocusStig(self): # should I assume that it will never fail ? 
+ self.currentWD = autofocus(self.sp, 'rough') + self.currentStig = autostig(self.sp, 'rough') + + def getRoughFocus(self): # should I assume that it will never fail ? + self.currentWD = autofocus(self.sp, 'rough') + + def computeWDPlaneAndGetStig(self): + if len(self.imagingCoordinates['autofocus']) == 1: + + moveStage(self.imagingCoordinates['autofocus'][0][0], self.imagingCoordinates['autofocus'][0][1]) # probably useless + + time.sleep(sleepBeforeContrast) + moveToContrast(0, 0) + + autofocus(self.sp, 'fine') + autostig(self.sp, 'fine') + + + self.imagingCoordinates['tiles'] = np.array([[self.imagingCoordinates['tiles'][0][0], self.imagingCoordinates['tiles'][0][1], autofocus(self.sp, 'fine')]]) + logger.debug('The imaging coordinates will be ' + str(self.imagingCoordinates['tiles'])) + + elif ((self.mp.tileGrid == np.array([2,2])).all()) or ((self.mp.tileGrid == np.array([3,3])).all()): + + # already at the center of the section + + logger.debug('Special focusing for [2,2] grid') + time.sleep(sleepBeforeContrast) + moveToContrast(0, 0) + + autofocus(self.sp, 'fine') + autostig(self.sp, 'fine') + WD = autofocus(self.sp, 'fine') + + + allImagingCoordinates = [] + for id, imagingCoordinates in enumerate(self.imagingCoordinates['tiles']): # set the same working distance for all tiles + # self.imagingCoordinates['tiles'][id] = np.array([imagingCoordinates[0], imagingCoordinates[1], WD]) + allImagingCoordinates.append([imagingCoordinates[0], imagingCoordinates[1], WD]) + self.imagingCoordinates['tiles'] = np.array(allImagingCoordinates) + logger.debug('The imaging coordinates will be ' + str(self.imagingCoordinates['tiles'])) + + + else: + self.focusedPoints = [] # this is needed for after interruptions: the focused points should be cleared + self.startingStig = getStig() + for idPoint, autofocusPosition in enumerate(self.imagingCoordinates['autofocus']): + logger.debug('Autofocusing/stig of point number ' + str(idPoint) + ' in Tile number ' + str(self.index)) + 
moveStage(autofocusPosition[0], autofocusPosition[1]) + + setWD(self.startingWD) # the case happened that the focus failed in the first corner, and the wrong focus propagated. Going back each time to startingWD is a first approximation. Ideally it would go back to the average of the previous section(s) + + if (idPoint == 0): + self.getRoughFocus() + + time.sleep(sleepBeforeContrast) + moveToContrast(self.index, idPoint) + + + if (idPoint == 0): # foc stig foc for first corner of the autofocuses + WD = autofocus(self.sp, 'fine') + stig = autostig(self.sp, 'fine') + if (not isNewStigOk(stig, self.startingStig)): + setStig(self.startingStig) # set to the stig of the previous section + self.stigs.append(self.startingStig) + logger.warning('Warning in section ' + str(self.index) + ': Rejection of autostig in first corner. Setting stigmation to stig of previous section') + else: + self.stigs.append(stig) + + if (idPoint == 3): # foc stig foc for fourth corner of the autofocuses + WD = autofocus(self.sp, 'fine') + stig = autostig(self.sp, 'fine') + if (not isNewStigOk(stig, self.stigs[0])): + self.stigs.append(self.stigs[0]) + setStig(self.stigs[0]) # set to the stig of the previous section + logger.warning('Warning in section ' + str(self.index) + ': Rejection of autostig in fourth corner. 
Setting stigmation to stig of first corner') + else: + self.stigs.append(stig) + + WD = autofocus(self.sp, 'fine') + self.focusedPoints.append([autofocusPosition[0], autofocusPosition[1], WD]) + + self.imagingCoordinates['tiles'] = focusThePoints(np.array(self.focusedPoints).T, self.imagingCoordinates['tiles'].T).T + + logger.debug('The imaging coordinates will be ' + str(self.imagingCoordinates['tiles'])) + logger.info('Interpolative plane calculated for Section number ' + str(self.index)) + + def scanTiles(self): + setScanRate(self.sp.scanRate) + setMag(self.mp.mag) + + tilesToScan = range(self.startingTile, len(self.imagingCoordinates['tiles']), 1) # for restart after interruption + + for idTile in tilesToScan: + tileCoordinates = self.imagingCoordinates['tiles'][idTile] + logger.info('Scanning Tile ' + str(idTile) + ' of section ' + str(self.index)) + moveStage(tileCoordinates[0], tileCoordinates[1]) + if len(tileCoordinates) == 3: #tileCoordinates might lack the WD coordinate when I am testing short scans + setWD(tileCoordinates[2]) + acquireInSitu(self.mp.tileSizeIndex, self.index, self.mp.ids[idTile], self.folderSectionSave) + self.startingTile = idTile + 1 + # xxx should I autostig from time to time ? + + def moveToSection(self): + a.Set('DP_X_BACKLASH', VARIANT(pythoncom.VT_R4, 3)) + a.Set('DP_Y_BACKLASH', VARIANT(pythoncom.VT_R4, 3)) + moveStage(self.center[0], self.center[1]) + a.Set('DP_X_BACKLASH', VARIANT(pythoncom.VT_R4, 0)) + a.Set('DP_Y_BACKLASH', VARIANT(pythoncom.VT_R4, 0)) + setScanRotation(self.angle) + logger.debug('Moved to center of section ' + str(self.index)) + + +################# +### Constants ### +if __name__ == '__main__': + + # Initializations + tableFreeze = {} + tableFreeze['End Frame'] = 0 + tableFreeze['End Line'] = 1 + tableFreeze['Command'] = 2 + + pixelsCalib = 32768 * 24576 + dwellTimes = np.array([53.2/60., 1.6, 2.9, 5.6, 11, 21.7, 43.2, 1.4*60, 2.9*60, 6, 11, 1.9*24*60, 3.8*60*24, 7.6*24*60, 15*60*24]) * 60. 
/ float(pixelsCalib) + # /!\ Recheck the availableTilesize, whether it makes sense with the fact that tilSize number 5 does not exist + availableTileSizes_px = np.array([[1024, 768], [512, 384], [2048, 1536], [3072, 2304], [4096, 3072], [6144, 4608], [6144, 4608], [8192, 6144], [12288, 9216], [16384, 12288], [24576, 18432], [32768, 24576]]) + + magCalib = 0.787197714089416 + + sleepFocus = 0.3 + sleepUnfreeze = 0.5 + sleepScanning = 0.5 + sleepMoveStage = 3 + sleepSetMag = 0.3 + sleepSetScanRate = 0.2 + sleepSetTileSize = 0.5 + sleepSetRotation = 0.5 + sleepSetStig = 0.3 + sleepSetWD = 0.2 + sleepFreeze = 0.2 + sleepFreezeEndOfFrame = 0.2 + sleepAcquireStart = 4 # 2 is not working (maybe because of the backlash ?) + sleepBeforeContrast = 1 + + mosaicAutofocusRoughDuration = 120 + tileRoughOverhead = 5 + 5 # stage move + writing to disk + + gunParameters = ['AP_GUNALIGN_X', 'AP_GUNALIGN_Y', 'AP_EXTCURRENT', 'AP_MANUALEXT', 'AP_MANUALKV', 'AP_ACTUALKV', 'AP_ACTUALCURRENT', 'AP_FILAMENT_AGE', 'DP_FIL_BLOWN', 'DP_RUNUPSTATE', 'DP_HIGH_CURRENT'] + beamParameters = ['AP_BEAMSHIFT_X', 'AP_BEAMSHIFT_Y', 'AP_BEAM_OFFSET_X', 'AP_BEAM_OFFSET_Y', 'AP_BRIGHTNESS', 'AP_CONTRAST', 'AP_MAG', 'AP_WD', 'AP_SPOT', 'AP_PIXEL_SIZE', 'AP_SCM', 'AP_SPOTSIZE', 'AP_IPROBE', 'AP_STIG_X', 'AP_STIG_Y', 'AP_AUTO_BRIGHT', 'AP_AUTO_CONTRAST', 'AP_ZOOM_FACTOR', 'AP_TILT_ANGLE', 'DP_BEAM_BLANKED', 'DP_BEAM_BLANKING', 'DP_AUTO_FUNCTION', 'DP_SCM_RANGE', 'DP_SCM', 'DP_AUTO_VIDEO', ] + scanParameters = ['AP_SPOT_POSN_X', 'AP_SPOT_POSN_Y', 'AP_LINE_POSN_X', 'AP_LINE_POSN_Y', 'AP_LINE_LENGTH', 'AP_SCANROTATION', 'AP_PIXEL_SIZE', 'AP_LINE_TIME', 'AP_FRAME_TIME', 'AP_FRAME_AVERAGE_COUNT', 'AP_FRAME_INT_COUNT', 'AP_LINE_INT_COUNT', 'AP_RED_RASTER_POSN_X', 'AP_RED_RASTER_POSN_Y', 'AP_RED_RASTER_W', 'AP_RED_RASTER_H', 'AP_LINE_AVERAGE_COUNT', 'AP_NR_COEFF', 'AP_WIDTH', 'AP_HEIGHT', 'DP_SCAN_ROT', 'DP_FREEZE_ON', 'DP_LINE_SCAN', 'DP_EXT_SCAN_CONTROL', 'DP_MAX_RATE', 'DP_SCANRATE', 'DP_NOISE_REDUCTION', 
'DP_IMAGE_STORE', 'DP_FROZEN', 'DP_LEFT_FROZEN', 'DP_RIGHT_FROZEN', 'DP_DISPLAY_CHANNELS', 'DP_AUTO_FUNCTION'] + apertureParameters = ['AP_APERTURESIZE', 'AP_APERTURE_ALIGN_X', 'AP_APERTURE_ALIGN_Y', 'AP_APERTUREPOSN_X' , 'AP_APERTUREPOSN_Y', 'DP_APERTURE', 'DP_APERTURE_STATE', 'DP_APERTURE_TYPE'] + detectorParameters = ['AP_PHOTO_NUMBER', 'AP_COLLECTOR_BIAS', 'DP_OUT_DEV', 'DP_4QBSD_Q1', 'DP_4QBSD_Q2', 'DP_4QBSD_Q3', 'DP_4QBSD_Q4', 'DP_4QBSD_VISIBLE', 'DP_4QBSD', 'DP_ZONE', 'DP_DETECTOR_CHANNEL', 'DP_DETECTOR_TYPE', 'DP_HRRU_SPEED', 'DP_HRRU_PHOTO_STATUS', 'DP_HRRU_SOURCE'] + stageParameters = ['AP_STAGE_AT_X', 'AP_STAGE_AT_Y', 'AP_STAGE_AT_Z', 'AP_STAGE_AT_T', 'AP_STAGE_AT_R', 'AP_STAGE_AT_M', 'AP_STAGE_GOTO_X', 'AP_STAGE_GOTO_Y', 'AP_STAGE_GOTO_Z', 'AP_STAGE_GOTO_T', 'AP_STAGE_GOTO_R', 'AP_STAGE_GOTO_M', 'AP_STAGE_HIGH_X', 'AP_STAGE_HIGH_Y', 'AP_STAGE_HIGH_Z', 'AP_STAGE_HIGH_T', 'AP_STAGE_HIGH_R', 'AP_STAGE_HIGH_M', 'AP_STAGE_LOW_X', 'AP_STAGE_LOW_Y', 'AP_STAGE_LOW_Z', 'AP_STAGE_LOW_T', 'AP_STAGE_LOW_R', 'AP_STAGE_LOW_M', 'AP_PIEZO_AT_X', 'AP_PIEZO_GOTO_X', 'AP_PIEZO_GOTO_Y', 'DP_STAGE_TYPE', 'DP_STAGE_BACKLASH', 'DP_STAGE_INIT', 'DP_STAGE_IS', 'DP_STAGE_TOUCH', 'DP_X_BACKLASH', 'DP_Y_BACKLASH', 'DP_Z_BACKLASH', 'DP_T_BACKLASH', 'DP_R_BACKLASH', 'DP_M_BACKLASH', 'DP_X_LIMIT_HIT', 'DP_Y_LIMIT_HIT', 'DP_Z_LIMIT_HIT', 'DP_T_LIMIT_HIT', 'DP_R_LIMIT_HIT', 'DP_X_AXIS_IS', 'DP_Y_AXIS_IS', 'DP_Z_AXIS_IS', 'DP_T_AXIS_IS', 'DP_R_AXIS_IS', 'DP_M_AXIS_IS', 'DP_X_AXIS', 'DP_Y_AXIS', 'DP_Z_AXIS', 'DP_T_AXIS', 'DP_R_AXIS', 'DP_M_AXIS', 'DP_X_ENABLED', 'DP_Y_ENABLED', 'DP_Z_ENABLED', 'DP_T_ENABLED', 'DP_R_ENABLED', 'DP_M_ENABLED', 'DP_STAGE_TILTED', 'DP_JOYSTICK_DISABLE', 'DP_STAGE_SCAN', 'DP_STAGE_SCANNING'] + vacuumParameters = ['AP_HP_TARGET', 'AP_SYSTEM_VAC', 'AP_COLUMN_VAC', 'AP_CHAMBER_PRESSURE', 'DP_COLUMN_CHAMBER_VALVE', 'DP_COLUMN_PUMPING', 'DP_COLUMN_PUMP', 'DP_HP_STATUS', 'DP_VACSTATUS', 'DP_VAC_MODE', 'DP_EP_OK', 'DP_AIRLOCK', 'DP_AIRLOCK_CONTROL', 
'DP_AIRLOCK_READY', 'DP_EHT_VAC_READY', 'DP_BAKEOUT', 'DP_BAKEOUT_STATUS'] + allMerlinParameters = gunParameters + beamParameters + scanParameters + apertureParameters + detectorParameters + stageParameters + vacuumParameters + + thresholdStig = 0.1 + + findContrastPatchLow = np.array([200, 200]) #/!\ works only for square patches + findContrastPatchHigh = np.array([120,120]) + + ###################### + ### I/O Parameters ### + # folder = getDirectory('Please give me the folder containing all landmark and scanning files') + folderSave = os.path.join(r'D:\Atlas_Images\0926\Thesis\ATEST', '') + waferName = 'AWAFERTEST' + + logPath = os.path.join(folderSave, 'log_' + waferName + '.txt') + logger = initLogger(logPath) + + ########################################################################## + ### Initialization of communication and parameters with the microscope ### + a = win32com.client.Dispatch('CZ.EMApiCtrl.1') + a.InitialiseRemoting() + a.Set('DP_SCAN_ROT', VARIANT(pythoncom.VT_R4, 1)) # activate scan rotation + a.Set('DP_FREEZE_ON', VARIANT(pythoncom.VT_R4, 0)) + a.Set('DP_Z_BACKLASH', VARIANT(pythoncom.VT_R4, 0)) + a.Set('DP_T_BACKLASH', VARIANT(pythoncom.VT_R4, 0)) + + stageM = float(a.Get('AP_STAGE_AT_M')[1]) + stageRotation = float(a.Get('AP_STAGE_AT_R')[1]) + stageTilt = float(a.Get('AP_STAGE_AT_T')[1]) + stageZ = float(a.Get('AP_STAGE_AT_Z')[1]) + if stageM != 0: + print 'Warning, the "master-z of the stage" is not equal to 0 and the scripts assume that it is equal to 0. Exiting.' + sys.exit() + if stageTilt != 0: + print 'Warning, the tilt angle is not equal to 0: ' + str(stageTilt) + '. Exiting.' + sys.exit() + + params = logMerlinParameters() + + # Initialization of GUI + root = Tk() + + focusScanRate = 3 + ########################### + ### Scanning Parameters ### + brightness = 25 # should B/C be recorded after the very first tile and kept until the end ? 
+ contrast = 40 + startingStig = getStig() + + # tileSizeIndex = 7 = 8192*6144 + + # INLENS + scanRate = 4 # for calibration inlens + tileSizeIndex = 8 # for inlens + tileGrid = [3,3] + contrastSigma = 2.5 + + overlap = 8 + overlap_pct = np.array([overlap, overlap * 4./3.]) + pixelSize = 8 * 1e-9 + autofocusOffsetFactor = 0.5 + + mp = MosaicParameters(tileSizeIndex, tileGrid, overlap_pct, pixelSize, autofocusOffsetFactor) + sp = ScanningParameters(scanRate, brightness, contrast, startingStig, focusScanRate) + + ########################## + ### Initializing wafer ### + + wafer = Wafer(waferName, mp, sp) + + app = App(root) + root.mainloop() diff --git a/Example_EM_Metadata.txt b/Example_EM_Metadata.txt new file mode 100644 index 0000000..abc3aed --- /dev/null +++ b/Example_EM_Metadata.txt @@ -0,0 +1,19 @@ +name = WaferName +nSections = 500 +scanRate = 5 +dwellTime = 1.61677598953e-06 +brightness = 25 +contrast = 40 +tileWidth = 12288 +tileHeight = 9216 +numTilesX = 2 +numTilesY = 2 +tileOverlapX = 0.08 +tileOverlapY = 0.1067 +pixelSize = 8e-09 +xPatchEffectiveSize = 15073.28 +yPatchEffectiveSize = 10977.28 +magnification = 6005.84193489 +autofocusOffsetFactor = 0.5 +mosaicSize_x = 0.000251656 +mosaicSize_y = 0.00018612 diff --git a/Example_LM_Meta_Data.txt b/Example_LM_Meta_Data.txt new file mode 100644 index 0000000..9119d44 --- /dev/null +++ b/Example_LM_Meta_Data.txt @@ -0,0 +1,8 @@ +width = 2048 +height = 2048 +nChannels = 2 +xGrid = 1 +yGrid = 1 +scaleX = 0.107421875 +scaleY = 0.107421875 +channels = [647,brightfield] \ No newline at end of file diff --git a/LM_Imaging.py b/LM_Imaging.py new file mode 100644 index 0000000..54419e8 --- /dev/null +++ b/LM_Imaging.py @@ -0,0 +1,2005 @@ +''' +'Mag' usually refers to the magnetic resin that contains magnetic and fluorescent particles of different wavelengths +''' +######################################################### +### Start of config example for NikonPeter microscope ### 
+######################################################### +mic = 'NikonPeter' +micromanagerFolder = r'C:\Micro-Manager-1.4.23N' +mmConfigFile = r'E:\UserData\Templier\MM\MMConfig_BCNikon2_Default_Sungsik.cfg' +folderSave = os.path.join(r'E:\UserData\Templier\WorkingFolder', '') + +NikonColors = ['Blue', 'Cyan', 'Green', 'Red', 'Teal', 'Violet', 'White'] # the name of the colors of the Lumencor source - /!\ Warning: white has to be last + +# size of field of view in micrometers for different magnifications +magnificationImageSizes = { +20: [20, 1328.6, 1020.6], +63: [63, 220, 220], +} + +# properties to read the current objective +objectiveProperties = ['TINosePiece', 'Label'] + +# properties to set during initialization +initialProperties = [] +initialProperties.append(['Core', 'Focus','TIZDrive']) # otherwise the z drive is not recognized +initialProperties.append(['TIFilterBlock1', 'Label', '2-Quad']) # or 3-FRAP +initialProperties.append(['TILightPath', 'Label', '2-Left100']) # or 3-Right100 probably for the other camera +initialProperties.append(['Core', 'TimeoutMs', '20000']) # to prevent timeout during long stage movements +for NikonColor in NikonColors: + initialProperties.append(['SpectraLED', NikonColor + '_Level', '100']) + +# Change stage speed for accuracy: faster than 6 seemed to be inacurrate +initialProperties.append(['TIXYDrive', 'SpeedX', '6']) +initialProperties.append(['TIXYDrive', 'SpeedY', '6']) +initialProperties.append(['TIXYDrive', 'ToleranceX', '0']) +initialProperties.append(['TIXYDrive', 'ToleranceY', '0']) + +acquisitionIntervalBF = 5 # in ms, acquisitionInterval during live brightfield imaging +acquisitionIntervalMag = 5 # in ms, acquisitionInterval during live fluo imaging + +############################## +### All channel parameters ### +channelNames = { +'brightfield': ['White', '8-emty', ['SpectraLED', 'White_Level', '10']], +'dapi': ['Violet', '9-DAPI'], +488: ['Cyan', '0-FITC'], +546: ['Green', '5-mCherry'], +647: ['Red', 
'2-Cy5']} + +objectives = [20, 63] +channelSpecs = ['exposure', 'offset'] +channelContexts = ['imaging', 'focusing', 'live'] +channelTargets = ['beads', 'tissue', 'general'] # 'general' used during live + +# initialize the channels dictionnary: contains exposure times and z-offset of all channels +channels = {} +for channelName in channelNames: + channels[channelName] = {} + for objective in objectives: + channels[channelName][objective] = {} + for channelSpec in channelSpecs: + channels[channelName][objective][channelSpec] = {} + for channelContext in channelContexts: + channels[channelName][objective][channelSpec][channelContext] = {} +for channelContext in channelContexts: + channels[channelContext] = {} + +objectiveBeads = 20 +objectiveTissue = 63 + +### General exposure parameters independent of the imaging target (beads or tissue) for live imaging ### +channels['brightfield'][objectiveTissue]['exposure']['live']['general'] = 2 +channels['dapi'][objectiveTissue]['exposure']['live']['general'] = 1 +channels[488][objectiveTissue]['exposure']['live']['general'] = 1 +channels[546][objectiveTissue]['exposure']['live']['general'] = 5 +channels[647][objectiveTissue]['exposure']['live']['general'] = 5 + +channels['brightfield'][objectiveBeads]['exposure']['live']['general'] = 0.2 +channels['dapi'][objectiveBeads]['exposure']['live']['general'] = 5 +channels[488][objectiveBeads]['exposure']['live']['general'] = 5 +channels[546][objectiveBeads]['exposure']['live']['general'] = 5 +channels[647][objectiveBeads]['exposure']['live']['general'] = 5 + +### TISSUE-LIVE ### with objectiveTissue +channels['brightfield'][objectiveTissue]['exposure']['live']['tissue'] = 10 +channels['dapi'][objectiveTissue]['exposure']['live']['tissue'] = 10 +channels[488][objectiveTissue]['exposure']['live']['tissue'] = 10 +channels[546][objectiveTissue]['exposure']['live']['tissue'] = 10 +channels[647][objectiveTissue]['exposure']['live']['tissue'] = 10 + +### TISSUE-LIVE ### with objectiveBeads 
+channels['brightfield'][objectiveBeads]['exposure']['live']['tissue'] = 1 +channels['dapi'][objectiveBeads]['exposure']['live']['tissue'] = 10 +channels[488][objectiveBeads]['exposure']['live']['tissue'] = 10 +channels[546][objectiveBeads]['exposure']['live']['tissue'] = 10 +channels[647][objectiveBeads]['exposure']['live']['tissue'] = 10 + +### TISSUE-IMAGING ### +channels['brightfield'][objectiveTissue]['exposure']['imaging']['tissue'] = 1 +channels['dapi'][objectiveTissue]['exposure']['imaging']['tissue'] = 500 +channels[488][objectiveTissue]['exposure']['imaging']['tissue'] = 500 +channels[546][objectiveTissue]['exposure']['imaging']['tissue'] = 500 +channels[647][objectiveTissue]['exposure']['imaging']['tissue'] = 500 + +### BEADS-LIVE ### +channels['brightfield'][objectiveBeads]['exposure']['live']['beads'] = 0.2 +channels['dapi'][objectiveBeads]['exposure']['live']['beads'] = 20 +channels[488][objectiveBeads]['exposure']['live']['beads'] = 20 +channels[546][objectiveBeads]['exposure']['live']['beads'] = 20 +channels[647][objectiveBeads]['exposure']['live']['beads'] = 20 + +### BEADS-IMAGING ### +channels['brightfield'][objectiveBeads]['exposure']['imaging']['beads'] = 0.1 +channels['dapi'][objectiveBeads]['exposure']['imaging']['beads'] = 100 +channels[488][objectiveBeads]['exposure']['imaging']['beads'] = 100 +channels[546][objectiveBeads]['exposure']['imaging']['beads'] = 100 +channels[647][objectiveBeads]['exposure']['imaging']['beads'] = 100 + +### OFFSET-OBJECTIVEBEADS ### +channels['brightfield'][objectiveBeads]['offset']['imaging']['beads'] = 0 +channels['dapi'][objectiveBeads]['offset']['imaging']['beads'] = 0 +channels[488][objectiveBeads]['offset']['imaging']['beads'] = 0.925 +channels[546][objectiveBeads]['offset']['imaging']['beads'] = 0 # reference +channels[647][objectiveBeads]['offset']['imaging']['beads'] = 0 # reference + +### OFFSET-TISSUE ### +channels[546][objectiveTissue]['offset']['imaging']['tissue'] = 0 # reference 
+channels['brightfield'][objectiveTissue]['offset']['imaging']['tissue'] = 0.2 # well calibrated ... +channels[647][objectiveTissue]['offset']['imaging']['tissue'] = 0.2 +channels[488][objectiveTissue]['offset']['imaging']['tissue'] = 0 +############################## + +####################################################### +### End of config example for NikonPeter microscope ### +####################################################### + +############################ +# Sample specific parameters +############################ +waferName = 'C1_Wafer_500_Tissue' + +# Mosaic Parameters for tissue +tileGrid = [2,2] +overlap = 20 # in percentage +# Mosaic Parameters for mag +tileGridMag = [1,1] +overlapMag = 20 # in percentage + +# What channels should be used for bead imaging +channels['imaging']['beads'] = [488, 546, 'dapi', 'brightfield'] + +# What channels should be used for tissue imaging +channels['imaging']['tissue'] = [488, 546, 647, 'brightfield'] +############################ + + +################### +#### Constants #### +sleepSaver = 0.1 # the saver thread runs every sleepSaver second to check the savingQueue +liveGrabSleep = 0.1 # refresh cycle during the live visualization in s + +# parameters of the cross displayed at the center of the field of view during live imaging +crossLength = 50 +crossWidth = 5 + +stageInc = 1000 # displacement of the stage when the north/south/west/east buttons are pressed +#### End constants #### +####################### + + +import sys +sys.path.append(micromanagerFolder) +import MMCorePy + +import os, time, datetime, shutil, pickle, argparse, tkFileDialog, subprocess, re, copy, random, json + +from operator import itemgetter +import logging, colorlog # colorlog is not yet standard +#import logging + +import threading +from threading import Thread +from Queue import Queue + +from matplotlib import cm +import matplotlib.pyplot as plt +import matplotlib.patches as patches +from mpl_toolkits.mplot3d import Axes3D + +import 
winsound + +import numpy as np +from numpy import sin, pi, cos, arctan, tan, sqrt + +from Tkinter import Label, LabelFrame, Button, Frame, Tk, LEFT, Canvas, Toplevel + +import ctypes + +import copy + +import PIL # xxx does this not need tifffile ? +from PIL import Image, ImageTk + +from scipy import fftpack +from scipy.interpolate import Rbf, InterpolatedUnivariateSpline +from scipy.optimize import brent, minimize_scalar + +##################### +### I/O Functions ### +def mkdir_p(path): + try: + os.mkdir(path) + logger.debug('Folder created: ' + path) + except Exception, e: + if e[0] == 20047 or e[0] == 183: + # IJ.log('Nothing done: folder already existing: ' + path) + pass + else: + logger.error('Exception during folder creation :', exc_info=True) + raise + return path + +def getDirectory(text, startingFolder = None): + if startingFolder: + direc = os.path.join(tkFileDialog.askdirectory(title = text, initialdir = startingFolder), '') + else: + direc = os.path.join(tkFileDialog.askdirectory(title = text), '') + logger.debug('Directory chosen by user: ' + direc) + return direc + +def getPath(text, startingFolder = None): + if startingFolder: + path = tkFileDialog.askopenfilename(title = text, initialdir = startingFolder) + else: + path = tkFileDialog.askopenfilename(title = text) + logger.debug('Path chosen by user: ' + path) + return path + +def findFilesFromTags(folder,tags): + filePaths = [] + for (dirpath, dirnames, filenames) in os.walk(folder): + for filename in filenames: + if (all(map(lambda x:x in filename,tags)) == True): + path = os.path.join(dirpath, filename) + filePaths.append(path) + filePaths = naturalSort(filePaths) + return filePaths + +def readPoints(path): + x,y = [], [] + with open(path, 'r') as f: + lines = f.readlines() + for point in lines: + x.append(float(point.split('\t')[0])) + try: + y.append(float(point.split('\t')[1])) + except Exception, e: + pass + logger.debug('Points read' + str([x,y])) + return np.array([x,y]) + +def 
writePoints(path, points): + with open(path, 'w') as f: + for point in points: + line = str(point[0]) + '\t' + str(point[1]) + '\n' + f.write(line) + logger.debug('The point coordinates have been written') + + +def readSectionCoordinates(path): + with open(path, 'r') as f: + lines = f.readlines() + sections = [] + for line in lines: + points = line.split('\t') + points.pop() + section = [ [int(float(point.split(',')[0])), int(float(point.split(',')[1]))] for point in points ] + sections.append(section) + return sections + +def naturalSort(l): + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] + return sorted(l, key = alphanum_key) + +def initLogger(path): + fileFormatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt = '%d-%m-%Y %H:%M:%S') + fileHandler = logging.FileHandler(path) + fileHandler.setFormatter(fileFormatter) + fileHandler.setLevel(logging.DEBUG) # should I also save an .INFO log ? no: if someone wants to check a log, he probably wants to see the .debug one ... 
+ colorFormatter = colorlog.ColoredFormatter('%(log_color)s%(asctime)s %(levelname)s %(message)s', datefmt = '%d-%m-%Y %H:%M:%S') + streamHandler = colorlog.StreamHandler() + streamHandler.setFormatter(colorFormatter) + + logger = logging.getLogger(__name__) + # clean the logger in case the script is run again in the same console + handlers = logger.handlers[:] + for handler in handlers: + handler.close() + logger.removeHandler(handler) + + logger.setLevel(logging.DEBUG) + logger.propagate = False + + logger.addHandler(fileHandler) + logger.addHandler(streamHandler) + + return logger + +def durationToPrint(d): + return str(round(d/60., 1)) + ' min = ' + str(round(d/3600., 1)) + ' hours = ' + str(round(d/(3600.*24), 1)) + ' days' + +def saver(q): + while True: + if not q.empty(): + logger.debug('Saving queue not empty') + toSave = q.get() + if toSave == 'finished': + logger.debug('Saver thread is going to terminate') + return + else: + sectionIndex, channel, tileId, folder, im, name = toSave + fileName = 'section_' + str(sectionIndex).zfill(4) + '_channel_' + str(channel) + '_tileId_' + str(tileId[0]).zfill(2) + '-' + str(tileId[1]).zfill(2) + (len(name)>0) * ('-' + str(name)) + '.tif' + path = os.path.join(folder, fileName) + logger.debug('Saving snapped image in ' + path) + # imsave(path, im) # with tifffile + + # with PIL and saving as png + # im = np.array(im) + result = PIL.Image.fromarray((im).astype(np.uint16)) + result.save(path) + + time.sleep(sleepSaver) + +########################### +### Geometric functions ### +def applyAffineT(points,coefs): + x,y = np.array(points) + x_out = coefs[1]*x - coefs[0]*y + coefs[2] + y_out = coefs[1]*y + coefs[0]*x + coefs[3] + return np.array([x_out,y_out]) + +def rotate(points, angle): + angleRadian = angle * pi / 180. 
+ coefs = [sin(angleRadian), cos(angleRadian), 0, 0] + return applyAffineT(points,coefs) + +def translate(points, v): + coefs = [0, 1, v[0], v[1]] + return applyAffineT(points,coefs) + +def affineT(sourceLandmarks, targetLandmarks, sourcePoints): + # separating the x and y into separate variables + x_sourceLandmarks, y_sourceLandmarks = np.array(sourceLandmarks).T[:len(targetLandmarks.T)].T #sourceLandmarks trimmed to the number of existing targetlandmarks + x_targetLandmarks, y_targetLandmarks = targetLandmarks + x_sourcePoints, y_sourcePoints = sourcePoints + + # Solving the affine transform + A_data = [] + for i in range(len(x_sourceLandmarks)): + A_data.append( [-y_sourceLandmarks[i], x_sourceLandmarks[i], 1, 0]) + A_data.append( [x_sourceLandmarks[i], y_sourceLandmarks[i], 0, 1]) + b_data = [] + for i in range(len(x_targetLandmarks)): + b_data.append(x_targetLandmarks[i]) + b_data.append(y_targetLandmarks[i]) + A = np.matrix( A_data ) + b = np.matrix( b_data ).T + c = np.linalg.lstsq(A, b)[0].T #solving happens here + c = np.array(c)[0] +# print('Absolute errors in target coordinates : (xError, yError)') +# for i in range(len(x_sourceLandmarks)): + #print ("%f, %f" % ( + # np.abs(c[1]*x_sourceLandmarks[i] - c[0]*y_sourceLandmarks[i] + c[2] - x_targetLandmarks[i]), + # np.abs(c[1]*y_sourceLandmarks[i] + c[0]*x_sourceLandmarks[i] + c[3] - y_targetLandmarks[i]))) + + #computing the accuracy + x_target_computed_landmarks, y_target_computed_landmarks = applyAffineT(sourceLandmarks, c) + accuracy = 0 + for i in range(len(x_targetLandmarks)): + accuracy = accuracy + np.sqrt( np.square( x_targetLandmarks[i] - x_target_computed_landmarks[i] ) + np.square( y_targetLandmarks[i] - y_target_computed_landmarks[i] ) ) + accuracy = accuracy/float(len(x_sourceLandmarks) + 1) +# print 'The mean accuracy in target coordinates is', accuracy + + #computing the target points + x_target_points, y_target_points = applyAffineT(sourcePoints,c) + return np.array([x_target_points, 
y_target_points]) + +def getCenter(corners): + center = np.array(map(np.mean, corners)) + return center + +def getAngle(line): + line = np.array(line) + diff = line[0:2] - line[2:4] + theta = np.arctan2(diff[1], diff[0]) + return theta + +def getZInPlane(x,y,abc): #Fitted plane function + return float(abc[0]*x + abc[1]*y + abc[2]) + +def focusThePoints(focusedPoints, pointsToFocus): + x_pointsToFocus, y_pointsToFocus = pointsToFocus[0], pointsToFocus[1] # works even if pointsToFocus has no z coordinates + x_focusedPoints, y_focusedPoints, z_focusedPoints = focusedPoints # focusedPoints of course has 3 coordinates + + # logger.debug('focusedPoints = ' + str(focusedPoints)) + # logger.debug('pointsToFocus = ' + str(pointsToFocus)) + # remove outliers + idInliers = getInlierIndices(z_focusedPoints) + # logger.debug('idInliers = ' + str(idInliers) ) + # logger.debug('There are ' + str(idInliers.size) + ' inliers in ' + str(map(lambda x:round(x, 2), z_focusedPoints*1e6)) + ' um' ) + if idInliers.size == 3: + logger.warning('One autofocus point has been removed for interpolative plane calculation') + x_focusedPoints, y_focusedPoints, z_focusedPoints = focusedPoints.T[idInliers].T + elif idInliers.size < 3: + logger.warning('There are only ' + str(idInliers.size) + ' inliers for the interpolative plane calculation. 
A strategy should be developed to address such an event.') + + A = np.column_stack([x_focusedPoints, y_focusedPoints, np.ones_like(x_focusedPoints)]) + abc,residuals,rank,s = np.linalg.lstsq(A, z_focusedPoints) + z_pointsToFocus = map(lambda a: getZInPlane (a[0],a[1],abc), np.array([x_pointsToFocus.transpose(), y_pointsToFocus.transpose()]).transpose()) + + # calculating the accuracy + z_check = np.array(map(lambda a: getZInPlane (a[0],a[1],abc), np.array([x_focusedPoints.transpose(), y_focusedPoints.transpose()]).transpose())) + diff = z_check - z_focusedPoints + meanDiff = np.mean(np.sqrt(diff * diff)) + logger.debug('The plane difference is ' + str(diff*1e6) + ' um') + logger.info('The mean distance of focus points to the plane is ' + str(round(meanDiff*1e6, 3)) + ' um') + + return np.array([x_pointsToFocus, y_pointsToFocus, z_pointsToFocus]) + +def transformCoordinates(coordinates, center, angle): + return (translate(rotate(coordinates.T, angle), center)).T + +def getInlierIndices(data, m = 2.5): + d = np.abs(data - np.median(data)) + # mdev = float(np.median(d)) + mdev = float(np.mean(d)) + s = d/mdev if mdev else np.array(len(data) * [0.]) + # print 'mdev', mdev + # print 'd', d + # print 's', s + return np.where(s <= m)[0] + +def getOutlierIndices(data, m = 2.5): + d = np.abs(data - np.median(data)) + # mdev = float(np.median(d)) + mdev = float(np.mean(d)) + s = d/mdev if mdev else np.array(len(data) * [0.]) + # print 'mdev', mdev + # print 'd', d + # print 's', s + return np.where(s > m)[0] + +def bbox(points): + minx, miny = 1e9, 1e9 + maxx, maxy = -1e9, -1e9 + for point in points: + if point[0] > maxx: + maxx = point[0] + if point[0] < minx: + minx = point[0] + if point[1] > maxy: + maxy = point[1] + if point[1] < miny: + miny = point[1] + return minx, miny, maxx-minx, maxy-miny + +def gridInBb(bb, gridLayout = None, gridSpacing = None): + if gridLayout is not None: + gridSpacing = np.array(bb[2:])/(np.array(gridLayout)-1) + else: + gridLayout = 
(np.array(bb[2:])/np.array(gridSpacing)).astype(int) + gridSpacing = np.array(bb[2:])/(np.array(gridLayout)-1) + + topLeftCorner = bb[:2] + gridPoints = [] + for x in range(gridLayout[0]): + for y in range(gridLayout[1]): + gridPoints.append( topLeftCorner + np.array([x,y]) * gridSpacing) + return np.array(gridPoints), gridLayout, gridSpacing + + # # x = np.linspace(bb[0], bb[0] + bb[2], gridLayout[0]) + # # y = np.linspace(bb[1], bb[1] + bb[3], gridLayout[1]) + +def bestNeighbors(gridPoints, targetPoints): + bestNeighbors = [] + targetPoints = np.array(targetPoints).T[:2].T + for gridPoint in gridPoints: + distances = ((np.array(targetPoints) - gridPoint)**2).sum(axis=1) + sortedDistances = distances.argsort() + bestNeighbors.append(targetPoints[sortedDistances[0]]) + return np.array(bestNeighbors) + +def getNearestPoints(points, refPoint, nPoints = 1): + ''' + Returns the nPoints indices of points indicating the closest points to the reference point refPoint + ''' + # return (min((hypot(x2-refPoint[0],y2-refPoint[1]), x2, y2) for x2,y2 in points))[1:3] + + sortedDistances = sorted( [[np.linalg.norm(np.array([point[0],point[1]]) - np.array(refPoint)), id] for id, point in enumerate(points)], key = lambda x: x[0], reverse = False)[:nPoints] + + # print 'sortedDistances XXX UUU', sortedDistances + # return np.array([x[1:3] for x in sortedDistances]), np.array([x[0] for x in sortedDistances]) # centers, indices + return np.array([x[1] for x in sortedDistances]) # indices + +def getPlaneMesh(array2D, array3D, grid): + ''' + array2D gives the x,y boundaries for the mesh + array3D gives the interpolative plane + ''' + # grid = 50 + x = np.linspace(np.min(array2D[0]), np.max(array2D[0]), grid) + y = np.linspace(np.min(array2D[1]), np.max(array2D[1]), grid) + + xv, yv = np.meshgrid(x, y) + + xvFlat = [item for sublist in xv for item in sublist] + yvFlat = [item for sublist in yv for item in sublist] + + planeMesh = focusThePoints( array3D, np.array([xvFlat, yvFlat])) + 
return planeMesh
+
+def getRBFMesh(array2D, array3D, grid):
+ '''
+ array2D gives the x,y boundaries for the mesh
+ array3D gives the interpolative plane
+ '''
+ # grid = 50
+ x = np.linspace(np.min(array2D[0]), np.max(array2D[0]), grid)
+ y = np.linspace(np.min(array2D[1]), np.max(array2D[1]), grid)
+
+ xv, yv = np.meshgrid(x, y)
+
+ xvFlat = np.array([item for sublist in xv for item in sublist])
+ yvFlat = np.array([item for sublist in yv for item in sublist])
+
+ # NOTE(review): np.array(...) wraps ALL the arguments here: array3D[1] lands in
+ # np.array's dtype slot and 'epsilon'/'function' are not np.array keywords, so this
+ # line raises TypeError whenever it runs. The intended call is most likely:
+ # rbf = Rbf(array3D[0], array3D[1], array3D[2], epsilon = 2, function = 'thin_plate')
+ rbf = Rbf(np.array(array3D[0], array3D[1], array3D[2], epsilon = 2, function = 'thin_plate'))
+
+ autofocusedRBF = np.array([xvFlat, yvFlat, rbf(xvFlat, yvFlat)]).T
+
+ return autofocusedRBF
+
+#####################
+### GUI Functions ###
+dirtyCounter = 0
+
+class App:
+ global wafer
+ def __init__(self, master):
+ self.live = False
+ self.acquisitionInterval = acquisitionIntervalBF
+
+ self.frame = Frame(master)
+ self.frame.pack()
+
+ self.button1 = Button(self.frame, text='Acquire tissue *HAF*', command = self.tissueAcquireHAF)
+ self.button1.pack(side=LEFT)
+
+ self.button95 = Button(self.frame, text='Acq. 
manual mosaic *HAF*', command = self.tissueAcquireHAFFromManualSections) + self.button95.pack(side=LEFT) + + self.button50 = Button(self.frame, text='Acquire tissue *manual*', command = self.tissueAcquireManual) + self.button50.pack(side=LEFT) + + self.button2 = Button(self.frame, text='Add mosaic here', command = self.addMosaicHere) + self.button2.pack(side=LEFT) + + self.button3 = Button(self.frame, text='Add lowres landmark', command = self.addLowResLandmark) + self.button3.pack(side=LEFT) + + self.button22 = Button(self.frame, text='Add highres landmark', command = self.addHighResLandmark) + self.button22.pack(side=LEFT) + + self.button4 = Button(self.frame, text='Load wafer', command = self.loadWafer) + self.button4.pack(side=LEFT) + + self.button44 = Button(self.frame, text='ResetWaferKeepTargets', command = self.resetWaferKeepTargetCalibration) + self.button44.pack(side=LEFT) + + self.button5 = Button(self.frame, text='Load sections and landmarks from pipeline', command = self.loadSectionsAndLandmarksFromPipeline) + self.button5.pack(side=LEFT) + + self.button6 = Button(self.frame, text='Acquire mag *HAF*', command = self.magAcquireHAF) + self.button6.pack(side=LEFT) + + self.button24 = Button(self.frame, text='Acquire mag *manual*', command = self.magAcquireManual) + self.button24.pack(side=LEFT) + + self.button7 = Button(self.frame, text='Save Wafer', command = self.saveWafer) + self.button7.pack(side=LEFT) + + self.button9 = Button(self.frame, text='Stop live', command = self.stopLive) + self.button9.pack(side=LEFT) + + # # # self.buttonN = Button(self.frame, text='N', command = self.north) + # # # self.buttonN.pack(side=LEFT) + + # # # self.buttonS = Button(self.frame, text='S', command = self.south) + # # # self.buttonS.pack(side=LEFT) + + # # # self.buttonE = Button(self.frame, text='E', command = self.east) + # # # self.buttonE.pack(side=LEFT) + + # # # self.buttonW = Button(self.frame, text='W', command = self.west) + # # # 
self.buttonW.pack(side=LEFT) + + # self.button11 = Button(self.frame, text='GoToNextMag', command = self.goToNextMag) + # self.button11.pack(side=LEFT) + + # self.button12 = Button(self.frame, text='resetGreenFocus', command = self.resetGreenFocus) + # self.button12.pack(side=LEFT) + + # # self.button13 = Button(self.frame, text='rbf', command = self.rbf) + # # self.button13.pack(side=LEFT) + + # # self.button14 = Button(self.frame, text='plane', command = self.plane) + # # self.button14.pack(side=LEFT) + + # self.button15 = Button(self.frame, text='goToNextMagFocus', command = self.goToNextMagFocus) + # self.button15.pack(side=LEFT) + + self.button66 = Button(self.frame, text='Live Dapi', command = self.liveDapi) + self.button66.pack(side=LEFT) + + self.button16 = Button(self.frame, text='Live Green', command = self.liveGreen) + self.button16.pack(side=LEFT) + + self.button30 = Button(self.frame, text='Live Red', command = self.liveRed) + self.button30.pack(side=LEFT) + + self.button8 = Button(self.frame, text='Live BF', command = self.liveBF) + self.button8.pack(side=LEFT) + + self.button87 = Button(self.frame, text='Live 647', command = self.live647) + self.button87.pack(side=LEFT) + + + # self.button17 = Button(self.frame, text='AF In Place', command = self.afInPlace) + # self.button17.pack(side=LEFT) + + self.button18 = Button(self.frame, text='Snap', command = self.snap) + self.button18.pack(side=LEFT) + + # self.button19 = Button(self.frame, text='ManualRetakesMag', command = self.manualRetakesMag) + # self.button19.pack(side=LEFT) + + self.button20 = Button(self.frame, text='logHAF', command = self.logHAF) + self.button20.pack(side=LEFT) + + self.button23 = Button(self.frame, text='HAF', command = self.logHAF) + self.button23.pack(side=LEFT) + + self.button27 = Button(self.frame, text='ToggleNikonHAF', command = self.toggleNikonAutofocus) + self.button27.pack(side=LEFT) + + self.button21 = Button(self.frame, text='ResetImagedSections', command = 
self.resetImagedSections) + self.button21.pack(side=LEFT) + + self.button81 = Button(self.frame, text='getXYZ', command = self.logXYZ) + self.button81.pack(side=LEFT) + + + self.buttonQuit = Button(self.frame, text='Quit', command = root.destroy) + self.buttonQuit.pack(side=LEFT) + + photo = ImageTk.PhotoImage(PIL.Image.fromarray(np.zeros((int(imageSize_px[0]), int(imageSize_px[1]))))) + self.label = Label(master, image = photo) + self.label.image = photo # keep a reference? + self.label.pack() + + # self.canvas = Canvas(master, width = imageSize_px[0], height = imageSize_px[1]) + # self.imageCanvas = self.canvas.create_image(0, 0, image = photo) + # self.canvas.grid(row = 0, column = 0) + + def snap(self): + if mmc.isSequenceRunning(): + mmc.stopSequenceAcquisition() + takeImage(0, [0,0], folderSave, str(int(time.time()))) + + def north(self): + setXY(getXY()[0], getXY()[1] + stageInc) + + def south(self): + setXY(getXY()[0], getXY()[1] - stageInc) + + def east(self): + setXY(getXY()[0] - stageInc, getXY()[1]) + + def west(self): + setXY(getXY()[0] + stageInc, getXY()[1]) + + def acquireWaferButtonAction(self): + self.stopLive() + # # wafer.magSections = [] + # # wafer.createMagSectionsFromPipeline() + # # self.frame.quit() + self.acquireWafer() + + def resetGreenFocus(self): + wafer.targetMagFocus = [] + + def rbf(self): + rbf = Rbf(np.array(wafer.targetMagFocus).T[0], np.array(wafer.targetMagFocus).T[1], np.array(wafer.targetMagFocus).T[2], epsilon = 2, function = 'thin_plate') + wafer.targetMagCenters = np.array([np.array(wafer.targetMagCenters).T[0], np.array(wafer.targetMagCenters).T[1], rbf(np.array(wafer.targetMagCenters).T[0], np.array(wafer.targetMagCenters).T[1]) ]).T + logger.debug('rbf') + + def plane(self): + wafer.targetMagCenters = focusThePoints(np.array(wafer.targetMagFocus).T, np.array(wafer.targetMagCenters).T).T + logger.debug('Plane') + + def liveGrab(self): + logger.debug('Starting liveGrab ' + str(self.acquisitionInterval) ) + if not 
mmc.isSequenceRunning(): + mmc.startContinuousSequenceAcquisition(self.acquisitionInterval) + time.sleep(0.2) + + currentChannel = getChannel() + logger.debug('** LIVEGRAB current channel ** ' + str(currentChannel)) + try: + while self.live: + if (mmc.getRemainingImageCount() > 0): + lastImage = mmc.getLastImage() + if currentChannel == 'brightfield': + im = np.uint8(lastImage/256) + # im = mmc.getLastImage() + else: + # im = np.uint8(lastImage/256) + # im = np.uint8(lastImage/256 + 50) + im = lastImage + for x in range(crossLength): + for y in range(crossWidth): + im[imageSize_px[1]/2 - crossWidth/2 + y][imageSize_px[0]/2 - int(crossLength/2.) + x] = 0 + for y in range(crossLength): + for x in range(crossWidth): + im[imageSize_px[1]/2 - int(crossLength/2.) + y ][imageSize_px[0]/2 - crossWidth/2 + x] = 0 + else: + im = np.zeros((30, 30)) + + im = im[::2, ::2] + photo = ImageTk.PhotoImage(PIL.Image.fromarray(im)) + self.label.configure(image=photo) + self.label.image = photo # keep a reference! + self.label.pack() + + # self.canvas.itemconfig(self.imageCanvas, image = photo) + + time.sleep(self.acquisitionInterval/50.) 
+ except Exception, e:
+ logger.error('In liveGrab: ' + str(e))
+ logger.debug('liveGrab terminated')
+
+ def liveGreen(self):
+ # global live, liveThread
+ self.acquisitionInterval = acquisitionIntervalMag
+ self.live = True
+ self.liveThread = Thread(target = self.liveGrab)
+
+ ##########################################
+ # # # Setting fluo conditions # # #
+ setChannel(488)
+ if mic == 'Z2':
+ # NOTE(review): objective key 5 is not in this config's 'objectives' list ([20, 63]),
+ # so the Z2 branch would raise KeyError with this config (same pattern in the other
+ # live* methods below) — presumably a leftover from another microscope's config.
+ setExposure(channels[488][5]['exposure']['live']['beads'])
+ elif mic == 'Leica' or mic == 'Nikon' or mic == 'NikonPeter':
+ setExposure(channels[488][currentObjectiveNumber]['exposure']['live']['general'])
+ mmc.setAutoShutter(False)
+ openShutter()
+ self.liveThread.start()
+
+ def liveRed(self):
+ # global live, liveThread
+ self.acquisitionInterval = acquisitionIntervalMag
+ self.live = True
+ self.liveThread = Thread(target = self.liveGrab)
+
+ ##########################################
+ # # # Setting fluo conditions # # #
+ setChannel(546)
+ if mic == 'Z2':
+ setExposure(channels[546][5]['exposure']['live']['beads'])
+ elif mic == 'Leica' or mic == 'Nikon' or mic == 'NikonPeter':
+ setExposure(channels[546][currentObjectiveNumber]['exposure']['live']['general'])
+ mmc.setAutoShutter(False)
+ openShutter()
+ self.liveThread.start()
+
+ def live647(self):
+ # global live, liveThread
+ self.acquisitionInterval = acquisitionIntervalMag
+ self.live = True
+ self.liveThread = Thread(target = self.liveGrab)
+
+ ##########################################
+ # # # Setting fluo conditions # # #
+ setChannel(647)
+ if mic == 'Z2':
+ # NOTE(review): channel is set to 647 above but the exposures below are read from
+ # channels[546] in both branches — looks like a copy-paste from liveRed; confirm
+ # whether channels[647] exposures were intended.
+ setExposure(channels[546][5]['exposure']['live']['beads'])
+ elif mic == 'Leica' or mic == 'Nikon' or mic == 'NikonPeter':
+ setExposure(channels[546][currentObjectiveNumber]['exposure']['live']['general'])
+ mmc.setAutoShutter(False)
+ openShutter()
+ self.liveThread.start()
+
+
+ def liveDapi(self):
+ # global live, liveThread
+ self.acquisitionInterval = acquisitionIntervalMag
+ self.live = True
+ self.liveThread = Thread(target = self.liveGrab) 
+ + ########################################## + # # # Setting fluo conditions # # # + setChannel('dapi') + if mic == 'Z2': + setExposure(channels['dapi'][5]['exposure']['live']['beads']) + elif mic == 'Leica' or mic == 'Nikon' or mic == 'NikonPeter': + setExposure(channels['dapi'][currentObjectiveNumber]['exposure']['live']['general']) + mmc.setAutoShutter(False) + openShutter() + self.liveThread.start() + + def liveBF(self): + # global live, liveThread + self.acquisitionInterval = acquisitionIntervalMag + self.live = True + self.liveThread = Thread(target = self.liveGrab) + + ########################################## + # # # Setting fluo conditions # # # + setChannel('brightfield') + if mic == 'Z2': + setExposure(channels['brightfield'][5]['exposure']['live']['beads']) + elif mic == 'Leica' or mic == 'Nikon' or mic == 'NikonPeter': + setExposure(channels['brightfield'][currentObjectiveNumber]['exposure']['live']['general']) + mmc.setAutoShutter(False) + openShutter() + self.liveThread.start() + + def stopLive(self): + self.live = False + time.sleep(0.2) + if mmc.isSequenceRunning(): + mmc.stopSequenceAcquisition() + closeShutter() + # root.destroy() # no, it kills everything ... + + def addLowResLandmark(self): + stageXY = getXY() + logger.info('wafer.targetLowResLandmarks --- before --- ' + str(wafer.targetLowResLandmarks)) + wafer.targetLowResLandmarks.append([stageXY[0], stageXY[1], getZ()]) + logger.info('wafer.targetLowResLandmarks --- after --- ' + str(wafer.targetLowResLandmarks)) + + nValidatedLandmarks = len(wafer.targetLowResLandmarks) + if nValidatedLandmarks == len(wafer.sourceLandmarks.T): # all target landmarks have been identified + logger.info('Good. 
All landmarks have been calibrated.') + wafer.save() + writePoints(os.path.join(wafer.pipelineFolder, 'target_lowres_landmarks.txt'), wafer.targetLowResLandmarks) + # # # # self.generateSections() # now done with the high res calibration + # # # # wafer.save() + elif nValidatedLandmarks > 1: + logger.info('There are still ' + str(len(wafer.sourceLandmarks[0]) - nValidatedLandmarks ) + ' landmarks to calibrate. The stage has been moved to the next landmark to be calibrated') + + nextXY = affineT(wafer.sourceLandmarks, np.array(wafer.targetLowResLandmarks).T[:2], wafer.sourceLandmarks).T[nValidatedLandmarks] + logger.debug('Computing nextXY: wafer.sourceLandmarks - ' + str(wafer.sourceLandmarks) + ' np.array(wafer.targetLowResLandmarks).T[:2] - ' + str(np.array(wafer.targetLowResLandmarks).T[:2]) + ' nextXY = ' + str(nextXY)) + setXY(*nextXY) + + if nValidatedLandmarks > 3: + nextZ = focusThePoints(np.array(wafer.targetLowResLandmarks).T, np.array([[nextXY[0]], [nextXY[1]]]))[2][0] # the interpolative plane is calculated on the fly + print 'nextZ', nextZ + setZ(nextZ) + else: + logger.info('Please go manually to the second landmark.') + + def addHighResLandmark(self): + nHighRes = len(wafer.targetHighResLandmarks) + if nHighRes == 0: + setXY(*wafer.targetLowResLandmarks[0][:2]) + logger.info('Just moved to first landmark. Adjust this first landmark position with high resolution') + wafer.targetHighResLandmarks.append('dummy') + elif wafer.targetHighResLandmarks[0] == 'dummy': + wafer.targetHighResLandmarks.pop() + stageXY = getXY() + wafer.targetHighResLandmarks.append([stageXY[0], stageXY[1], getZ()]) + setXY(*wafer.targetLowResLandmarks[1][:2]) + logger.info('First high res landmark calibrated. Just moved to the second low res landmark: please adjust it.') + elif nHighRes == len(wafer.targetLowResLandmarks): + logger.info('HighRes target wafers had already been calibrated. 
Reinitializing calibration ...')
+ wafer.targetHighResLandmarks = []
+ setXY(*wafer.targetLowResLandmarks[0][:2])
+ logger.info('Just moved to first landmark. Adjust this first landmark position with high resolution')
+ wafer.targetHighResLandmarks.append('dummy')
+
+ else:
+ stageXY = getXY()
+ wafer.targetHighResLandmarks.append([stageXY[0], stageXY[1], getZ()])
+
+ if not (len(wafer.targetHighResLandmarks) == len(wafer.targetLowResLandmarks)):
+ nextXY = affineT(wafer.sourceLandmarks, np.array(wafer.targetHighResLandmarks).T[:2], wafer.sourceLandmarks).T[len(wafer.targetHighResLandmarks)]
+ setXY(*nextXY)
+ logger.info(str(len(wafer.targetHighResLandmarks)) + ' high res landmarks calibrated.')
+ else:
+ logger.info('All high res landmarks calibrated. Generating all sections.')
+ wafer.save()
+ writePoints(os.path.join(wafer.pipelineFolder, 'target_highres_landmarks.txt'), wafer.targetHighResLandmarks)
+ self.generateSections()
+ wafer.save()
+
+
+ def resetWaferKeepTargetCalibration(self): # no need to make wafer global as it was already global, right ?
+ newWafer = Wafer(waferName, ip)
+ newWafer.targetLowResLandmarks = copy.deepcopy(wafer.targetLowResLandmarks)
+ newWafer.targetHighResLandmarks = copy.deepcopy(wafer.targetHighResLandmarks)
+ newWafer.targetMagFocus = copy.deepcopy(wafer.targetMagFocus)
+ newWafer.sourceLandmarks = copy.deepcopy(wafer.sourceLandmarks)
+ newWafer.sourceMagDescription = copy.deepcopy(wafer.sourceMagDescription)
+ newWafer.sourceSectionsMagCoordinates = copy.deepcopy(wafer.sourceSectionsMagCoordinates)
+ newWafer.sourceSectionsTissueCoordinates = copy.deepcopy(wafer.sourceSectionsTissueCoordinates)
+ newWafer.sourceTissueDescription = copy.deepcopy(wafer.sourceTissueDescription)
+ # NOTE(review): rebinding the local name 'self' does NOT update the global 'wafer'
+ # (nor the App instance); the fresh Wafer is only visible inside this method, so the
+ # reset has no lasting effect. The intent was probably 'global wafer; wafer = newWafer'
+ # — confirm before relying on this button.
+ self = newWafer # wtf is that ? 
+
+ self.createSections()
+
+ def addMosaicHere(self):
+ wafer.addCurrentPosition()
+
+ def generateSections(self):
+ wafer.createSections()
+
+ def acquireWafer(self):
+ self.stopLive()
+ wafer.acquire()
+
+ def loadWafer(self):
+ global wafer
+ waferPath = getPath('Select the wafer pickle file', startingFolder = folderSave)
+ # NOTE(review): pickled data is normally opened in binary mode ('rb'); text mode can
+ # corrupt pickle streams on Windows — confirm which mode wafer.save() writes with.
+ f = open(waferPath, 'r')
+ wafer = pickle.load(f)
+ f.close()
+ for magSection in wafer.magSections:
+ magSection.localized = False
+
+
+ def saveWafer(self):
+ wafer.save()
+
+ # def manualRetakesMag(self):
+ # wafer.manualRetakes(mag = True)
+
+ def loadSectionsAndLandmarksFromPipeline(self):
+ pipelineFolder = getDirectory('Select the folder containing the sections and landmarks from the pipeline', startingFolder = folderSave)
+ wafer.pipelineFolder = pipelineFolder # needed to write the target landmark coordinates for later proper orientation
+
+ # NOTE(review): the four pipeline filenames below end in '.txt.' (trailing dot) whereas
+ # source_ROI_description further down uses '.txt'. Windows strips a trailing dot when
+ # opening files, which would mask the typo there; on other platforms these lookups fail.
+ sourceSectionsMagPath = os.path.join(pipelineFolder, 'source_sections_mag.txt.')
+ sourceSectionsTissuePath = os.path.join(pipelineFolder, 'source_sections_tissue.txt.')
+
+ wafer.sourceSectionsMagCoordinates = readSectionCoordinates(sourceSectionsMagPath) # list of lists
+ wafer.sourceSectionsTissueCoordinates = readSectionCoordinates(sourceSectionsTissuePath) # list of lists
+
+ # # wafer.sourceSectionCenters = np.array([getCenter(np.array(sourceSectionTissueCoordinates).T) for sourceSectionTissueCoordinates in wafer.sourceSectionsCoordinates])
+
+ sourceTissueMagDescriptionPath = os.path.join(pipelineFolder, 'source_tissue_mag_description.txt.') # 2 sections: template tissue and template mag
+ if os.path.isfile(sourceTissueMagDescriptionPath):
+ wafer.sourceTissueDescription, wafer.sourceMagDescription= readSectionCoordinates(sourceTissueMagDescriptionPath)
+ else:
+ logger.warning('There is no source_tissue_mag_description')
+
+ sourceLandmarksPath = os.path.join(pipelineFolder, 'source_landmarks.txt.')
+ wafer.sourceLandmarks = readPoints(sourceLandmarksPath)
+
+ sourceROIDescriptionPath = 
os.path.join(pipelineFolder, 'source_ROI_description.txt') + if os.path.isfile(sourceROIDescriptionPath): + wafer.sourceROIDescription = readSectionCoordinates(sourceROIDescriptionPath) + else: + logger.warning('There is no source_ROI_description. The center of the section will be used.') + + def magAcquireHAF(self): + self.stopLive() + wafer.magSections = [] + wafer.createMagSectionsFromPipeline() + wafer.magAcquire() + + def magAcquireManual(self): + self.stopLive() + if wafer.magSections == []: + wafer.magSections = [] + wafer.createMagSectionsFromPipeline() + wafer.magAcquire(manualFocus = True) + self.liveGreen() + time.sleep(0.2) + self.liveGreen() + winsound.Beep(440,100) + + def tissueAcquireHAF(self): + self.stopLive() + wafer.sections = [] + wafer.createSectionsFromPipeline() + wafer.acquire() + + def tissueAcquireHAFFromManualSections(self): + self.stopLive() + wafer.acquire(manualFocus = False) + + def tissueAcquireManual(self): + self.stopLive() + if wafer.sections == []: + wafer.sections = [] + wafer.createSectionsFromPipeline() + wafer.acquire(manualFocus = True) + self.liveRed() + time.sleep(0.2) + self.liveRed() + time.sleep(0.2) + self.liveRed() + winsound.Beep(440,100) + + def afInPlace(self): + # setZSnapAndGetFocusScore(getZ()) + beadAutofocus() + + def manualRetakesMag(self, mag = True): + self.liveGreen() + idsToRetake = readPoints(findFilesFromTags(folderSave, ['manualRetakes'])[0])[0] + + if not hasattr(self, 'counterRetake'): + self.counterRetake = -1 + sectionToRetake = wafer.magSections[idsToRetake[0] - 1] + x,y,z = sectionToRetake.center[0], sectionToRetake.center[1], sectionToRetake.startingZ + setXY(x, y) + elif self.counterRetake < len(idsToRetake): + self.counterRetake = self.counterRetake + 1 + idToRetake = idsToRetake[self.counterRetake] + logger.info('Retaking manually mag section number ' + str(idToRetake)) + + sectionToRetake = wafer.magSections[idToRetake-1] + sectionToRetake.focusedZ = getZ() + # update the taken flag ? 

            self.stopLive()

            # wipe the previously acquired tiles of this section before re-imaging
            for imageToDelete in os.listdir(sectionToRetake.folderSectionSave):
                os.remove(os.path.join(sectionToRetake.folderSectionSave, imageToDelete))
            time.sleep(1)
            openShutter()
            # retake

            for idTile, point in enumerate(sectionToRetake.imagingCoordinates['tiles']):
                setXY(*point[:2]) # Z has just been set
                for imagingChannel in channels['imaging']['beads']:
                    setChannel(imagingChannel)
                    setExposure(channels[imagingChannel][objectiveBeads]['exposure']['imaging']['beads'])
                    logger.debug('Scanning tile ' + str(idTile) + ' with channel ' + str(imagingChannel))

                    logger.info('Deleting in folder ' + str(sectionToRetake.folderSectionSave))

                    takeImage(sectionToRetake.index, sectionToRetake.ip.idsMag[idTile], sectionToRetake.folderSectionSave, name = 'mag')

            # closeShutter()


            # get section and set x,y,z
            # NOTE(review): idsToRetake[self.counterRetake + 1] overruns the list
            # after the last retake — confirm this IndexError path is acceptable.
            sectionToRetake = wafer.magSections[idsToRetake[self.counterRetake + 1] - 1]
            x,y,z = sectionToRetake.center[0], sectionToRetake.center[1], sectionToRetake.startingZ
            setXY(x, y)
            self.liveGreen()

        # # ask user for focus
        # logger.info('Please focus then click ok')

        # # # v = True
        # # # t = time.time()
        # # # while v:
        # # #     if time.time() - t > 5:
        # # #         v = False

        # ctypes.windll.user32.MessageBoxW(0,'Are you done with manual focusing ?','Do it',0)

        # wafer.save()
        # logger.info('MagSection number ' + str(idToRetake) + ' has been retaken')


    # def manualRetakesMag(self, mag = True):
        # self.liveGreen()

    def logHAF(self):
        # Dump the hardware-autofocus state of the MMCore to the debug log.
        logger.debug(str(mmc.getLastFocusScore()) + ' getLastFocusScore')
        logger.debug(str(mmc.getCurrentFocusScore()) + ' getCurrentFocusScore')
        logger.debug(str(mmc.isContinuousFocusEnabled()) + ' isContinuousFocusEnabled')
        logger.debug(str(mmc.isContinuousFocusLocked()) + ' isContinuousFocusLocked')
        # logger.debug(str(mmc.isContinuousFocusDrive(zStage)) + ' isContinuousFocusDrive') # xxx isContinuousFocusDrive (const char *stageLabel)
        if mic == 'Leica':
logger.debug(str(mmc.getAutoFocusOffset()) + ' getAutoFocusOffset') + if mic == 'Nikon' or mic == 'NikonPeter': + logger.debug(str(mmc.getPosition('TIPFSOffset')) + ' autoFocusOffset') + + logger.debug(str(getZ()) + ' getZ') + + def HAF(self): + autofocus() + self.logHAF() + + def resetImagedSections(): + for section in wafer.sections: + section.acquireFinished = False + logger.info('The "imaged" tag has been reverted to False for all sections from current wafer (you need to save manually) ') + + def toggleNikonAutofocus(self): + if mmc.isContinuousFocusEnabled(): + mmc.enableContinuousFocus(False) + else: + mmc.enableContinuousFocus(True) + + def logXYZ(self): + x,y = getXY() + z = getZ() + logger.info(str(x) + ',' + str(y) + ',' + str(z)) + +############################ +### Microscope functions ### +def getZ(): + return mmc.getPosition(zStage) + +def getXY(): + sensorXY = mmc.getXPosition(stage), mmc.getYPosition(stage) + logger.debug('Sensor read stage ' + str([round(sensorXY[0], 3),round(sensorXY[1], 3)]) + ' um' ) + if mic == 'Leica': + x = sensorXY[0] + y = -sensorXY[1] + elif mic == 'Nikon': + x = sensorXY[0] + y = -sensorXY[1]/float(4) # there is an amazing 4x factor between x and y axes ! 
    elif mic == 'NikonPeter':
        x = sensorXY[0]
        y = sensorXY[1] # to calibrate xxx

    else:
        x = sensorXY[0]
        y = sensorXY[1]
    return np.array([x, y])

def setXY(x,y):
    # Move the stage to real-coordinate (x, y) in um, applying the
    # per-microscope axis flips/scalings (inverse of getXY). The Nikon stage
    # occasionally errors, so the move is retried up to twice.
    logger.debug('Moving stage to ' + str([round(x, 3),round(y, 3)]) + ' um' )
#    mmc.waitForDevice(stage)
#    time.sleep(0.1)

    if mic == 'Leica':
        mmc.setXYPosition(stage, x , -y) # the y axis is flipped
    elif mic == 'Nikon':
        try:
            mmc.setXYPosition(stage, x , -y * 4) # the y axis is flipped and there is a factor between the two axes
        except Exception, e:
            logger.error('*** STAGE ERROR LEVEL 1 - TRYING AGAIN ***')
            try:
                mmc.setXYPosition(stage, x , -y * 4) # the y axis is flipped and there is a factor between the two axes
            except Exception, e:
                logger.error('*** STAGE ERROR LEVEL 2 - TRYING AGAIN ***')
                mmc.setXYPosition(stage, x , -y * 4) # the y axis is flipped and there is a factor between the two axes

    elif mic == 'NikonPeter':
        mmc.setXYPosition(stage, x , y) # xxx to calibrate
    else:
        mmc.setXYPosition(stage, x , y)
    mmc.waitForDevice(stage)
    # read back and log the position actually reached
    newXY = getXY()
    logger.debug('Moved stage to ' + str([round(newXY[0], 3),round(newXY[1], 3)]) + ' um' )

def setZ(z):
    # Move the z-drive to z (um) and wait for it to settle.
    mmc.setPosition(zStage,z)
    mmc.waitForDevice(zStage)
    logger.debug('zStage moved to ' + str(round(z,3)) + ' um')

def isShutterOpen():
    # return (int(mmc.getProperty('ZeissReflectedLightShutter', 'State')) == 1)
    return mmc.getShutterOpen()

def openShutter():
    # Only the Leica has an explicit shutter to open; the Nikons gate light via
    # the light-engine channel enables instead (see closeShutter).
    if mic == 'Leica':
        if not isShutterOpen():
            mmc.setShutterOpen(True)
            mmc.waitForDevice(shutter)

def closeShutter():
    if mic == 'Nikon':
        logger.debug('close shutter on the nikon means disenabling the lumencor channels')
        mmc.setProperty('Spectra', 'White_Enable', 0)
        mmc.setProperty('Spectra', 'YG_Filter', 1)
    elif mic == 'NikonPeter':
        logger.debug('close shutter on the nikon means disenabling the lumencor channels')
        mmc.setProperty('SpectraLED', 'White_Enable', 0)
        mmc.setProperty('SpectraLED', 'YG_Filter', 1)

def autofocus():
    # Run the hardware full-focus and return the focused z. On failure
    # (typically the dichroic not in place) toggle continuous focus, wait, and
    # retry once.
    try:
        mmc.fullFocus()
        focusedZ = getZ()
        logger.debug('Hardware autofocus performed: z = ' + str(round(focusedZ,3)) + ' um')
    except Exception,e:
        logger.error('### AUTOFOCUS FAIL ###')
        logger.error(e)

        if mmc.isContinuousFocusEnabled(): # reactivating the autofocus, typical error is dichroic not in place
            mmc.enableContinuousFocus(False)
        else:
            mmc.enableContinuousFocus(True)

        time.sleep(1)
        mmc.fullFocus()
        focusedZ = getZ()
        logger.debug('Hardware autofocus performed AFTER ERROR: z = ' + str(round(focusedZ,3)) + ' um')

    return focusedZ

def setChannel(channel):
    # Switch the excitation/emission hardware to `channel` (a key of
    # channelNames); device handling is microscope-specific.
    if mic == 'Z2':
        microscopeChannelName = microscopeChannelNames[channelNames.index(channel)]
        mmc.setProperty('ZeissReflectorTurret', 'Label', microscopeChannelName)
        mmc.waitForDevice('ZeissReflectorTurret')
        # mmc.setShutterOpen(False)
    elif mic == 'Leica':
        mmc.setProperty('FastFilterWheelEX', 'Label', channelNames[channel][0])
        mmc.setProperty('FastFilterWheelEM', 'Label', channelNames[channel][1])
        mmc.waitForDevice('FastFilterWheelEM') #xxx
        mmc.waitForDevice('FastFilterWheelEX') #xxx
    elif mic == 'Nikon':
        if len(channelNames[channel]) > 2:
            # extra (device, property, value) states, e.g. for brightfield
            for additionalState in channelNames[channel][2:]:
                mmc.setProperty(additionalState[0], additionalState[1], additionalState[2])
        else:
            if mmc.getProperty('TIFilterBlock1', 'Label') != '2-Quad': # if the standard NikonBodyCube has been changed for brightfield. This could be smartly managed to have reduce the number of switches by half (would gain about 2 hours for 1000 sections x [3x3] mosaics)
                mmc.setProperty('TIFilterBlock1', 'Label', '2-Quad')
#            print 'mmc.getProperty(Spectra,Green_Level)', mmc.getProperty('Spectra', 'Green_Level')
#            if mmc.getProperty('Spectra', 'Green_Level') != '100': # does not work I do not understand why. The state is already read as being 100 but it is effectively still at 2 percent ...
            logger.error('Setting green level back to 100') # what is that ?
            mmc.setProperty('Spectra', 'Green_Level', '100')

            if channelNames[channel][0] == 'White':
                mmc.setProperty('Spectra', 'White_Enable', 1)
            else:
                mmc.setProperty('Spectra', 'White_Enable', 0) # closes all channels
                mmc.setProperty('Spectra', channelNames[channel][0] + '_Enable', 1)
            mmc.setProperty('CSUW1-Filter Wheel', 'Label', channelNames[channel][1])
            mmc.setProperty('Spectra', 'YG_Filter', 1) # later in case some delay is needed ...
    elif mic == 'NikonPeter':
        if len(channelNames[channel]) > 2: # happens only for brightfield actually
            for additionalState in channelNames[channel][2:]:
                mmc.setProperty(additionalState[0], additionalState[1], additionalState[2])
        else:
            logger.debug('Setting white level back to 100') # because set at level 10 during brightfield
            mmc.setProperty('SpectraLED', 'White_Level', '100')

            mmc.setProperty('SpectraLED', 'White_Enable', 0) # closes all channels

            mmc.setProperty('SpectraLED', channelNames[channel][0] + '_Enable', 1)
            # the emission wheel sometimes fails to switch: sleep, then retry
            # the setProperty up to twice
            if mmc.getProperty('EmissionWheel', 'Label') != channelNames[channel][1]:
#                mmc.waitForDevice('EmissionWheel') # does that solve the wheel failures ?
#                mmc.waitForDevice('Core') # does that solve the wheel failures ?
                time.sleep(0.1)
#                mmc.waitForSystem() # does that solve the wheel failures ?

                try:
                    mmc.setProperty('EmissionWheel', 'Label', channelNames[channel][1])
                except Exception, e:
                    logger.error('### EMISSIONWHEEL ERROR ###')
                    logger.error(e)
                    time.sleep(1)
                    try:
                        mmc.setProperty('EmissionWheel', 'Label', channelNames[channel][1])
                    except Exception, e:
                        logger.error('###-### EMISSIONWHEEL ERROR LEVEL 2 ###-###')
                        logger.error(e)
                        time.sleep(1)
                        mmc.setProperty('EmissionWheel', 'Label', channelNames[channel][1])

            mmc.setProperty('SpectraLED', 'YG_Filter', 1) # later in case some delay is needed ...
+ + logger.debug('Channel set to ' + str(channel)) + +def getChannel(): + if mic == 'Z2': + return channelNames[microscopeChannelNames.index(mmc.getProperty('ZeissReflectorTurret', 'Label'))] + elif mic == 'Leica': + return channelNames.keys()[channelNames.values().index([mmc.getProperty('FastFilterWheelEX', 'Label'), mmc.getProperty('FastFilterWheelEM', 'Label')])] + elif mic == 'Nikon': + if mmc.getProperty('Spectra', 'Violet_Enable') == '1': + return 'dapi' + elif mmc.getProperty('Spectra', 'Cyan_Enable') == '1': + return 488 + elif mmc.getProperty('Spectra', 'Green_Enable') == '1': + if mmc.getProperty('TIFilterBlock1', 'Label') == '3-FRAP': + return 'brightfield' + else: + return 546 + elif mmc.getProperty('Spectra', 'Red_Enable') == '1': + return 647 + else: + return None + elif mic == 'NikonPeter': + if mmc.getProperty('SpectraLED', 'White_Enable') == '1': + return 'brightfield' + elif mmc.getProperty('SpectraLED', 'Violet_Enable') == '1': + return 'dapi' + elif mmc.getProperty('SpectraLED', 'Cyan_Enable') == '1': + return 488 + elif mmc.getProperty('SpectraLED', 'Green_Enable') == '1': + return 546 + elif mmc.getProperty('SpectraLED', 'Red_Enable') == '1': + return 647 + else: + return None + + else: + return None + +def setExposure(exposure): + mmc.setExposure(exposure) + logger.debug('Exposure set to ' + str(exposure) + ' ms') + +def takeImage(sectionIndex, tileId, folder, name = ''): + # global savingQueue + mmc.snapImage() + channel = getChannel() + logger.debug('Image taken. 
Section: ' + str(sectionIndex) + '; channel: ' + str(channel) + '; tileId: ' + str(tileId) + '; name: ' + str(name)) + im = mmc.getImage() + savingQueue.put([sectionIndex, channel, tileId, folder, im, name]) + +def getImageSize(): + return np.array([mmc.getImageWidth(), mmc.getImageHeight()]) + +def getMicState(saveName = 'micState'): + d = {} + for device in mmc.getLoadedDevices(): + d[device] = {} + for property in mmc.getDevicePropertyNames(device): + d[device][property] = {} + d[device][property]['allowedValues'] = mmc.getAllowedPropertyValues(device, property) + try: + d[device][property]['currentValue'] = mmc.getProperty(device, property) + except Exception, e: + d[device][property]['currentValue'] = e + print 'Microscope State Dictionary + \n', json.dumps(d, indent=4, sort_keys=True) + with open(os.path.join(folderSave, saveName + '.txt'), 'w') as f: + json.dump(d, f) + with open(os.path.join(folderSave, saveName + '_humanReadable.txt'), 'w') as f: + f.write(json.dumps(d, indent=4, sort_keys=True)) + + return d + +def loadLowResLandmark(): + wafer.targetLowResLandmarks = readPoints(os.path.join(os.path.normpath(wafer.pipelineFolder), 'target_lowres_landmarks.txt')) + wafer.targetLowResLandmarks = wafer.targetLowResLandmarks.T + wafer.save() + +def loadHighResLandmark(): + wafer.targetHighResLandmarks = readPoints(os.path.join(os.path.normpath(wafer.pipelineFolder), 'target_highres_landmarks.txt')) + wafer.targetHighResLandmarks = wafer.targetHighResLandmarks.T + wafer.save() + wafer.createSections() + wafer.save() + +def resetNikonStage(): + print mmc.getProperty('LudlController', 'Reset') + mmc.setProperty('LudlController', 'Reset', 'Reset') + print mmc.getProperty('LudlController', 'Reset') + time.sleep(10) + logger.debug('Stage controller has been reset') + +def runImQualityFromFolder(folder): + command = r'python D:\Images\Templier\pyimagequalityranking\pyimq\bin\main.py --mode=directory --working-directory=' + folder + # command = r'python 
D:\Images\Templier\pyimagequalityranking\pyimq\bin\main.py --mode=directory --mode=analyze --mode=plot --result=fpw --working-directory=' + autofocusTestsFolder + + result = subprocess.call(command, shell=True) + +def readImQuality(path): # read the .csv output from the pyimagequality library + with open(path, 'r') as f: + lines = f.readlines() + val = [] + for line in lines: + val.append(line.split(',')) + return val + +########################## +### Imaging parameters ### +class ImagingParameters(object): + def __init__(self, *args): + self.channels = args[0] + self.tileGrid = np.array(args[1]) # 3x4 mosaic for example ... + self.overlap_pct = args[2] + self.objective = args[3] + + self.tileSize = np.array(magnificationImageSizes[self.objective][1:]) + self.pixelSize = 1/2. * (self.tileSize[0]/float(imageSize_px[0]) + self.tileSize[1]/float(imageSize_px[1])) # averaging on x and y to be more precise ? + self.mosaicSize = np.array([0, 0]) # just initializing and showing how it looks + self.ids = [] # indices of the successive tiles [[0,0],[1,0],...,[n,n]] + self.layoutFigurePath = os.path.join(folderSave, 'mosaicLayout.png') + self.templateTileCoordinates = self.getTemplateTileCoordinates(self.mosaicSize, self.tileSize, self.tileGrid, self.overlap_pct, self.ids, self.layoutFigurePath) + + self.tileGridMag = np.array(args[4]) # 3x4 mosaic for example ... + self.overlap_pctMag = args[5] + self.objectiveMag = args[6] + self.tileSizeMag = np.array(magnificationImageSizes[self.objectiveMag][1:]) + self.pixelSizeMag = 1/2. * (self.tileSizeMag[0]/float(imageSize_px[0]) + self.tileSizeMag[1]/float(imageSize_px[1])) # averaging on x and y to be more precise ? 
        self.mosaicSizeMag = np.array([0, 0]) # just initializing and showing how it looks
        self.idsMag = [] # indices of the successive tiles [[0,0],[1,0],...,[n,n]]
        self.layoutFigurePathMag = os.path.join(folderSave, 'mosaicLayoutMag.png')
        self.templateTileCoordinatesMag = self.getTemplateTileCoordinates(self.mosaicSizeMag, self.tileSizeMag, self.tileGridMag, self.overlap_pctMag, self.idsMag, self.layoutFigurePathMag)

        logger.debug('Imaging parameters initialized')

    def getTemplateTileCoordinates(self, mosaicSize, tileSize, tileGrid, overlap_pct, ids, layoutFigurePath):
        # Compute the tile-center coordinates (relative to the mosaic center)
        # and the 4 corner autofocus points; appends the tile indices into
        # `ids` in place and saves a layout figure to layoutFigurePath.
        # Returns (tilesCoordinates, autofocusCoordinates).
        fig = plt.figure() # producing a figure of the mosaic and autofocus locations
        ax = fig.add_subplot(111)

        # compute and plot mosaic size
        mosaicSize = tileSize * tileGrid - (tileGrid - 1) * (overlap_pct/100. * tileSize)
        logger.debug('The size of the mosaic is ' + str(mosaicSize[0] * 1e6) + ' um x ' + str(mosaicSize[1] * 1e6) + ' um')
        p = patches.Rectangle((-mosaicSize[0]/2., -mosaicSize[1]/2.), mosaicSize[0], mosaicSize[1], fill=False, clip_on=False, color = 'blue', linewidth = 3)
        ax.add_patch(p)

        # compute tile locations starting from the first on the top left (which is actually top right in the Merlin ...)
        topLeftCenter = (- mosaicSize + tileSize)/2.

        tilesCoordinates = []
        for idY in range(tileGrid[1]):
            for idX in range(tileGrid[0]):
                if mic == 'Leica': # warning: leica stage inverted on x-axis
                    # id = np.array([tileGrid[0] - 1 - idX, idY])
                    id = np.array([idX, idY])
                else:
                    id = np.array([idX, idY])
                ids.append(id)
                tileCoordinates = (topLeftCenter + id * (1-overlap_pct/100.) * tileSize)
                tilesCoordinates.append(tileCoordinates)
                plt.plot(tileCoordinates[0], tileCoordinates[1], 'ro')
                p = patches.Rectangle((tileCoordinates[0] - tileSize[0]/2. , tileCoordinates[1] - tileSize[1]/2.), tileSize[0], tileSize[1], fill=False, clip_on=False, color = 'red')
                ax.add_patch(p)

        tilesCoordinates = np.array(tilesCoordinates)

        # compute autofocus locations (actually not used)
        # NOTE(review): self.autofocusOffsetFactor is never assigned in __init__
        # as far as this file shows — confirm it is set elsewhere, otherwise this
        # line raises AttributeError on the first call.
        autofocusCoordinates = mosaicSize/2. * (1 + self.autofocusOffsetFactor) * np.array([ [-1 , -1], [1, -1], [-1, 1], [1, 1]]) # 4 points focus:

        # plot autofocus locations
        for point in autofocusCoordinates:
            plt.plot(point[0], point[1], 'bo')

        plt.savefig(layoutFigurePath)
        return tilesCoordinates, autofocusCoordinates

#################################
### Wafer and Section classes ###
class Wafer(object):
    # A wafer: its tissue/mag sections, calibration landmarks, and the
    # acquisition bookkeeping needed to resume after interruptions.
    def __init__(self, *args):
        # args: (name, ImagingParameters)
        self.name = args[0]
        self.ip = args[1] # ImagingParameters
        self.sections = []
        self.magSections = []
        self.folderWaferSave = mkdir_p(os.path.join(folderSave, self.name))
        self.waferPath = os.path.join(self.folderWaferSave, 'Wafer_' + self.name)
        # NOTE(review): uses the global `ip`, not self.ip — confirm they are
        # always the same object.
        shutil.copy(ip.layoutFigurePath, self.folderWaferSave)
        logger.info('Wafer ' + self.name + ' initiated.')
        self.startingTime = -1
        self.finishingTime = -1
        self.targetLowResLandmarks = []
        self.targetHighResLandmarks = []
        self.targetMagFocus = []
        self.timeEstimate = 0

    def createSections(self):
        # From the pipeline (source) coordinates and the calibrated target
        # landmarks, compute the target tissue/mag coordinates, their centers,
        # and an interpolated Z for each center.
        currentZ = getZ()
        if hasattr(self, 'targetHighResLandmarks'):
            if len(self.targetHighResLandmarks) == len(self.sourceLandmarks.T): # all target landmarks have been identified
                # creating targetTissuesCoordinates
                self.targetTissues = []
                self.targetTissuesCoordinates = []
                for sourceSectionTissueCoordinates in self.sourceSectionsTissueCoordinates:
                    if hasattr(self, 'sourceROIDescription'): # transform the sourceRoi to the ROI in the sourceSection using the transform sourceSectionRoi -> sourceSectionCoordinates
                        sourceSectionTissueCoordinates = affineT(np.array(self.sourceROIDescription[0]).T, np.array(sourceSectionTissueCoordinates).T, np.array(self.sourceROIDescription[1]).T).T
                    targetTissueCoordinates = affineT(self.sourceLandmarks, np.array(self.targetHighResLandmarks).T[:2], np.array(sourceSectionTissueCoordinates).T)
                    self.targetTissuesCoordinates.append(targetTissueCoordinates)
                    targetTissue = getCenter(targetTissueCoordinates)
                    try:
                        # interpolate Z at the section center from the focused landmarks
                        targetTissueCenterZ = focusThePoints(np.array(self.targetHighResLandmarks).T, np.array([targetTissue]).T)[-1][0]
                    except Exception, e:
                        targetTissueCenterZ = currentZ
                    self.targetTissues.append([targetTissue[0], targetTissue[1], targetTissueCenterZ])


                self.targetMagsCoordinates = []
                self.targetMagCenters = []
                for sourceSectionMagCoordinates in self.sourceSectionsMagCoordinates:
                    targetMagCoordinates = affineT(self.sourceLandmarks, np.array(self.targetHighResLandmarks).T[:2], np.array(sourceSectionMagCoordinates).T)
                    self.targetMagsCoordinates.append(targetMagCoordinates)
                    targetMagCenter = getCenter(targetMagCoordinates)
                    try:
                        targetMagCenterZ = focusThePoints(np.array(self.targetHighResLandmarks).T, np.array([targetMagCenter]).T)[-1][0]
                    except:
                        targetMagCenterZ = currentZ

                    self.targetMagCenters.append([targetMagCenter[0], targetMagCenter[1], targetMagCenterZ])

                # if hasattr(self, 'sourceMagDescription'):
                    # self.targetMagCoordinates = []
                    # for targetTissueCoordinates in self.targetTissuesCoordinates:
                        # magCoord = affineT(np.array(self.sourceMagDescription[0]).T, targetTissueCoordinates, np.array(self.sourceMagDescription[1]).T )
                        # self.targetMagCoordinates.append(magCoord)
                        # self.targetMagCenters.append(getCenter(magCoord))

                ###### Currently wrong, should simply copy the paragraph above
                # # self.targetROICoordinates = []
                # # self.targetROICenters = []
                # # for targetTissueCoordinates in self.targetTissuesCoordinates: # self.targetTissuesCoordinates seems to have been created earlier
                    # # if hasattr(self, 'sourceROIDescription'): # if no sourceRoiDescription provided, then simply use the center of the tissue sections as center of the ROI
                        # # ROICoord = affineT(np.array(self.sourceROIDescription[0]).T, targetTissueCoordinates, np.array(self.sourceROIDescription[1]).T )
                    # # else:
                        # # ROICoord = targetTissueCoordinates
                    # # self.targetROICoordinates.append(ROICoord)
                    # # self.targetROICenters.append(getCenter(ROICoord))

            else:
                logger.error('Sections cannot be generated because there are currently no target landmarks')


    def addCurrentPosition(self):
        # Create a Section at the current stage position and append it.
        section = Section([len(self.sections), getXY(), getZ(), self.ip, self.folderWaferSave])
        self.sections.append(section)
        logger.info('New section number added with current position')

    def save(self):
        # Pickle the whole wafer (acquisition state included) to waferPath.
        # NOTE(review): opened in text mode ('w'); loadWafer uses 'r' — works
        # with protocol 0 but 'wb'/'rb' would be safer; confirm before changing.
        f = open(self.waferPath, 'w')
        pickle.dump(self, f)
        f.close()
        logger.debug('Wafer saved in ' + self.waferPath)

    def acquire(self, manualFocus = False):
        # Acquire all (remaining) tissue sections. With manualFocus=False the
        # whole wafer is scanned in one run using the hardware autofocus; with
        # manualFocus=True each call acquires the currently-localized section
        # and then moves to the next one for the user to focus.
        # write metadata
        imSize = getImageSize()
        metadataPath = os.path.join(self.folderWaferSave, 'LM_Metadata.txt')
        with open(metadataPath, 'w') as f:
            f.write('width = ' + str(imSize[0]) + '\n')
            f.write('height = '+ str(imSize[1]) + '\n')
            f.write('nChannels = '+ str(len(self.ip.channels['imaging']['tissue'])) + '\n')
            f.write('xGrid = ' + str(self.ip.tileGrid[0]) + '\n')
            f.write('yGrid = ' + str(self.ip.tileGrid[1]) + '\n')

            f.write('scaleX = ' + str(self.ip.pixelSize) + '\n')
            f.write('scaleY = ' + str(self.ip.pixelSize) + '\n')
            f.write('channels = [' + ','.join(map(str, self.ip.channels['imaging']['tissue'])) + ']')


        if (not manualFocus):
            logger.info('Acquire tissue with hardware autofocus')
            self.save()
            logger.info('Starting acquisition of wafer ' + str(self.name))
            self.startingTime = time.time()
            logger.info(str(len(filter(lambda x: x.acquireFinished, self.sections))) + ' sections have been already scanned before this start')

            nSectionsAcquired = sum([section.acquireFinished for section in self.sections] ) # for after interruptions
            sectionIndicesToAcquire = range(nSectionsAcquired, len(self.sections), 1)

            for currentSessionCounter, id in enumerate(sectionIndicesToAcquire):
                section = self.sections[id]
                logger.info('Starting acquisition of section ' + str(section.index) + ' (' + str(id) + ') ' + ' in wafer ' + str(self.name) )
                section.acquire(mag = False, manualFocus = manualFocus)
                closeShutter()

                #logging some durations
                averageSectionDuration = (time.time()- self.startingTime)/float(currentSessionCounter + 1)
                timeRemaining = (len(self.sections) - (id + 1)) * averageSectionDuration
                logger.info(str(currentSessionCounter + 1) + ' sections have been scanned during this session, with an average of ' + str(round(averageSectionDuration/60., 1)) + ' min/section.' )
                logger.info('Time remaining estimated: ' + durationToPrint(timeRemaining) + ' for ' + str((len(self.sections) - (id + 1))) + ' sections' )
                self.save()
            self.finishingTime = time.time()
            elapsedTime = (self.finishingTime - self.startingTime)
            savingQueue.put('finished') # closing the saverThread
            time.sleep(2 * sleepSaver)
            # saverThread.join() # xxx is it ok ? Is it not going to return before ?

            logger.info('The current session for the wafer took ' + durationToPrint(elapsedTime))
            closeShutter()
        else:
            logger.info('Acquire tissue with manual focus')
            mmc.setAutoShutter(False)

            sectionsToAcquire = filter(lambda x: (not x.acquireFinished), self.sections) # I could have TSP ordered the sections earlier ...

            if len(sectionsToAcquire) == 0:
                logger.info('All sections have already been acquired.')
            else:
                logger.info(str(len(sectionsToAcquire)) + ' sections remaining to be acquired')
                # the 'localized' flag marks the single section the stage is
                # currently sitting on (focus adjusted by the user)
                nextTissueSectionToAcquire = filter(lambda x: x.localized, sectionsToAcquire)

                if len(nextTissueSectionToAcquire) == 0:
                    logger.info('Currently not centered on a section. Moving to a section ...')
                    logger.info('Moving to section number ' + str(sectionsToAcquire[0].index) )
                    sectionsToAcquire[0].moveToSection()
                    # updating the localization flag (false for all except for the current one)
                    for tissueSection in wafer.sections:
                        tissueSection.localized = False
                    sectionsToAcquire[0].localized = True
                    logger.info('Section is now localized. Manually adjust the focus then press ManualTissue button again.')
                elif len(nextTissueSectionToAcquire) == 1:
                    theNextTissueSectionToAcquire = nextTissueSectionToAcquire[0]
                    logger.info('Section ' + str(theNextTissueSectionToAcquire.index) + ' is localized. Acquiring ...')
                    theNextTissueSectionToAcquire.acquire(mag = False, manualFocus = manualFocus) # it will update the acquireFinished flag
                    logger.info('Section ' + str(theNextTissueSectionToAcquire.index) + ' acquired.')

                    sectionsToAcquire = filter(lambda x: (not x.acquireFinished), self.sections) # this is the new list
                    if len(sectionsToAcquire) == 0:
                        logger.info('Good, all tissue sections have been acquired.')
                    else:
                        nextTissueSectionToAcquire = [sectionsToAcquire[0]] # enlisting for naming consistency

                        nextTissueSectionToAcquire[0].moveToSection()
                        theNextTissueSectionToAcquire.localized = False # delocalize the previous section
                        nextTissueSectionToAcquire[0].localized = True
                        logger.info('Moved to section ' + str(nextTissueSectionToAcquire[0].index) + '. Manually adjust the focus and press the button again.')
                else:
                    # more than one section flagged as localized: inconsistent state
                    logger.error('It cannot be. Go tell Thomas what a bozo he is.')

            # closeShutter()
            self.save()
            logger.debug('All tissue sections have been acquired. Looking for focus outliers ...') # outdated as localImaging will be better

    def createSectionsFromPipeline(self):
        # Instantiate one Section per target tissue center computed by createSections.
        self.sections = []
        for idSection, targetTissue in enumerate(self.targetTissues): # xxx it should be targetROICenters instead of targetTissues
            section = Section([idSection, [targetTissue[0], targetTissue[1]], targetTissue[2], self.ip, self.folderWaferSave])
            self.sections.append(section)
        logger.debug('Tissue sections have been created')

    def createMagSectionsFromPipeline(self):
        # Instantiate one mag Section per target mag center.
        self.magSections = []
        for idSection, targetMagCenter in enumerate(self.targetMagCenters):
            magSection = Section([idSection, [targetMagCenter[0], targetMagCenter[1]], targetMagCenter[2], self.ip, self.folderWaferSave], mag = True) # targetMagCenter[2] comes from the interpolation of the targetMagFocus (manually focused beads)
            self.magSections.append(magSection)
        logger.debug('Mag sections have been created')

    def magAcquire(self, manualFocus = False):
        # Acquire all (remaining) mag sections: in one run with the hardware
        # autofocus, or one section per call with manual focus.
        if not manualFocus:
            logger.debug('Starting acquisition of all mag sections with hardware autofocus for order retrieval')
            mmc.setAutoShutter(False)

            magSectionsToAcquire = filter(lambda x: (not x.acquireMagFinished), self.magSections) # I could have TSP ordered the sections earlier ...
            for id, magSection in enumerate(magSectionsToAcquire): # xxx should add a filter for only sections not acquired
                logger.info('Ordering: moving to section number ' + str(magSection.index) + '(' + str(id) + ')')
                magSection.acquire(mag = True)
                self.save() # to keep track of which sections were scanned
                # closeShutter()
            self.save()
            logger.debug('All mag sections have been acquired.')
        else:
            logger.debug('Acquire mag with manual focus')
            mmc.setAutoShutter(False)
            magSectionsToAcquire = filter(lambda x: (not x.acquireMagFinished), self.magSections) # I could have TSP ordered the sections earlier ...
+ + # I am using 2 flags: localized (is the section currently centered) and acquireMagFinished (has the section already been acquired) + if len(magSectionsToAcquire) == 0: + logger.info('All mag sections have already been acquired.') + else: + logger.info(str(len(magSectionsToAcquire)) + ' mag sections remaining to be acquired') + nextMagSectionToAcquire = filter(lambda x: x.localized, magSectionsToAcquire) + + if len(nextMagSectionToAcquire) == 0: + logger.info('Currently not centered on a section. Moving to a section ...') + logger.info('Moving to section number ' + str(magSectionsToAcquire[0].index) ) + magSectionsToAcquire[0].moveToSection() + # updating the localization flag (false for all except for the current one) + for magSection in wafer.magSections: + magSection.localized = False + magSectionsToAcquire[0].localized = True + logger.info('Section is now localized. Manually adjust the focus then press ManualMag button again.') + elif len(nextMagSectionToAcquire) == 1: + logger.info('Section ' + str(nextMagSectionToAcquire[0].index) + ' is localized. Acquiring ...') + nextMagSectionToAcquire[0].acquire(mag = True, HAF = False) # it will update the acquireMagFinished flag + logger.info('Section ' + str(nextMagSectionToAcquire[0].index) + ' acquired.') + self.save() + + magSectionsToAcquire = filter(lambda x: (not x.acquireMagFinished), self.magSections) # this is the new list + if len(magSectionsToAcquire) == 0: + logger.info('Good, all mag sections have been acquired.') + else: + nextMagSectionToAcquire = [magSectionsToAcquire[0]] # enlisting for naming consistency + + nextMagSectionToAcquire[0].moveToSection() + nextMagSectionToAcquire[0].localized = True + logger.info('Moved to section ' + str(nextMagSectionToAcquire[0].index) + '. Manually adjust the focus and press the button again.') + else: + logger.error('It cannot be. 
Go tell Thomas what a bozo he is.') + + # closeShutter() + self.save() + logger.debug('All mag sections have been acquired.') + +class Section(object): + def __init__(self, args, angle = 0, mag = False): + self.index = args[0] + self.center = args[1] + self.startingZ = args[2] # given by the interpolative plane + self.ip = args[3] # MosaicParameters + self.folderWaferSave = args[4] + self.angle = angle + self.imagingCoordinates = {} + if mag: + self.imagingCoordinates['tiles'] = transformCoordinates(self.ip.templateTileCoordinatesMag[0], self.center, self.angle) + self.imagingCoordinates['autofocus'] = transformCoordinates(self.ip.templateTileCoordinatesMag[1], self.center, self.angle) + else: + self.imagingCoordinates['tiles'] = transformCoordinates(self.ip.templateTileCoordinates[0], self.center, self.angle) + self.imagingCoordinates['autofocus'] = transformCoordinates(self.ip.templateTileCoordinates[1], self.center, self.angle) + + self.focusedPoints = [] + self.acquireStarted = False + self.acquireFinished = False + self.acquireMagStarted = False + self.acquireMagFinished = False + self.currentZ = self.startingZ + self.startingTile = 0 # for after interruptions + self.folderSectionSave = os.path.join(self.folderWaferSave, 'section_' + str(self.index).zfill(4)) + self.startingTime = -1 + self.finishingTime = -1 + self.focusedMagZ = -1 + self.focusScore = -99 + + self.localized = False # for manual focus: flag that tells whether the stage is currently in position on that section + + def acquire(self, mag = False, manualFocus = False): + self.startingTime = time.time() + mkdir_p(self.folderSectionSave) + logger.info('Scanning: Section ' + str(self.index)) + self.moveToSection() + setZ(self.startingZ) + closeShutter() + if mag: + self.acquireMagStarted = True + # self.focusedMagZ, self.focusScore = beadAutofocus() # careful, beadAutofocus is changing the channel + if not manualFocus: + self.focusedMagZ = autofocus() + else: + self.focusedMagZ = getZ() + for idTile, 
point in enumerate(self.imagingCoordinates['tiles']): + logger.debug('Scanning tile ' + str(idTile)) + setXY(*point[:2]) + if not manualFocus: + autofocus() + tileZ = getZ() +# openShutter() # not necessary on Nikon +# time.sleep(0.1) + for channel in channels['imaging']['beads']: + setZ(tileZ + channels[channel][objectiveBeads]['offset']['imaging']['beads']) + setChannel(channel) + setExposure(channels[channel][objectiveBeads]['exposure']['imaging']['beads']) +# time.sleep(0.1) + takeImage(self.index, self.ip.idsMag[idTile], self.folderSectionSave, name = 'mag') + closeShutter() + closeShutter() + self.acquireMagFinished = True + + else: + self.acquireStarted = True + if not manualFocus: + autofocus() + self.focusedZ = getZ() # the focus of the section + for idTile, point in enumerate(self.imagingCoordinates['tiles']): + logger.debug('Scanning tile ' + str(idTile)) + setXY(*point[:2]) + if not manualFocus: + autofocus() + tileFocus = getZ() # the focus of the tile + openShutter() + # time.sleep(0.1) + for channel in channels['imaging']['tissue']: + setChannel(channel) + setExposure(channels[channel][objectiveTissue]['exposure']['imaging']['tissue']) + currentOffset = channels[channel][objectiveTissue]['offset']['imaging']['tissue'] + if currentOffset != 0: + setZ(tileFocus + currentOffset) + takeImage(self.index, self.ip.ids[idTile], self.folderSectionSave, name = 'tissue') + if currentOffset != 0: + setZ(tileFocus) # ideally would take into account what is the next offset, but annoying and not crucial ... + + self.finishingTime = time.time() + logger.debug('Section ' + str(self.index) + ' acquired. It has taken ' + str((self.finishingTime - self.startingTime)/60.) + ' min.' 
) + self.acquireFinished = True + logger.debug('Section ' + str(self.index) + ' has been acquired') + + + def computeZPlane(self): + self.focusedPoints = [] # this is needed for after interruptions: the focused points should be cleared + setChannel('brightfield') + + for idPoint, autofocusPosition in enumerate(self.imagingCoordinates['autofocus']): + logger.debug('Autofocusing of point number ' + str(idPoint) + ' in Tile number ' + str(self.index)) + setXY(autofocusPosition[0], autofocusPosition[1]) + openShutter() + # # # if (idPoint == 0): # not possible because the filter wheels are not motorized ? + # # # self.getRoughFocus() + focusedZ = autofocus() + self.focusedPoints.append([autofocusPosition[0], autofocusPosition[1], focusedZ]) + closeShutter() + self.imagingCoordinates['tiles'] = focusThePoints(np.array(self.focusedPoints).T, self.imagingCoordinates['tiles'].T).T + + logger.debug('The imaging coordinates will be ' + str(self.imagingCoordinates['tiles'])) + logger.info('Interpolative plane calculated for Section number ' + str(self.index)) + + def scanTiles(self): + tilesToScan = range(self.startingTile, len(self.imagingCoordinates['tiles']), 1) # for restart after interruption + for idTile in tilesToScan: + tileCoordinates = self.imagingCoordinates['tiles'][idTile] + logger.debug('Scanning: Section ' + str(self.index) + ' - Tile ' + str(idTile) ) + setXY(tileCoordinates[0], tileCoordinates[1]) + if mic == 'Leica': + mmc.fullfocus() + + for channel in channels['imaging']['tissue']: + setChannel(channel) + setExposure(self.ip.channels[channel][objectiveTissue]['exposure']['imaging']['tissue']) + + currentZ = getZ() # xxx this is wrong no !? 
+ setZ(currentZ + self.ip.channels[channel][objectiveTissue]['offset']) + + logger.info('Scanning: Section ' + str(self.index) + ' - Tile ' + str(idTile) + ' - Channel ' + str(channel)) + if idTile == tilesToScan[0]: + openShutter() + takeImage(self.index, self.ip.ids[idTile], self.folderSectionSave) + self.startingTile = idTile + 1 + if idTile == tilesToScan[-1]: + closeShutter() + + def moveToSection(self): + setXY(self.center[0], self.center[1]) + logger.debug('Moved to center of section ' + str(self.index)) + +################################# +# Some focus function utils +################################# +def testAutofocus(): + allAutofocus = [] + focusedZ = getZ() + outOfFocusFactor = 30 + + ## for i in range(10): + # # outZ = focusedZ + outOfFocusFactor * random.random() * random.choice([1, -1]) + # # setZ(outZ) + # # autofocus() + # # allAutofocus.append([getZ(), outZ]) + # + ## meanOffset = focusedZ - np.mean([f[0] for f in allAutofocus]) + + for i in range(5): + z = [] + elapsedTime = [] + outZ = focusedZ + outOfFocusFactor * random.random() * random.choice([1, -1]) + setZ(outZ) + startingTime = time.time() + # time.sleep(0.5) + autofocus() + elapsedTime.append(int(time.time() - startingTime)) + z.append(getZ()) + # if elapsedTime[0] > 4: + # setZ(z[0] + 15) + # startingTime = time.time() + # time.sleep(0.5) + # autofocus() + # elapsedTime.append(int(time.time() - startingTime)) + # z.append(getZ()) + # if elapsedTime[1] > 4: + # setZ(z[0] - 15) + # startingTime = time.time() + # time.sleep(0.5) + # autofocus() + # elapsedTime.append(int(time.time() - startingTime)) + # z.append(getZ()) + allAutofocus.append([z, elapsedTime, outZ]) + # take and save image + + setChannel(488) + setExposure(channels[488][objectiveBeads]['exposure']['imaging']['beads']) + mmc.snapImage() + im = mmc.getImage() + result = PIL.Image.fromarray((im).astype(np.uint16)) + result.save(os.path.join(folderSave, 'autofocusTest_488_' + str(i).zfill(3)) + '.tif') + + setChannel(546) + 
setExposure(channels[546][objectiveBeads]['exposure']['imaging']['beads']) +# setZ(getZ() + channels[546][objectiveBeads]['offset']['imaging']['beads']) + setZ(getZ() + channels[546][objectiveTissue]['offset']['imaging']['tissue']) + mmc.snapImage() + im = mmc.getImage() + result = PIL.Image.fromarray((im).astype(np.uint16)) + result.save(os.path.join(folderSave, 'autofocusTest_546_' + str(i).zfill(3)) + '.tif') + + setChannel('brightfield') + setExposure(channels['brightfield'][objectiveBeads]['exposure']['imaging']['beads']) + setZ(getZ() + channels['brightfield'][objectiveTissue]['offset']['imaging']['tissue']) + mmc.snapImage() + im = mmc.getImage() + result = PIL.Image.fromarray((im).astype(np.uint16)) + result.save(os.path.join(folderSave, 'autofocusTest_BF_' + str(i).zfill(3)) + '.tif') + + closeShutter() + meanOffset = focusedZ - np.mean([f[0][-1] for f in allAutofocus]) + return allAutofocus + +def findRedFocus(): + altitudes = np.arange( 1494, 1496 , 0.2) + for altitude in altitudes: + logger.info(str(altitude)) + setExposure(channels[647][objectiveTissue]['exposure']['imaging']['tissue']) + setZ(altitude) + setChannel(647) + mmc.snapImage() + closeShutter() + + im = mmc.getImage() + result = PIL.Image.fromarray((im).astype(np.uint16)) + result.save(os.path.join(folderSave, 'findFocus_647_' + str(altitude).zfill(6)) + '.tif') + + setExposure(channels['brightfield'][objectiveTissue]['exposure']['imaging']['tissue']) + setZ(altitude) + setChannel('brightfield') + mmc.snapImage() + closeShutter() + im = mmc.getImage() + result = PIL.Image.fromarray((im).astype(np.uint16)) + result.save(os.path.join(folderSave, 'findFocus_brightfield_' + str(altitude).zfill(6)) + '.tif') + closeShutter() + +################################# +# Some benchmarking of microscope components +################################# +def testNikonBodyFilterSpeed(): + startingTime = time.time() + + for i in range(20): + mmc.setProperty('TIFilterBlock1', 'Label', '3-FRAP') + 
mmc.waitForDevice('TIFilterBlock1') + mmc.setProperty('TIFilterBlock1', 'Label', '2-Quad') + mmc.waitForDevice('TIFilterBlock1') + + print 'average cycle ', int(time.time() - startingTime)/float(i) + +def testNikonPeterFilterWheel(): + channels = ['brightfield','dapi', 488, 546, 647] + for channel in channels: + setChannel(channel) + + +if __name__ == '__main__': + + ####################### + ### Initializations ### + + # starting the thread that saves the images that are inserted into the queue + savingQueue = Queue() + saverThread = threading.Thread(target= saver, args= (savingQueue,) ) # the comma is important to make it a tuple + saverThread.start() + + # initializing logger + logPath = os.path.join(folderSave, 'log_' + waferName + '.txt') + logger = initLogger(logPath) + logger.info('Logger started.') + logger.info('Wafer name : ' + str(waferName)) + logger.info('Saving path : ' + str(folderSave)) + + # Micromanager core + mmc = MMCorePy.CMMCore() + logger.info('Micromanager version and API version: ' + str(mmc.getVersionInfo()) + ' , ' + str(mmc.getAPIVersionInfo())) + mmc.enableStderrLog(False) + mmc.enableDebugLog(True) + mmc.setPrimaryLogFile(os.path.join(folderSave, 'logMMC_' + waferName + '.txt')) + mmc.loadSystemConfiguration(mmConfigFile) + + # all initial properties from a specific microscope + for initialProperty in initialProperties: + mmc.setProperty(initialProperty[0], initialProperty[1], initialProperty[2]) + + # Initializing main microscope devices + camera = mmc.getCameraDevice() + shutter = mmc.getShutterDevice() + zStage = mmc.getFocusDevice() + stage = mmc.getXYStageDevice() + imageSize_px = getImageSize() + try: + autofocusDevice = mmc.getAutoFocusDevice() + except Exception, e: + logger.error('Error: hardware autofocus could not be loaded') + + # deactivate autoshutter + mmc.setAutoShutter(False) + + # what is the current objective + currentObjectiveName = mmc.getProperty(objectiveProperties[0], objectiveProperties[1]) + + if '20x' in 
currentObjectiveName: + currentObjectiveNumber = 20 + elif '60x' in currentObjectiveName: # oil + currentObjectiveNumber = 63 + + ### Create mosaic parameters ### + overlap_pct = np.array([overlap, overlap]) + overlap_pctMag = np.array([overlapMag, overlapMag]) + ip = ImagingParameters(channels, tileGrid, overlap_pct, objectiveTissue, tileGridMag, overlap_pctMag, objectiveBeads) + + ############# + ### Start ### + root = Toplevel() + wafer = Wafer(waferName, ip) + app = App(root) + root.mainloop() \ No newline at end of file diff --git a/License.txt b/License.txt new file mode 100644 index 0000000..9f81458 --- /dev/null +++ b/License.txt @@ -0,0 +1,560 @@ +Copyright (C) 2018 Thomas Templier + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
\ No newline at end of file diff --git a/MagC.py b/MagC.py new file mode 100644 index 0000000..9b6e75b --- /dev/null +++ b/MagC.py @@ -0,0 +1,223 @@ +# orchestrator script that launches all scripts for MagC +import Tkinter, tkFileDialog +import os, sys, time +import shutil +import subprocess +import signal +from subprocess import call, Popen +import argparse +import platform + +def getDirectory(text): + root = Tkinter.Tk() + root.withdraw() + path = tkFileDialog.askdirectory(title = text) + os.sep + path = os.path.join(path, '') + return path + +def askFile(*args): + root = Tkinter.Tk() + root.withdraw() + if len(args) == 1: + path = tkFileDialog.askopenfilename(title = args[0]) + else: + path = tkFileDialog.askopenfilename(title = args[0], initialdir = args[1]) + return path + +def whereAmI(): + path = os.path.dirname(os.path.realpath(__file__)) + return os.path.join(path, '') + +def whereIs(item, itemType, displayText, MagCScriptsFolder, isNew): + storedItemPath = os.path.join(MagCScriptsFolder , 'whereIs' + item + '.txt') + try: + if isNew: + raise IOError + with open(storedItemPath , 'r') as f: + itemPath = f.readline() + if itemType == 'file' and not os.path.isfile(itemPath): + raise IOError + except IOError: + print 'I do not know where ' + item + ' is' + if itemType == 'file': + itemPath = askFile(displayText) + elif itemType == 'folder': + itemPath = getDirectory(displayText) + with open(storedItemPath, 'w') as f: + f.write(itemPath) + return itemPath + +def init(): + + parser = argparse.ArgumentParser() + parser.add_argument('-p', default='', help = '"new" (to trigger a dialog to enter the path of the main MagC folder) OR The path to the parent folder that contains all the MagC data') + parser.add_argument('-f', default='', help = '"new" (to trigger a dialog to enter the path to the Fiji executable) OR The path to the Fiji executable') + args = parser.parse_args() + + MagCScriptsFolder = whereAmI() + # get the fiji Path + if args.p == '' or args.p == 
'new': + fiji8Path = whereIs('Fiji8', 'file', 'Please select the *** JAVA 8 *** Fiji', MagCScriptsFolder, args.p == 'new') + else: + fiji8Path = os.path.normPath(args.p) # broken because of 2 fiji + + if args.p == '' or args.p == 'new': + fiji6Path = whereIs('Fiji6', 'file', 'Please select the *** JAVA 6 *** Fiji', MagCScriptsFolder, args.p == 'new') + else: + fiji6Path = os.path.normPath(args.p) # broken because of 2 fiji + + # plugins folder based on fiji path + fijiPluginsFolders = [os.path.join (os.path.split(fijiPath)[0], 'plugins','') for fijiPath in [fiji8Path, fiji6Path]] + + # copy all the scripts into the plugins folders of Fiji + for fijiPluginsFolder in fijiPluginsFolders: + for root, dirs, files in os.walk(MagCScriptsFolder): + for file in filter(lambda x: x.endswith('.py'), files): + shutil.copy(os.path.join(root, file), fijiPluginsFolder) + + # get the MagCFolder path + if args.f == '' or args.f == 'new': + MagCFolder = whereIs('MagCFolder', 'folder', 'Please select the MagC folder', MagCScriptsFolder, args.f == 'new') + else: + MagCFolder = os.path.join(os.path.normPath(args.f),'') + + # If the MagC_Parameters file is not there, then add the standard one from the repo + MagCParamPath = findFilesFromTags(MagCFolder, ['MagC_Parameters']) + if len(MagCParamPath) == 0: # MagC_Parameters is not in the data folder + shutil.copy(os.path.join(MagCScriptsFolder, 'MagC_Parameters.txt'), MagCFolder) + + return MagCFolder, fiji8Path, fiji6Path + +def findImageryFolder(MagCFolder, modality): + ImageryPath = '' + for (dirpath, dirnames, filenames) in os.walk(MagCFolder): + for filename in filenames: + if filename.endswith ( '.zva' * (modality == 'LM') + '.ve-asf' * (modality == 'EM')): + ImageryPath = dirpath + return cleanPathForFijiCall(ImageryPath) + +def cleanPathForFijiCall(path): +# the path here is provided as an argument to a Fiji script. The path has to be handled differently if it is a folder or a file path so that Fiji understands it well. 
+ path = os.path.normpath(path) + if not os.path.isfile(path): + path = os.path.join(path, '') + path = path.replace(os.sep, os.sep + os.sep) + return path + +def runFijiScript(plugin): + fijiFlag = plugin[1] + plugin = plugin[0] + + repeat = True + signalingPath = os.path.join(MagCFolder, 'signalingFile_' + plugin.replace(' ', '_') + '.txt') + print 'signalingPath', signalingPath + plugin = "'" + plugin + "'" + arguments = cleanPathForFijiCall(MagCFolder) + while repeat: + print 'running plugin ', plugin, ' : ', str(time.strftime('%Y%m%d-%H%M%S')) + + # print ' with arguments ', arguments + # command = fijiPath + ' -eval ' + '"run(' + plugin + ",'" + arguments + "'" + + if fijiFlag == 0: + fijiPath = fiji8Path + else: + fijiPath = fiji6Path + + command = fijiPath + ' -eval ' + '"run(' + plugin + ",'" + arguments + "'" + ')"' + print 'command', command + if platform.system() == 'Linux': + p = subprocess.Popen(command, shell=True, preexec_fn = os.setsid) # do not use stdout = ... otherwise it hangs + else: + p = subprocess.Popen(command, shell=True) # do not use stdout = ... otherwise it hangs + # result = subprocess.call(command, shell=True) + + # print 'subprocess', p + + waitingForPlugin = True + while waitingForPlugin: + # print 'waitingForPlugin' + if os.path.isfile(signalingPath): + time.sleep(2) + with open(signalingPath, 'r') as f: + line = f.readlines()[0] + if line == 'kill me': + if platform.system() == 'Linux': + #p.terminate() + os.killpg(os.getpgid(p.pid), signal.SIGTERM) + else: # what else ? + subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)]) + print plugin , ' has run successfully: ', str(time.strftime('%Y%m%d-%H%M%S')) + repeat = False + elif line == 'kill me and rerun me': + if platform.system() == 'Linux': + #p.terminate() + os.killpg(os.getpgid(p.pid), signal.SIGTERM) + else: # what else ? 
+ subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)]) + print plugin , ' has run successfully and needs to be rerun ', str(time.strftime('%Y%m%d-%H%M%S')) + else: + print '********************* ERROR' + print 'signalingPath from MagC', signalingPath + os.remove(signalingPath) + waitingForPlugin = False + time.sleep(1) + + # # # if result == 0: + # # # print 'result',result + # # # print plugin , ' has run successfully: ', str(time.strftime('%Y%m%d-%H%M%S')) + # # # repeat = False + # # # elif result == 2: + # # # print plugin , ' has run successfully and needs to be rerun ', str(time.strftime('%Y%m%d-%H%M%S')) + # # # else: + # # # print plugin, ' has failed' + # # # sys.exit(1) + +def findFilesFromTags(folder,tags): + filePaths = [] + for (dirpath, dirnames, filenames) in os.walk(folder): + for filename in filenames: + if (all(map(lambda x:x in filename,tags)) == True): + path = os.path.join(dirpath, filename) + filePaths.append(path) + return filePaths + +############################################################# +# Script starts here +############################################################# + +MagCFolder, fiji8Path, fiji6Path = init() + +pipeline = [ +['preprocess ForPipeline', 0], + +### LM ### + +['assembly LM', 0], +['montage LM', 0], +['alignRigid LM', 0], +['export LMChannels', 0], + +### EM ### + +['init EM', 0], +['downsample EM', 0], +['assembly lowEM', 0], +['assembly EM', 0], +['montage ElasticEM', 1], # fails in java8 +['export stitchedEMForAlignment', 0], +['reorder postElasticMontage', 0], +['alignRigid EM', 0], +['alignElastic EM', 0], +['export alignedEMForRegistration', 0], + +### LM-EM registration### + +['compute RegistrationMovingLeastSquares', 1], #fiji8 fails to save MLS transforms +['export TransformedCroppedLM', 0], +['assembly LMProjects', 1], #java8 fails to apply coordinateTransforms + +] + +for step in pipeline: + runFijiScript(step) \ No newline at end of file diff --git a/MagC_Parameters.txt b/MagC_Parameters.txt 
new file mode 100644 index 0000000..b9f26e5 --- /dev/null +++ b/MagC_Parameters.txt @@ -0,0 +1,108 @@ +# MagC parameters +# Lines that start with a single '#' are comment lines +# lines that start with a '##### Plugin' indicate to which plugin the parameters below this line belong +# Lines that do not start with a '#' are parameter lines and should have the following form: 'param = 36' or 'd = 'thePath' or "theList = [3, 6, 'baguette']" .' + +############################## +##### Plugin preprocess_ForPipeline ##### +############################## +# execute or not the LM and EM parts (1 - True, 0 - False) +executeLM = 1 +executeEM = 1 + +############################## +##### Plugin assembly_LM ##### +############################## +# Overlap of the LM patches in the mosaics +overlap = 0.1 + +# size of the neighborhood for local contrast for the brightfield channel +normLocalContrastSize = 50 + +# unique identifier for the reference LM channel used for LM alignment and LM/EM registration. The identifier must be contained in the name of the reference channel but not in any other channel +refChannelIdentifier = field + +# for contrasting the fluo channels +normLocalContrastSizeFluo = 2000 + +# for thresholding the fluo channels +minMaxFluo = [15000,65000] + +# flip horizontally the LM tiles +flipHorizontally = 1 + +########################################### +##### Plugin montage_LM ##### +########################################### +# mosaic dimension of tiles +mosaic = [2,2] + +########################################### +##### Plugin alignRigid_LM ##### +########################################### +executeAlignment = 0 + +# e.g. 
0.5, use only the center part of the layer to compute alignment: 0.5 divides the x and y dimensions by 2 +boxFactor = 0.5 + +#################################### +##### Plugin export_LMChannels ##### +#################################### +# assembled LM mosaics will be saved to files with the following scale factors +# /!\ Scalefactor 1 should always be included, as these images are used for LM/EM registration +scaleFactors = [0.1,1] + +#################################### +##### Plugin downsample_EM ##### +#################################### +downsamplingFactor = 20 +nTilesAtATime = 400 +normLocalContrastSize = 500 + +########################################### +##### Plugin montage_Translation ##### +########################################### +# Number of layers to montage by each Fiji instance +nLayersAtATime = 2 + +########################################### +##### Plugin montage_ElasticEM ##### +########################################### +# Number of layers to montage by each Fiji instance +nLayersAtATime = 40 +nThreads = 20 + +########################################### +##### Plugin export_stitchedEMForAlignment ##### +########################################### +# Number of layers to montage by each Fiji instance +nLayersAtATime = 50 +nThreads = 10 + +########################################### +##### Plugin alignElastic_EM ##### +########################################### +# Number of layers to montage by each Fiji instance +layerOverlap = 1 +nLayersAtATime = 30 + +########################################### +##### Plugin export_alignedEMForRegistration ##### +########################################### +nLayersAtATime = 40 +nThreads = 10 + +########################################### +##### Plugin compute_RegistrationMovingLeastSquares ##### +########################################### +# rejection threshold for the mean displacement of the transforms for the low and high resolution steps, respectively +matchingThreshold = [10,10] + +# number of 
octaves in the SIFT features search at the low and high resolution steps, respectively +nOctaves = [1,4] + +# percentage of cropping the EM box for registration. Be careful, the cropped boundaries are then "lost" +cropBoxPercent = 0.1 + +# on the 8-core computer (zarathustra), more than 6 makes problems when RVS is saving the images (there is a suspicious IJ.openImage in the RVS code ...) +nLayersAtATime = 20 \ No newline at end of file diff --git a/Motor.py b/Motor.py new file mode 100644 index 0000000..96e0b31 --- /dev/null +++ b/Motor.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +""" +Example code showing how to control Thorlabs TDC Motors using PyAPT +V1.2 +Michael Leung +mcleung@stanford.edu +""" +from PyAPT import APTMotor +import time + +def left(d, v = 0.3): + motVel = v #motor velocity, in mm/sec + Motor2.mcRel(d, motVel) # Negative Right / Positive Left + +def right(d, v = 0.3): + motVel = v #motor velocity, in mm/sec + Motor2.mcRel(-d, motVel) # Negative Right / Positive Left + +def up(d, v = 0.3): + motVel = v #motor velocity, in mm/sec + Motor1.mcRel(d, motVel) # negative me / positive knife + +def down(d, v = 0.3): + motVel = v #motor velocity, in mm/sec + Motor1.mcRel(-d, motVel) # negative me / positive knife + +Motor1 = APTMotor(45844773, HWTYPE=42) +Motor2 = APTMotor(45844576, HWTYPE=42) + +# print 'Motor1', Motor1.getPos() +# time.sleep(0.1) +# print 'Motor2', Motor2.getPos() + +# offset1 = 0 +# offset2 = 0 + +# print 'Motor1', Motor1.getStageAxisInformation() +# Motor1.setStageAxisInformation(offset1, offset1 + 145) +# print 'Motor1', Motor1.getStageAxisInformation() + +# print 'Motor2', Motor2.getStageAxisInformation() +# Motor2.setStageAxisInformation(offset2, offset2 + 145) +# print 'Motor2', Motor2.getStageAxisInformation() + +###################################################################### + +# down(5, v = 0.3) +# up(5, v = 0.3) +# left(5, v = 0.3) +right(40, v = 0.2) + 
+###################################################################### + +Motor1.cleanUpAPT() +Motor2.cleanUpAPT() \ No newline at end of file diff --git a/Readme.md b/Readme.md new file mode 100644 index 0000000..4ef42c5 --- /dev/null +++ b/Readme.md @@ -0,0 +1,289 @@ +# Platforms +Windows - for all steps except Linux for after LM-EM registration (for exports to render and Neuroglancer) +Linux - should work too but currently untested (probably to adjust: call to concorde linkern for reordering, maybe add some fc.cleanLinuxPath to problematic paths with trakEM) + +# Installation +Download Fiji - java8 +Download Fiji - java6 (needed because some components currently broken in the java 8 version, e.g., elastic montage and moving least squares transforms in trakEM2) +Place the file fijiCommon.py in the 'plugins' folder of Fiji: it is a library of helpful functions. +Python 2 for everything until final data export in linux - Typically install with anaconda +Python 3 for the data export in linux +Git for windows recommended to make command line calls +The software Concorde for solving traveling salesman problems. On Windows, download [linkern-cygwin](http://www.math.uwaterloo.ca/tsp/concorde/downloads/downloads.htm) and place both linkern.exe and cygwin1.dll in this locally cloned repository. + + +# Imaging + +## LM Imaging +### Wafer overview for section segmentation + +No scripts were used for the acquisition of low magnification (5x) brightfield imagery of wafers. Using the software of my microscope (ZEN) I acquired DAPI and brightfield mosaics of the wafer. Assemble the obtained data in a folder: + +``` +AllImages +│ProjectName_Fluo_b0s0c0x22486-1388y7488-1040m170.tif +│... 
(all tiles from the Fluo DAPI channel) +│ +│ProjectName_BF_b0s0c0x22486-1388y7488-1040m170.tif +│...(all tiles from the Brightfield channel) +│ +│Mosaic_Metadata.xml (the mosaic metadata file written by ZEN, rename it with this exact name) +``` +This part of the name "b0s0c0x22486-1388y7488-1040m170" comes from ZEN and cannot be controlled. Any 'ProjectName' is ok, but ```_Fluo_``` and ```_BF_``` must be present to indicate which channel is which. + +If you are not using ZEN, then adjust the code to use your own mosaic format. + +### Fluorescent imaging of beads for section order retrieval +After following the section segmentation step explained later, you will have the folder 'preImaging' containing + +- images showing the locations of the landmarks: these images help navigating when setting up the landmarks at the light and electron microscopes +- text files with + - the locations of the corners of the magnetic portion of the sections + - the locations of the corners of the tissue portion of the sections + - the locations of the landmarks + - the locations of the corners of the tissue and of the magnetic portion of a reference section + - the locations of the corners of the tissue portions and of a region of interest relative to that tissue portion (optional) + +Configure your microscope to be usable with Micromanager. In LM_Imaging.py, adjust the paths of the configuration at the beginning of the script. The current configuration is suited for the Nikon BC Nikon2 G-Floor of Peter's lab at ETHZ. + +To use with another microscope, you will probably need to adjust names of some components (e.g., 'Objective-Turret' might have another name on another microscope, etc.) + +#### Setup +- Load the wafer and set the 20x objective. +- In LM_Imaging.py, adjust experiment parameters such as, waferName, channels, mosaic size. These parameters are at the beginning of the script. + +#### Calibrate landmarks +- Run LM_imaging.py with spyder. 
In the GUI, click on the button "loadSectionsAndLandmarksFromPipeline" and select the 'preImaging' folder containing the section coordinates. +- Click on "Live BF" to activate live brightfield imaging. Using the overview images from the 'preImaging' folder, locate the first landmark on wafer (red cross) by moving the stage. Once the landmark is centered on the central cross in the field of view, press 'Add lowres landmark'. +- Navigate to the second landmark and press again 'Add lowres landmark'. This button press has triggered the movement of the stage to the 3rd landmark: adjust it to the center and press 'Add lowres landmark' again. +- The last button press has triggered again a stage movement. The stage is now centered on the 4th landmark: adjust and press again the 'Add lowres landmark'. All four landmarks are now calibrated (message updates are given in the console). If you had defined more than 4 landmarks in the wafer segmentation pipeline, then you would need to adjust more landmarks similarly. Only the first two landmarks need to be adjusted manually. + +Warning: when using with another microscope, make sure that the axes are not flipped and that there is no scaling difference between the x and y axes (e.g., the confocal Visitron at ETHZ has a factor 3.x between x and y axes, and the y axis is flipped). Adjust the getXY and setXY functions accordingly (e.g., y = -y, x = x * 1/3.), etc.). + +- To verify the position of the landmarks, now click "Add highres landmark": it will move to the first landmark. Adjust the landmark if needed, then press again 'Add highres landmark' and so on until the last (typically 4th) landmark is calibrated. The landmarks are now calibrated. + +This "Add highres landmarks" procedure is actually useful when calibrating with 20x without oil and then calibrating with a higher-magnification oil objective (typically done for imaging immunostained sections at high resolution). 
+ +After successful calibration, a file "target_highres_landmarks.txt" has been added in the preImaging folder. Keep it for further processing in the section reordering part of the pipeline (this file helps orienting correctly the acquired images). + +#### Calibrate hardware autofocus +To calibrate the hardware autofocus (HAF) +- start live imaging with a fluorescent channel with which beads are visible (e.g. "Live Green" for 488) +- locate a patch of fluorescent beads +- press the button "ToggleNikonHAF" (you hear a beep from the hardware autofocus) +- adjust the focus with the wheel of the HAF +- press again "ToggleNikonHAF". + +If there is a focus offset between different channels, then adjust the offset values at the beginning of the script. These values are already calibrated for the ETHZ Nikon microscope. + +#### Fluorescent bead acquisition +If you had stopped the GUI, you can restart it by rerunning the script and loading the wafer file that had been automatically saved when calibrating the landmarks (button "load wafer"). The wafer file is in the saveFolder defined in the script. Press the button "Acquire mag HAF" to start the automated acquisition of the bead imagery. + +### Fluorescent imaging for immunostained tissue + +#### Setup +Load the wafer (which has mounting medium and a coverslip), set the 20x objective, and ensure that the sample holder is well anchored to one corner of the sample holder slot (so that you can remove and replace the sample holder at the same position without too much offset). + +#### Calibrate landmarks + +The calibration procedure is the same as described earlier for the imaging of beads. 
After successful calibration of the "low resolution" landmarks (with the 20x, and with the coverslip **without** immersion oil) +- remove the sample holder +- add immersion oil on the coverslip above the area with sections (ensure that no oil is touching the wafer, which would be a bad contamination) +- set the 63x oil objective +- place back the holder at the same location (make sure it touches well one of the corners the same way as you inserted it before adding immersion oil). + +Adjust manually the focus to make sure that the objective is well immersed in the oil, then press "Add highres landmark": it will move the stage to the first landmark. Adjust it and press the same button again, and so on until all landmarks are calibrated. + +#### Calibrate hardware autofocus +Same procedure as described earlier for the beads. + +#### Acquisition of fluorescently stained tissue +Press "Acquire tissue HAF" to start the automated acquisition. + +The ROI in each section is defined in the file "source_ROI_description" in the "preImaging" folder (created in the wafer segmentation part of the pipeline). This ROI description can also be created manually, it contains the coordinates of the four corners of a section (the tissue portion, x,y) and the four corners of the ROI (a,b) (tab-delimited). + +``` +x1,y1 x2,y2 x3,y3 x4,y4 +a1,b1 a2,b2 a3,b3 a4,b4 +``` + +If there is no "source_ROI_description" file, then the center of the tissue section is the center of the region acquired. + +## EM Imaging +The script EM_imaging.py was used with a Zeiss Merlin that controlled the microscope through the Zeiss API. + +#### Setup + +Load the wafer and adjust imaging parameters (brightness, contrast). These imaging parameters will not be changed during automated imaging and can be changed during the acquisition if needed. Adjust parameters at the bottom of the EM_imaging.py script (mosaic grid, tile size, scan speed, wafer name). + +#### Calibrate landmarks + +Run EM_Imaging.py with Spyder. 
Click "loadSectionsAndLandmarksFromPipeline" in the GUI and select the "preImaging" folder containing section coordinates. + +Locate the first landmark and center it in the field of view. Similarly as for the LM landmark calibration, repetitively press the same button and calibrate the other landmarks (the first two landmarks are calibrated manually, the following ones are precomputed and you simply need to adjust them). + +#### Automated acquisition + +Set the correct detector and start automated acquisition with "Acquire wafer". If you want to acquire only a subset of sections, press the "Acquire sub wafer" and then enter the indices of the sections in the spyder console then press enter. + +The acquisition of a wafer can be interrupted and restarted. The wafer file keeps track of which sections were already acquired. + +The acquisition of a specific ROI in the tissue section is determined the same way as for the LM described earlier, that is, by the text file with the coordinates of a reference tissue section and of the relative ROI. + + +# Section segmentation + +Organize the sections in a folder like described earlier in the LM 'Wafer overview for section segmentation' paragraph. + +Adjust the root folder in the script of sectionSegmentation.py and run it with the Fiji script editor. Follow the instructions that will pop up during the processing. 
+ +The output of this script is the folder "preImaging" that contains +- images showing the locations of the landmarks: these images help navigating when setting up the landmarks at the light and electron microscopes +- text files with + - the locations of the corners of the magnetic portion of the sections + - the locations of the corners of the tissue portion of the sections + - the locations of the landmarks + - the locations of the corners of the tissue and of the magnetic portion of a reference section + - the locations of the corners of the tissue portions and of a region of interest relative to that tissue portion (optional). + +# Section order retrieval with fluorescent beads + +Organize the fluorescent imagery of the beads acquired with the pipeline with the following format: + +``` +rootFolder +│└───preImaging (comes from section segmentation part) +│└───section_0000 +│ │section_0000_channel_488_tileId_00-00-mag.tif +│ │section_0000_channel_488_tileId_00-01-mag.tif +│ │... +│ │section_0000_channel_546_tileId_00-00-mag.tif +│ │... +│└───section_0001 +│└───... +│└───section_n +``` + +In the file SOR.py (Section Order Retrieval), adjust the inputFolder path to your rootFolder. + +Run the SOR.py script from the Fiji script editor. It will output the section order in the folder "calculations" with the name solution488-546.txt (using two fluorescent channels) or solution488.txt (using only one fluorescent channel). You can manually copy paste this file for the data assembly pipeline below. + +The script also outputs many trakemProjects that show reordered bead imagery at different stages of the processing and with all fluorescent channels. 
+ +# CLEM Data assembly + +## Initial folder setup with input data +If you have used a different imaging pipeline than the one described above, you should arrange your data with the following format: + +``` +YourMagCProjectFolder +│MagCParameters.txt +│solutionxxx.txt +│sectionOrder.txt +│LMEMFactor.txt +└───EMDataRaw +│ │SomeName_EM_Metadata.txt +│ └───section_0000 +│ │ │Tile_0-0.tif (Tile_x-y.tif) +│ │ │Tile_0-1.tif +│ │ │Tile_1-0.tif +│ │ │Tile_1-1.tif +│ └───section_0001 +│ └───... +│ └───section_n +│ +└───LMData +│ │xxx_LM_Meta_Data.txt +│ └───section_0000 +│ │ │section_0000_channel_488_tileId_00-00-tissue.tif +│ │ │section_0000_channel_488_tileId_00-01-tissue.tif +│ │ │... +│ │ │section_0000_channel_546_tileId_00-01-tissue.tif +│ │ │... +│ │ │section_0000_channel_brightfield_tileId_00-01-tissue.tif +│ └───section_0001 +│ └───... +│ └───section_n +``` +Description of the files above: +- MagCParameters.txt - if you do not put it yourself from the template in the repository, the default one will be added with default parameters. There are surely parameters that you need to adjust. +- solutionxxx.txt (e.g. solution488-546.txt, solution488.txt) - the section reordering solution computed by Concorde from the reordering pipeline using fluorescent beads +- sectionOrder.txt - indices of the sections in the correct order, one number per line; if this file does not exist, then it will be automatically generated from solutionxxx.txt, or it will be generated at the beginning of the EM data assembly pipeline using EM imagery +- LMEMFactor.txt - the magnification factor between LM and EM imagery (float, the file contains this single number). Typically around 7-13 for 60x magnification LM and about 10 nm EM pixel size. Typically, measure the distance between 2 easily identifiable points in LM and EM and calculate the distance ratio in pixels. +- xxx_EM_Metadata.txt - created by EM_Imaging.py. 
If you do not use this script for EM imaging, look at the Example_EM_Metadata.txt to create this file yourself +- xxx_LM_Meta_Data.txt - created by LM_Imaging.py. If you do not use this script for LM imaging, look at the Example_LM_Meta_Data.txt to create this file yourself + +## Running the pipeline + +The pipeline consists of Fiji scripts that are called one after the other externally from the orchestrator Python script MagC.py. You can run it directly from where you have cloned the repository. Upon first run it will open a GUI to ask the user to input: +- the location of the Fiji-java8 executable +- the location of the Fiji-java6 executable +- the location of YourMagCProjectFolder + +It will create three corresponding text files in the repository that store the three locations. If you want to change these, edit these files or remove them to trigger the GUI (the GUI does not pop up when these files are already present). + +## Scripts of the pipeline + +If you want to run only a part of the pipeline, comment out the steps in MagC.py. + +Here is a brief description of what each script does in the pipeline. +### LM +- preprocess_ForPipeline - copy and reorder the LM sections. Copy EM sections. +- assembly_LM - preprocess the LM channels (local contrast enhancement, thresholding, and 8-bit conversion). Creates the contrastedBrightfield channel used for alignment, stitching, and CLEM registration. Assemble the tiles of the reference brightfield channel in a trakem project according to LM metadata. +- montage_LM - use one of the montage plugins to montage the LM tiles (phase correlation from Cardona, least squares from Saalfeld, or the main Fiji stitching plugin from Preibisch) +- alignRigid_LM - align (with rigid transforms) the 3D stack (using the brightfield imagery). This alignment is not crucial. If it is faulty, set doAlignment in the parameters to 0. The alignment will anyway be redone during the CLEM registration. 
+- export_LMChannels - export to disk assembled sections from all channels + +### EM + +- init_EM - read metadata and initialize folder +- EM_Reorderer - performs section order retrieval using EM imagery. Pairwise similarities between sections are calculated at the center of each tile of the mosaic grid (e.g. 2x2), and then averaged. +- downsample_EM - downsample and preprocess all tiles with local contrast normalization +- assembly_lowEM - assemble the downsampled tiles into a trakem project according to metadata (to determine tile position) followed by montaging with translations (using Fiji's stitching plugin by Preibisch et al.) +- assembly_EM - assemble a trakem project with original resolution using the transforms computed previously on low resolution data +- montage_ElasticEM - montage all tiles with elastic transforms +- export_stitchedEMForAlignment - downscale and export to file the stitched sections +- reorder_postElasticMontage - reorder projects and exported sections with the order provided in the sectionOrder.txt file (or solutionxxx.txt file if sectionOrder.txt not present) +- alignRigid_EM - rigidly align the low resolution EM stack and propagate the transforms to the high resolution project +- alignElastic_EM - elastically align the EM stack at full resolution +- export_alignedEMForRegistration - export all sections to file with the downscaling LMEMFactor so that the exported EM sections have roughly the same resolution as the LM imagery + +### LM-EM registration + +- compute_RegistrationMovingLeastSquares - compute the cross-modality moving least squares (MLS) LM-EM transforms +- export_TransformedCroppedLM - export to file affine transformed and cropped LM channels: these images can be transformed with the computed MLS transforms and upscaled to fit in the EM imagery +- assembly_LMProjects - create trakem projects containing the LM imagery transformed with the MLS transforms (not upscaled) + +# Export of assembled data (linux only) +## 
Install +In a folder, e.g. 'repos', clone the following repositories: +- this repo +- [render](https://github.com/saalfeldlab/render) from Saalfeld's lab + +Create a folder with the following data computed from the pipeline above: +``` +projects +└───project_yourProjectName + │ElasticaAlignedEMProject.xml (from the pipeline) + │LMProject_488.xml (from the pipeline) + │LMProject_546.xml (from the pipeline) + │LMProject_brightfield.xml (from the pipeline) + └───EMData (from the pipeline) + └───affineCropped_488 (from the pipeline) + └───affineCropped_546 (from the pipeline) + └───affineCropped_brightfield (from the pipeline) +``` +In trakemToNeuroglancer.py adjust the paths to the 'repo' folder and 'projects' folder. + +## Run +Run trakemToNeuroglancer.py. It will +- create separate render projects for the EM and for the LM channels +- render to file mipmaps from the EM and the LM channels +- create precomputed chunks for the EM and LM channels ready to be visualized with neuroglancer + + +# Section collection + +The script Motor.py allows control of a 2-axis manipulator (Thorlabs) using the [PyAPT library](https://github.com/mcleung/PyAPT) from Michael Leung. Follow instructions on the github page of the repo for installation. + +The script syringePump.py allows control of a syringe pump (KDScientific 200) for water infusion and withdrawal. 
diff --git a/SOR.py b/SOR.py new file mode 100644 index 0000000..9314532 --- /dev/null +++ b/SOR.py @@ -0,0 +1,2335 @@ +from __future__ import with_statement +import os, time, threading, pickle, shutil +from collections import Counter + +import java +from java.util import ArrayList, HashSet, Stack +from java.util.concurrent.atomic import AtomicInteger +from java.awt import Polygon, Rectangle, Color, Point +from java.awt.geom import Area, AffineTransform +from java.lang import Math, Runtime, Exception # to catch java exceptions +from java.lang import Float +from java.lang.reflect import Array +from java.lang.Math import hypot, sqrt, atan2, PI, abs +from Jama import Matrix +import jarray +from jarray import zeros, array + +import fijiCommon as fc + +import ij +from ij import IJ, ImagePlus +from ij.gui import OvalRoi +from ij.gui import Roi +from ij.plugin import ImageCalculator +from ij.plugin.frame import RoiManager +from ij.plugin.filter import MaximumFinder +from ij.plugin.filter import ParticleAnalyzer +from ij.process import FloatProcessor +from ij.measure import Measurements + +from java.lang import Double + +from mpicbg.models import RigidModel2D, AffineModel2D, PointMatch, NotEnoughDataPointsException +from mpicbg.ij import SIFT, FeatureTransform +from mpicbg.ij.plugin import NormalizeLocalContrast +from mpicbg.imagefeatures import FloatArray2DSIFT +from mpicbg.ij.util import Util +from mpicbg.imglib.image import ImagePlusAdapter +from mpicbg.imglib.algorithm.scalespace import DifferenceOfGaussianPeak +from mpicbg.imglib.algorithm.scalespace.DifferenceOfGaussian import SpecialPoint +from mpicbg.imglib.algorithm.correlation import CrossCorrelation +from mpicbg.imglib.type.numeric.integer import IntType + +from fiji.plugin.trackmate.detection import LogDetector + + +from net.imglib2.img.display.imagej import ImageJFunctions +# from net.imglib2 import Point +import net +from net.imglib2.algorithm.region.hypersphere import HyperSphere +from 
net.imglib2.img.imageplus import ImagePlusImgFactory +from net.imglib2.type.numeric.integer import UnsignedByteType + +from ini.trakem2 import Project +from ini.trakem2.display import AreaList, Patch, Display +from ini.trakem2.imaging import StitchingTEM, Blending +from ini.trakem2.imaging.StitchingTEM import PhaseCorrelationParam + +from process import Matching +from plugin import DescriptorParameters + +from bunwarpj.bUnwarpJ_ import computeTransformationBatch, elasticTransformImageMacro +from bunwarpj import Param + +#################### +# I/O operations and geometric utils +#################### +def matToList(m): + l = [] + for column in m: + l.append(list(column)) + return l + +def pickleSave(a, path): + f = open(path, 'w') + pickle.dump(a, f) + f.close() + +def pickleLoad(path): + f = open(path, 'r') + a = pickle.load(f) + f.close() + return a + +def inferTilingFromNames(names): + n = len(names) + maxX = max([getFieldFromName(os.path.basename(name), 'X') for name in names]) + 1 + return maxX, int(n/maxX) + +def getFieldFromName(name, field): + # patch_015_025_Edges.tif + result = None + if field == 'X': + result = os.path.splitext(name)[0].split('_')[2] + print name, field, os.path.splitext(name)[0].split('_') + elif field == 'Y': + result = os.path.splitext(name)[0].split('_')[3] + print field, result + return int(result) + +def readPoints(path): + points = [] + with open(path, 'r') as f: + lines = f.readlines() + for point in lines: + points.append(map(lambda x: int(float(x)), point.split('\t') )) + return points + +def getLength(p1,p2): + return sqrt((p1[0]-p2[0]) * (p1[0]-p2[0]) + (p1[1]-p2[1]) * (p1[1]-p2[1])) + +def getAngle(line): + diff = [line[0] - line[2], line[1] - line[3]] + theta = atan2(diff[1], diff[0]) + return theta + +def getEdgeLengths(corners): + edgeLengths = [] + nCorners = len(corners) + for i in range(nCorners): + if (None in corners[i]) or (None in corners[(i+1) % nCorners]) : + edgeLengths.append(None) + else: + 
edgeLengths.append(getLength(corners[i], corners[(i+1) % nCorners])) + return edgeLengths + +def getSectionsAngles(allSectionsCoordinates): + angles = [] + for sectionCorners in allSectionsCoordinates: + angle = getAngle([sectionCorners[0][0], sectionCorners[0][1], sectionCorners[1][0], sectionCorners[1][1]]) + angles.append(angle) + return angles + +def getNewMosaicSize(): + imPath = os.path.join(preprocessedFolder, filter(lambda x: os.path.splitext(x)[1] == '.tif', os.listdir(preprocessedFolder))[0]) + im = IJ.openImage(imPath) + im.close() + return im.getWidth(), im.getHeight() + +def pointsToDogs(points): + dogs = ArrayList() + for point in points: + dogs.add(DifferenceOfGaussianPeak( [int(point[0]), int(point[1]) ] , IntType(255), SpecialPoint.MAX )) + return dogs + +def pointListToList(pointList): # [[1,2],[5,8]] to [1,2,5,8] + l = array(2 * len(pointList) * [0], 'd') + for id, point in enumerate(pointList): + l[2*id] = point[0] + l[2*id+1] = point[1] + return l + +def listToPointList(l): # [1,2,5,8] to [[1,2],[5,8]] + pointList = [] + for i in range(len(l)/2): + pointList.append([l[2*i], l[2*i+1]]) + return pointList + +def cropPeaks(peaks, cropParams): + croppedPeaks = filter(lambda p: p[0]>cropParams[0] and p[0]cropParams[2] and p[1] 0) and (a+r < w) and (b-r > 0) and (b+r < h): + drawSphere(img, net.imglib2.Point([int(a),int(b)]), r, 255) + return imp + +def convertTo8Bit(atomicI, imagePaths, newImagePaths, minMax, downFactor = 1, vFlip = False, hFlip = False): + while atomicI.get() < len(imagePaths): + k = atomicI.getAndIncrement() + if (k < len(imagePaths)): + imagePath = imagePaths[k] + newImagePath = newImagePaths[k] + im = IJ.openImage(imagePath) + IJ.setMinAndMax(im, minMax[0], minMax[1]) + IJ.run(im, '8-bit', '') + if downFactor != 1: + im = fc.resize(im, float(1/float(downFactor))) + if 'rightfiel' in os.path.basename(imagePath): + im = fc.localContrast(im) + if vFlip: + IJ.run(im, 'Flip Vertically', '') + if hFlip: + IJ.run(im, 'Flip 
Horizontally', '') + IJ.save(im, newImagePath) + IJ.log(str(k) + ' of ' + str(len(imagePaths)) + ' processed') + im.close() + + +#################### +# TSP operations +#################### +def initMat(n, initValue = 0): + a = Array.newInstance(java.lang.Float,[n, n]) + for i in range(n): + for j in range(n): + a[i][j] = initValue + return a + +def copySquareMat(m): + a = initMat(len(m)) + for x, col in enumerate(m): + for y, val in enumerate(col): + a[x][y] = m[x][y] + return a + +def matSum(a,b): + width = len(a) + height = len(a[0]) + for x in range(width): + for y in range(height): + a[x][y] = a[x][y] + b[x][y] + return a + +def matAverage(ms): + width = len(ms[0]) + a = initMat(width) + for x in range(width): + for y in range(width): + l = [m[x][y] for m in ms] + if sum(l) > 50000: + a[x][y] = min(l) + IJ.log('discrepancy ' + str(l) ) + else: + a[x][y] = sum(l)/float(len(ms)) + return a + +def orderFromMat(mat, rootFolder, solutionName = ''): + tsplibPath = os.path.join(rootFolder, 'TSPMat.tsp') + saveMatToTSPLIB(mat, tsplibPath) + solutionPath = os.path.join(rootFolder, 'solution_' + solutionName + '.txt') + pluginFolder = IJ.getDirectory('plugins') + concordePath = os.path.join(pluginFolder, 'linkern.exe') + IJ.log('concordePath is there: ' + str(os.path.isfile(concordePath))) + # subprocess.call([concordePath , '-o', solutionPath , tsplibPath]) # I could specify more iterations + + # use os.system because subprocess currently broken + # command = concordePath + ' ' + tsplibPath + ' -o ' + solutionPath + command = concordePath + ' -o ' + solutionPath + ' ' + tsplibPath + IJ.log('Command: ' + str(command)) + # os.system(command) + # os.popen(command) + + process = Runtime.getRuntime().exec(command) + # output = process.getInputStream() + + while not os.path.isfile(solutionPath): + time.sleep(1) + IJ.log('Computing TSP solution ...') + time.sleep(1) + + with open(solutionPath, 'r') as f: + lines = f.readlines()[1:] + order = [] + for line in lines: + 
order.append(int(line.split(' ')[0])) + + # remove the dummy city 0 and apply a -1 offset + order.remove(0) + for id, o in enumerate(order): + order[id] = o-1 + + # logging some info + # IJ.log('The order is ' + str(order)) + costs = [] + for id, o in enumerate(order[:-1]): + o1, o2 = sorted([order[id], order[id+1]]) # sorting because [8, 6] is not in the matrix, but [6,8] is + cost = mat[o1][o2] + IJ.log( 'order cost ' + str(order[id]) + '_' + str(order[id+1]) + '_' + str(cost)) + costs.append(cost) + # xxx if there are jumps, then they must be visible in the costs + totalCost = sum(costs) + IJ.log('The total cost of the retrieved order is ' + str(totalCost)) + IJ.log('The total cost of the incremental order is ' + str( sum( [ mat[t][t+1] for t in range(len(order) - 1)] )) ) + return order, costs + +def saveMatToTSPLIB(mat, path): + # the matrix is a distance matrix + IJ.log('Entering saveMatToTSPLIB') + n = len(mat) + f = open(path, 'w') + f.write('NAME: Section_Similarity_Data' + '\n') + f.write('TYPE: TSP' + '\n') + f.write('DIMENSION: ' + str(n + 1) + '\n') + f.write('EDGE_WEIGHT_TYPE: EXPLICIT' + '\n') + f.write('EDGE_WEIGHT_FORMAT: UPPER_ROW' + '\n') + f.write('NODE_COORD_TYPE: NO_COORDS' + '\n') + f.write('DISPLAY_DATA_TYPE: NO_DISPLAY' + '\n') + f.write('EDGE_WEIGHT_SECTION' + '\n') + + distances = [0]*n #dummy city + for i in range(n): + for j in range(i+1, n, 1): + distance = mat[i][j] + distances.append(int(float(distance*1000))) + + for id, distance in enumerate(distances): + f.write(str(distance)) + if (id + 1)%10 == 0: + f.write('\n') + else: + f.write(' ') + f.write('EOF' + '\n') + f.close() + +# Jama matrix operations +def pythonToJamaMatrix(m): + a = Matrix(jarray.array([[0]*len(m) for id in range(len(m))], java.lang.Class.forName("[D"))) + for x, col in enumerate(m): + for y, val in enumerate(col): + a.set(x, y, m[x][y]) + return a + +def perm(order): # permutation matrix + n = len(order) + rows = [] + for idRow in range(n): + row = 
([float(0)]*n) + row[order.index(idRow)] = 1 + rows.append(row) + # print '[row for row in rows]',[row for row in rows] + m = Matrix(jarray.array([row for row in rows], java.lang.Class.forName("[D"))) + # print 'permutation', m.getArrayCopy()[100], (m.getArrayCopy()[100]).index(1) + return m + +def reorderM(m, order): + pm = perm(order) + # pm = pm.transpose() + + # print 'inverse', pm.inverse().getArrayCopy()[0], (pm.inverse().getArrayCopy()[0]).index(1) + # print '((pm.inverse()).times(m)).times(pm)', (((pm.inverse().times(m)).times(pm)).getArrayCopy())[0] + return (pm.inverse().times(m)).times(pm) + # return pm.times(m).times(pm.inverse()) + +#################### +# Computing similarity +#################### + +def imToPeak(im, x, y, stdev, center, stretch, medianRadius, threshold = []): + im = fc.normLocalContrast(im, x, y, stdev, center, stretch) + + # IJ.run(im, 'Invert', '') + if threshold: + im = fc.minMax(im, threshold[0], threshold[1]) + # IJ.run(im, 'Median...', 'radius=' + str(medianRadius)) # this median might a big effect actually ... 
+ # IJ.run(im, 'Invert', '') # invert should be run after the trakem Rotation to leave the beads bright and the background black + return im + +def preprocessImToPeak(imPaths, atomIndex, x, y, stdev, center, stretch, medianRadius, threshold): + while atomIndex.get() < len(imPaths): + index = atomIndex.getAndIncrement() + if index < len(imPaths): + IJ.log('Preprocessing tile ' + str(index)) + imPath = imPaths[index] + im = IJ.openImage(imPath) + im = imToPeak(im, x, y, stdev, center, stretch, medianRadius, threshold) + # imInfo = im.getOriginalFileInfo() + # IJ.save(im, os.path.join(imInfo.directory, imInfo.fileName)) + IJ.save(im, imPath) + im.close() + +def getPeaks(atom, paths): + while atom.get() < len(paths) : + k = atom.getAndIncrement() + if k < len(paths): + im = IJ.openImage(paths[k]) + ip = im.getProcessor() + points = [] + MF = MaximumFinder() + if ('A7' in inputFolder) or ('B6' in inputFolder) or ('C1' in inputFolder): + poly = MF.getMaxima(ip, maximaNoiseTolerance, True) # noise tolerance, excludeOnEdges + for x,y in zip(poly.xpoints, poly.ypoints): + + # not only append the location but also the size + theMax = im.getPixel(x, y)[0] + threshold = peakDecay * theMax + + intenseDisk = True + d = 1 + while intenseDisk:# grow concentric disks and check the mean instensity relative to the intensity of the peak + disk = OvalRoi(int(round(x-d/2.)), int(round(y-d/2.)), d, d) + im.setRoi(disk) + theMean = im.getStatistics(Measurements.MEAN).mean + if theMean > threshold: + d = d + 1 + else: + intenseDisk = False + im.killRoi() + + points.append([x, y, d]) + + + elif 'BIB' in inputFolder: + ############# /!\ Warning for BIB Manual to remove /!\ ############### + IJ.run(im, 'Invert', '') + im = fc.normLocalContrast(im, 10, 10, 3, True, True) + IJ.run(im, 'Median...', 'radius=' + str(2)) + im = fc.minMax(im, 210, 255) + im = fc.minMax(im, 200, 200) + IJ.run(im, 'Invert', '') + points = getConnectedComponents(im) + + IJ.log(str(k) + '-' + str(len(points)) + ' 
peaks') + IJ.log(str(k) + '--' + str(points[:10]) + ' peaks') + loader1.serialize(points, os.path.join(peaksFolder, 'peaks_channel_' + channel + '_' + str(k).zfill(4)) ) #xxx is there no other way to serialize in imagej without using a trakem2 loader ? I do not think so, see below ... + im.close() + +################################### +# # # # # # # deserializing attempt: +################################### +# # # # # from java.io import ObjectOutputStream, FileOutputStream, ObjectInputStream, FileInputStream +# # # # # from org.python.util import PythonObjectInputStream +# # # # # path = r'E:\Users\Thomas\Wafer_SFN_2016\OrderRetrieval_SFN_2016\sectionOutput\peaks_488-546_0102' +# # # # # #out = ObjectOutputStream(FileOutputStream(path)) +# # # # # #out.writeObject(ob) +# # # # # #out.close() +# # # # # #print out +# # # # # a = [] +# # # # # r = PythonObjectInputStream(FileInputStream(path)) +# # # # # print r +# # # # # #ob = r.read() +# # # # # #print ob +# # # # # #ob = r.readFully(a) +# # # # # #print a +# # # # # print r.resolveObject(r) +# # # # # for t in range(1000): + # # # # # print r.read() +# # # # # #ob = r.readObject() +# # # # # #r.close() +# # # # # #print ob + + +def getDogs(im, radius, threshold, doSubpixel, doMedian): + points = [] + img = ImageJFunctions.wrap(im) + interval = img + cal = im.getCalibration() + calibration = [cal.pixelWidth, cal.pixelHeight, cal.pixelDepth] + detector = LogDetector(img, interval, calibration, radius, threshold, doSubpixel, doMedian) + detector.process() + peaks = detector.getResult() + for peak in peaks: + points.append([peak.getDoublePosition(0) / cal.pixelWidth, peak.getDoublePosition(1) / cal.pixelHeight]) + return points + +def getConnectedComponents(im): + points = [] + roim = RoiManager(True) + pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER + ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES, Measurements.AREA, None, 0, Double.POSITIVE_INFINITY, 0.0, 1.0) + + pa.setRoiManager(roim) + + pa.analyze(im) + for 
roi in roim.getRoisAsArray(): + points.append(roi.getContourCentroid()) + roim.close() + + return points + +def getDistance(atom, pairs, allDogs, corrMat, affineDict, matchPointsDict): + dogsContainer = ArrayList() + nPairs = len(pairs) + if nPairs !=0: + while atom.get() < len(pairs): + k = atom.getAndIncrement() + if k < len(pairs): + IJ.log('Processing pair ' + str(k) ) + # if k%(int(nPairs/1000.)) == 0: + # print str(int(k/float(nPairs) * 1000.)), '/1000 done' + + id1, id2 = pairs[k][0], pairs[k][1] + dogs1 = allDogs[id1] + dogs2 = allDogs[id2] + + dogsContainer.add(dogs1) + dogsContainer.add(dogs2) + + comparePairs = Matching.descriptorMatching(dogsContainer, 2, dp, 0) + + distances = 0 + inliers = comparePairs[0].inliers + + if len(inliers)>0: + for inlier in inliers: + distances = distances + inlier.getDistance() + meanDistance = distances/float(len(inliers)) + + # # # # corrMat[id1][id2] = meanDistance + corrMat[id1][id2] = len(dogs1) - len(inliers) + + affineDict[(id1, id2)] = comparePairs[0].model.createAffine() + + points1 = [] + points2 = [] + + for inlier in inliers: + p1 = inlier.getP1().getL() + p2 = inlier.getP2().getL() + + points1.append([p1[0], p1[1]]) + points2.append([p2[0], p2[1]]) + + matchPointsDict[(id1, id2)] = [points1, points2] + IJ.log('dist value found ' + str(id1) + '-' + str(id2) + '--' + str(meanDistance)) + else: + corrMat[id1][id2] = 500000 + dogsContainer.clear() + IJ.log('getDistance has run') + +def getMatchingCost(dogsContainer, dogs1, dogs2, sizes1, sizes2): + # calculate a matching, this time the crop is probably larger and the pair of section has already matched + dogsContainer.clear() + dogsContainer.add(dogs1) + dogsContainer.add(dogs2) + comparePairs = Matching.descriptorMatching(dogsContainer, 2, dp, 0) + inliers = comparePairs[0].inliers + + # transform the dogs to standard points in lists + theDogs1 = [dog.getPosition() for dog in dogs1] + theDogs2 = [dog.getPosition() for dog in dogs2] + + # get the size differences 
between matching beads and get the inlier indexes + sizeCosts = [] + inlierIndexes1 = [] + inlierIndexes2 = [] + for inlier in inliers: + # find p1 in theDogs1 to get the index and access its size + p1 = inlier.getP1().getL() + id1 = theDogs1.index(p1) + inlierIndexes1.append(id1) + size1 = sizes1[id1] + + p2 = inlier.getP2().getL() + id2 = theDogs2.index(p2) + inlierIndexes2.append(id2) + size2 = sizes2[id2] + + sizeCosts.append(abs(size2-size1)) + + # find the outliers and get their size: they are counted as cost too. A 5 pixel bead that disappears adds a cost equal to 5. + outlierIndexes1 = set(inlierIndexes1) ^ set(range(len(theDogs1))) + for outlierId in outlierIndexes1: + sizeCosts.append(sizes1[outlierId]) # xxx warning: or is it sizes2 ? I do not think so ... + + outlierIndexes2 = set(inlierIndexes2) ^ set(range(len(theDogs2))) + for outlierId in outlierIndexes2: + sizeCosts.append(sizes2[outlierId]) + + totalCost = sum(sizeCosts) + + return totalCost + +# def countEvents(dogsContainer, dogs1, dogs2, sizes1, sizes2): + # disappearingBeads = countEndings(dogsContainer, dogs1, dogs2, sizes1, sizes2) + # # appearingBeads = countEndings(dogsContainer, dogs2, dogs1, sizes2, sizes1) + # return disappearingBeads + appearingBeads + +def getEvents(atom, pairs, allPeaks, allCropedDogs, cropSimilarity, corrMat, affineDict, matchPointsDict): + ''' + allCropedDogs already croped with cropMatching + ''' + dogsContainer = ArrayList() + nPairs = len(pairs) + if nPairs !=0: + while atom.get() < len(pairs): + k = atom.getAndIncrement() + if k < len(pairs): + IJ.log('Processing pair ' + str(k) ) + # if k%(int(nPairs/1000.)) == 0: + # print str(int(k/float(nPairs) * 1000.)), '/1000 done' + + id1, id2 = pairs[k][0], pairs[k][1] + dogs1, dogs2 = allCropedDogs[id1], allCropedDogs[id2] + # print '*** LEN --- ', len(dogs2) + + # Pairwise screening + dogsContainer = ArrayList() + dogsContainer.add(dogs1) + dogsContainer.add(dogs2) + comparePairs = 
Matching.descriptorMatching(dogsContainer, 2, dp, 0) + inliers = comparePairs[0].inliers + + if 'B6' in inputFolder: + inlierThreshold = 15 + else: + inlierThreshold = 5 + + if len(inliers) > inlierThreshold: # this pair of sections is matching + # saving then inverting the transform + affineT = comparePairs[0].model.createAffine() + affDeterminant = affineT.getDeterminant() + + if abs(1 - affDeterminant) > 0.1: # it sometimes happens that I get weird transforms, do not understand why yet ... + print 'Error: could not create the inverse - ', str(affineT), 'for pair', str([id1,id2]), 'len(inliers)', len(inliers) + IJ.log('Error: could not create the inverse - ' + str(affineT) + ' for pair ' + str([id1,id2])) + corrMat[id1][id2] = 50000 + + else: + affInverse = affineT.createInverse() + affineDict[(id1, id2)] = affineT + + # saving the match points + points1, points2 = [], [] + for inlier in inliers: + p1, p2 = inlier.getP1().getL(), inlier.getP2().getL() + points1.append([p1[0], p1[1]]) + points2.append([p2[0], p2[1]]) + matchPointsDict[(id1, id2)] = [points1, points2] + + # should I take the convex hull of the total matches and use #events/area ? + # a fake match may fail + # really ? I am not sure why I wrote that ... 
+ # but by chance a fake inlier could give a wrong overlapping region + + # transform peaks1 into peaks2 (here these are all peaks, there was no crop) + + # keep the size information + peaks1 = allPeaks[id1] + peaks2 = allPeaks[id2] + + sizes1 = [peak[2] for peak in peaks1] + sizes2 = [peak[2] for peak in peaks2] + + # because there is the size info in the third position + peaks1 = [peak[:2] for peak in peaks1] + peaks2 = [peak[:2] for peak in peaks2] + + transformedPeaksList1 = array(2 * len(peaks1) * [0], 'd') + affInverse.transform(pointListToList(peaks1), 0, transformedPeaksList1, 0, len(peaks1)) + transformedPeaks1 = listToPointList(transformedPeaksList1) + + # put back the size information + transformedPeaks1 = [peak + [size] for peak,size in zip(transformedPeaks1, sizes1)] + peaks2 = [peak + [size] for peak,size in zip(peaks2, sizes2)] + + # crop the aligned peaks with the bounding box defined by cropSimilarity + croppedTransformedPeaks1 = cropPeaks(transformedPeaks1, cropSimilarity) + croppedPeaks2 = cropPeaks(peaks2, cropSimilarity) + + # getting once more the new sizes (cropping has occured) + sizes1 = [peak[2] for peak in croppedTransformedPeaks1] + sizes2 = [peak[2] for peak in croppedPeaks2] + + matchingCost = getMatchingCost(dogsContainer, pointsToDogs(croppedTransformedPeaks1), pointsToDogs(croppedPeaks2), sizes1, sizes2) + corrMat[id1][id2] = matchingCost + + IJ.log(str(id1) + '-' + str(id2) + '--' + str(matchingCost) ) + # except Exception, e: + # IJ.log('Did not succeed in inverting the affine transform in pair ' + str(id1) + '-' + str(id2)) + dogsContainer.clear() + IJ.log('getDistance has run') + +def getCC(im1,im2): + im1, im2 = map(ImagePlusAdapter.wrap, [im1, im2]) + cc = CrossCorrelation(im1, im2) + cc.process() + return cc.getR() + +def getHighResCorrMat(atom, pairs, affineDict, stitchedSectionPaths, corrMat): + counter = 0 + + if counter%400 ==399: + fc.closeProject(p) + p, loader, layerset, nLayers = fc.getProjectUtils( 
fc.initTrakem(ccCalculationFolder, 2) ) + # p.saveAs(os.path.join(ccCalculationFolder, 'pproject' + str(0) + '.xml'), True) + layer1 = layerset.getLayers().get(0) + layer2 = layerset.getLayers().get(1) + + # disp = Display(p, layerset.getLayers().get(0)) + # disp.showFront(layerset.getLayers().get(0)) + + while atom.get() < len(pairs) : + k = atom.getAndIncrement() + if k < len(pairs): + if counter%300 ==299: + fc.closeProject(p) + time.sleep(3) + p, loader, layerset, nLayers = fc.getProjectUtils( fc.initTrakem(ccCalculationFolder, 2) ) + p.saveAs(os.path.join(ccCalculationFolder, 'pproject' + str(k) + '.xml'), True) + layer1 = layerset.getLayers().get(0) + layer2 = layerset.getLayers().get(1) + + pair = pairs[k] + aff = affineDict[pair] + + path1 = stitchedSectionPaths[pair[0]] + path2 = stitchedSectionPaths[pair[1]] + + patch1 = Patch.createPatch(p, path1) + patch2 = Patch.createPatch(p, path2) + + layer1.add(patch1) + layer2.add(patch2) + + patch1.setAffineTransform(aff) + # patch2.updateBucket() + + fc.resizeDisplay(layerset) + bb = layerset.get2DBounds() + #shrink the BB ? 
			# --- tail of the worker function defined above this chunk ---
			# NOTE(review): indentation reconstructed from a mangled diff — confirm
			# against the original file. Renders both layers flat and scores them.
			im1 = loader.getFlatImage(layer1, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer1.getAll(Patch), True, Color.black, None)
			im2 = loader.getFlatImage(layer2, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer2.getAll(Patch), True, Color.black, None)

			# low cross-correlation -> large score (used as a cost by the reordering)
			corr = Math.exp((4-getCC(im1,im2)) * 2)
			corrMat[pair[0]][pair[1]] = corr
			IJ.log('Processing pair ' + str(k) + ' with correlation ' + str(corr))
			if k%100 == 0:
				print 'Processing pair ', str(k), ' with correlation ', str(corr)

			layer1.remove(patch1)
			layer2.remove(patch2)
			counter = counter + 1
	fc.closeProject(p)

def getCCCorrMat(atom, pairs, affineDict, stitchedSectionPaths, theMat):
	# Thread worker: scores every section pair in `pairs` by cross-correlation
	# and writes the score into the shared matrix `theMat`.
	#
	# atom                  -- shared AtomicInteger distributing the work items
	# pairs                 -- list of (index1, index2) section-id pairs
	# affineDict            -- maps a pair to the affine aligning section 1 onto section 2
	# stitchedSectionPaths  -- image path per section id
	# theMat                -- output matrix, theMat[i][j] receives the score
	#
	# Uses the global two-layer TrakEM2 project (pZ, layersetZ, loaderZ) and the
	# global `lock` to serialize project mutations across worker threads.
	layer1 = layersetZ.getLayers().get(0)
	layer2 = layersetZ.getLayers().get(1)

	while atom.get() < len(pairs) :
		k = atom.getAndIncrement()
		if k < len(pairs):

			pair = pairs[k]
			aff = affineDict[pair]

			path1 = stitchedSectionPaths[pair[0]]
			path2 = stitchedSectionPaths[pair[1]]

			patch1 = Patch.createPatch(pZ, path1)
			patch2 = Patch.createPatch(pZ, path2)

			# project operations are not thread-safe: everything touching the
			# shared project happens under the global lock
			# NOTE(review): lock scope inferred from the mangled diff — confirm
			# that it ends after the two remove() calls.
			with lock:# projet operations
				layer1.add(patch1)
				layer2.add(patch2)
				patch1.setAffineTransform(aff)

				fc.resizeDisplay(layersetZ)
				bb = layersetZ.get2DBounds()

				factor = 0.65

				# shrink the bounding box to its central 65% to ignore borders
				bb = Rectangle(int(bb.width * (1 - factor)/2), int(bb.height * (1 - factor)/2), int(bb.width * factor), int(bb.height * factor))

				im1 = loaderZ.getFlatImage(layer1, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer1.getAll(Patch), True, Color.black, None)
				im2 = loaderZ.getFlatImage(layer2, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer2.getAll(Patch), True, Color.black, None)

				layer1.remove(patch1)
				layer2.remove(patch2)

			IJ.run(im1, 'Invert', '')
			IJ.run(im2, 'Invert', '')

			# low cross-correlation -> large score
			corr = Math.exp( (4 - getCC(im1,im2)) * 2)

			theMat[pair[0]][pair[1]] = corr
			IJ.log('Processing pair ' + str(k) + ' with correlation ' + str(corr))

			if k%100 == 0:
				print 'Processing pair ', str(k), ' with correlation ', str(corr)
				IJ.log('Processing pair ' + str(k) + ' with correlation ' + str(corr))
IJ.log('reorderedProjectPath ' + reorderedProjectPath) + + # project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) + + # for l,layer in enumerate(project.getRootLayerSet().getLayers()): + # IJ.log('Inserting layer ' + str(l) + '...') + # reorderedLayer = layersetReordered.getLayers().get(order.index(l)) + # # for ob in layer.getDisplayables(): + # # reorderedLayer.add(ob.clone(pReordered, False)) + # # xxx something missing to update the layer ? + # for patch in layer.getDisplayables(): + # patchPath = loader.getAbsolutePath(patch) + # patchTransform = patch.getAffineTransform() + + # newPatch = Patch.createPatch(pReordered, patchPath) + # reorderedLayer.add(newPatch) + # newPatch.setAffineTransform(patchTransform) + + # fc.closeProject(project) + # fc.resizeDisplay(layersetReordered) #I should check the size of the display of the reordered project, should be the same as the retrievalProject + # pReordered.save() + # fc.closeProject(pReordered) + # IJ.log('Project reordering done') + +def affineRealignProject(sourcePath, targetPath, SIFTMatchesPath, optionalMatchesPath = None): + shutil.copyfile(sourcePath, targetPath) + p, loader, layerset, nLayers = fc.openTrakemProject(targetPath) + p.saveAs(targetPath, True) + affineDict = loader.deserialize(SIFTMatchesPath)[1] + if optionalMatchesPath != None: + affineDictsOptional = [loader.deserialize(optionalMatchPath)[1] for optionalMatchPath in optionalMatchesPath] + + aff_0_To_N = AffineTransform() + firstPair = True + for l, layer1 in enumerate(layerset.getLayers()): + if l < nLayers - 1: + # if l < 100: + IJ.log('Processing layer - ' +str(l)) + layer1 = layerset.getLayers().get(l) + layer2 = layerset.getLayers().get(l+1) + + patch1 = layer1.getDisplayables(Patch)[0] + patch2 = layer2.getDisplayables(Patch)[0] + + aff = AffineTransform() + + aff1 = patch1.getAffineTransform() + aff2 = patch2.getAffineTransform() + + id1 = int(os.path.splitext(os.path.basename(patch1.getFilePath()))[0].split('_')[-1]) + 
id2 = int(os.path.splitext(os.path.basename(patch2.getFilePath()))[0].split('_')[-1]) + + aff12 = AffineTransform() + thereIsATransform = False + + IJ.log('a') + if ( (id1, id2) in affineDict and + abs(1 - affineDict[(id1, id2)].getDeterminant()) > 0.1): + print 'determinant for pair', id1, id2, affineDict[(id1, id2)].getDeterminant() + print 'Error: could not apply a non-invertible transform for pair ', str([id1, id2]) + IJ.log('Error: could not apply a non-invertible transform for pair ' + str([id1, id2])) + if ( (id1, id2) in affineDict and + abs(1-affineDict[(id1, id2)].getDeterminant()) < 0.1): + # print 'determinant for pair', id1, id2, affineDict[(id1, id2)].getDeterminant() + # IJ.log(str((id1, id2)) + ' in dict') + aff12.concatenate(affineDict[(id1, id2)].createInverse()) + thereIsATransform = True + elif (id2, id1) in affineDict: + # IJ.log(str((id2, id1)) + ' in dict') + if abs(1 - affineDict[(id2, id1)].getDeterminant()) > 0.1: + print 'Error with the forward transform' + else: + aff12.concatenate(affineDict[(id2, id1)]) + thereIsATransform = True + else: + if optionalMatchesPath != None: + for optionalDict in affineDictsOptional: + if not thereIsATransform: + if ( (id1, id2) in optionalDict and + abs(1 - optionalDict[(id1, id2)].getDeterminant()) > 0.1): + print 'Error: could not apply a non-invertible transform for pair ', str([id1, id2]) + IJ.log('Error: could not apply a non-invertible transform for pair ' + str([id1, id2])) + if ((id1, id2) in optionalDict and + abs(1 - optionalDict[(id1, id2)].getDeterminant() < 0.1)): + thereIsATransform = True + aff12.concatenate(optionalDict[(id1, id2)].createInverse()) + elif (id2, id1) in optionalDict: + if abs(1 - optionalDict[(id2, id1)].getDeterminant()) > 0.1: + print 'Error with the forward transform' + else: + thereIsATransform = True + aff12.concatenate(optionalDict[(id2, id1)]) + + if thereIsATransform: + aff.concatenate(aff_0_To_N) # apply all the previous affine + aff.concatenate(aff12) # apply the 
new affine from n to n+1 + patch2.setAffineTransform(aff) + patch2.updateBucket() + aff_0_To_N.concatenate(aff12) + + # disp = Display(p, layerset.getLayers().get(0)) + # disp.showFront(layerset.getLayers().get(0)) + IJ.log('d') + # fc.resizeDisplay(layerset) + IJ.log('e') + p.save() + IJ.log('f') + fc.closeProject(p) + IJ.log('g') + +def sumAffineProject(projectPath, orderedImagePaths, consecAffineTransformPaths): + p, loader, layerset, nLayers = fc.getProjectUtils( fc.initTrakem(baseFolder, nSections) ) + p.saveAs(projectPath, True) + + aff_0_To_N = AffineTransform() + for l, layer in enumerate(layerset.getLayers()): + patch = Patch.createPatch(p, orderedImagePaths[l]) + layer.add(patch) + for l, layer1 in enumerate(layerset.getLayers()): + if l < nLayers - 1: + IJ.log('Processing layer -- ' +str(l)) + layer2 = layerset.getLayers().get(l+1) + + patch1 = layer1.getDisplayables(Patch)[0] + patch2 = layer2.getDisplayables(Patch)[0] + + aff = AffineTransform() + + aff1 = patch1.getAffineTransform() + aff2 = patch2.getAffineTransform() + + aff12 = loader.deserialize(consecAffineTransformPaths[l]) + + aff.concatenate(aff_0_To_N) # apply all the previous affine + aff.concatenate(aff12) # apply the new affine from n to n+1 + patch2.setAffineTransform(aff) + patch2.updateBucket() + aff_0_To_N.concatenate(aff12) + fc.resizeDisplay(layerset) + p.save() + time.sleep(3) + fc.closeProject(p) + +def elasticRealignProject(sourcePath, targetPath): + shutil.copyfile(sourcePath, targetPath) + p, loader, layerset, nLayers = fc.openTrakemProject(targetPath) + p.saveAs(targetPath, True) + + dogsContainer = ArrayList() + newMosaicSize = getNewMosaicSize() + + for l, layer in enumerate(layerset.getLayers()): + # if (l > 0) and (l < 100): + if (l > 0): + # if (l < nSections - 1): + # if (l > - 1): + IJ.log('Elastic aligning layer ' + str(l)) + bb = layerset.get2DBounds() + + layer1 = layerset.getLayers().get(l-1) + layer2 = layerset.getLayers().get(l) + + # getting im1 to be transformed 
+ sectionIndex1 = layer1.getAll(Patch)[0] + im1 = loader.getFlatImage(layer1, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer1.getAll(Patch), True, Color.black, None) + im1Path = os.path.join(calculationFolder, 'bunwarpTransform_finalAlignment_im1_' + channel + '_' + str(l).zfill(4) + '.tif') + IJ.save(im1, im1Path) + + # getting the peaks from the preprocessed section1 + imPreprocessed1 = IJ.openImage(os.path.join(preprocessedFolder, 'stitchedRotatedSection_' + channel + '_' + str(sectionIndex1).zfill(4) + '.tif')) + points1 = getConnectedComponents(imPreprocessed1) + IJ.log('Layer ' + str(l-1) + ' has ' + str(len(points1)) + ' peaks') + dogs1 = pointsToDogs(points1) + imPreprocessed1.close() + + # getting im1 to be transformed + # im2Path = os.path.join(sectionOutputFolder, 'bunwarpTransform_finalAlignment_im2_' + channel + '_' + str(l).zfill(4) + '.tif') + im2Index = int(os.path.splitext(os.path.basename(im2Path))[0].split('_')[-1]) + im2Path = os.path.normpath(layer.getDisplayables(Patch)[0].getImageFilePath()) + + # getting the peaks from the preprocessed section2 + # # # # For layer2, I can take the already calculated peaks because the image has sill not been warped yet. 
+ # # # imPreprocessed2 = IJ.openImage(os.path.join(preprocessedFolder, 'stitchedRotatedSection_' + channel + '_' + str(sectionIndex2).zfill(4) + '.tif')) + # # # points2 = getConnectedComponents(imPreprocessed2) + # # # IJ.log('Layer ' + str(l-1) + ' has ' + str(len(points1)) + ' peaks') + # # # dogs2 = pointsToDogs(points2) + # # # imPreprocessed2.close() + dogs2 = pointsToDogs(loader.deserialize(os.path.join(peakFolder, 'peaks_' + channel + '_' + str(sectionIndex2).zfill(4)))) + + dogsContainer.add(dogs2) # WARNING: order is inverted + dogsContainer.add(dogs1) + comparePairs = Matching.descriptorMatching(dogsContainer, 2, dp, 0) + dogsContainer.clear() + + inliers = comparePairs[0].inliers + if not (len(inliers)>0): + IJ.log('ERROR: There should be inliers !') + print '# # # WARNING # # # : there are no inliers in layer ', l + else: + stack1 = Stack() + stack2 = Stack() + + for inlier in inliers: + p1 = inlier.getP1().getL() + p2 = inlier.getP2().getL() + + stack1.push(Point(int(p1[0]), int(p1[1]))) + stack2.push(Point(int(p2[0]), int(p2[1]))) + + trans = computeTransformationBatch(im1.getWidth(), im1.getHeight(), newMosaicSize[0], newMosaicSize[1], stack1, stack2, unwarpParam) + transPath = os.path.join(calculationFolder, 'bunwarpTransform_finalAlignment_' + channel + '_' + str(l).zfill(4)) + + trans.saveDirectTransformation(transPath) + # trans.saveInverseTransformation(transPath) + + transformedPath = os.path.join(warpFolder, 'bunwarpTransform_finalAlignment_elastiked_im2_' + channel + '_' + str(l).zfill(4) + '.tif') + + elasticTransformImageMacro(im1Path, im2Path, transPath, transformedPath) # targetPath, sourcePath, transPath, transformedPath + + im2Elastiked = IJ.openImage(transformedPath) + IJ.run(im2Elastiked, '8-bit', '') + im2Elastiked = fc.minMax(im2Elastiked, 100,100) + IJ.save(im2Elastiked, transformedPath) + + layer.remove(layer.getDisplayables(Patch)[0]) + patch = Patch.createPatch(p, transformedPath) + layer.add(patch) + patch.updateBucket() + 
fc.resizeDisplay(layerset) + p.save() + fc.closeProject(p) + +########## +# Garbage +########## +def pointListToList(pointList): # [[1,2],[5,8]] to [1,2,5,8] + l = array(2 * len(pointList) * [0], 'd') + for id, point in enumerate(pointList): + l[2*id] = point[0] + l[2*id+1] = point[1] + return l + +def listToPointList(l): # [1,2,5,8] to [[1,2],[5,8]] + pointList = [] + for i in range(len(l)/2): + pointList.append([l[2*i], l[2*i+1]]) + return pointList + +def pointListToDOGPs(points): + DOGPs = ArrayList() + for point in points: + DOGPs.add(DifferenceOfGaussianPeak( [int(point[0]), int(point[1]) ] , IntType(255), SpecialPoint.MAX )) + return DOGPs + +def sectionToPoly(l): + return Polygon( [int(a[0]) for a in l] , [int(a[1]) for a in l], len(l)) + +def getAffFromPoints(sourcePoints, targetPoints): + sourceDOGPs = pointListToDOGPs(sourcePoints) + targetDOGPs = pointListToDOGPs(targetPoints) + + dogpContainer = ArrayList() + + dogpContainer.add(sourceDOGPs) + dogpContainer.add(targetDOGPs) + + comparePairs = Matching.descriptorMatching(dogpContainer, 2, dp, 0) + aff = comparePairs[0].model.createAffine() + return aff + +def parallelWarpCC(pairs): + nPairs = len(pairs) + newMosaicSize = getNewMosaicSize() + if nPairs !=0: + while atom.get() < len(pairs): + k = atom.getAndIncrement() + if k < len(pairs): + IJ.log('Processing pair ' + str(k) ) + + pair = pairs[k][0] + points1, points2 = pairs[k][1] + + stack1 = Stack() + stack2 = Stack() + for point1 in points1: + # point = Point(2) + # point.setPosition([int(point1[0]), int(point1[1])]) + # stack1.push(point) + stack1.push(Point(int(point1[0]), int(point1[1]))) + for point2 in points2: + # point = Point(2) + # point.setPosition([int(point2[0]), int(point2[1])]) + # stack2.push(point) + stack2.push(Point(int(point2[0]), int(point2[1]))) + trans = computeTransformationBatch(newMosaicSize[0], newMosaicSize[1], newMosaicSize[0], newMosaicSize[1], stack1, stack2, unwarpParam) + + transPath = os.path.join(calculationFolder, 
'bunwarpTransform_' + channel + '_' + str(pair[0]) + '_' + str(pair[1])) + + trans.saveDirectTransformation(transPath) + # trans.saveInverseTransformation(transPath) + + sourcePath = os.path.join(rawFolder, 'stitchedRotatedSection_' + channel + '_' + str(pair[0]).zfill(4) + '.tif') + targetPath = os.path.join(rawFolder, 'stitchedRotatedSection_' + channel + '_' + str(pair[1]).zfill(4) + '.tif') + + transformedPath = os.path.join(warpFolder, 'transformed_' + channel + '_' + str(pair[0]) + '_' + str(pair[1]) + '.tif') + elasticTransformImageMacro(targetPath, sourcePath, transPath, transformedPath) + + im1 = IJ.openImage(targetPath) + im2 = IJ.openImage(transformedPath) + IJ.run(im2, '8-bit', '') + + im1 = fc.minMax(im1, 5, 180) + im2 = fc.minMax(im2, 5, 180) + + # corr = Math.exp((4-getCC(im1,im2)) * 2) + corr = 1./getCC(im1,im2) + corrMat[pair[0]][pair[1]] = corr + + # crop to the center (or to the barycenter of the matchpoints) + # bary2 = barycenter(points2) + bary2 = [int(newMosaicSize[0]/2.), int(newMosaicSize[1]/2.)] # trying with simply the center + + x1 = max(0, bary2[0] - cropForSimilarity[0]/2.) + x2 = min(mosaicX, bary2[0] + cropForSimilarity[0]/2.) + y1 = max(0, bary2[1] - cropForSimilarity[1]/2.) + y2 = min(mosaicY, bary2[1] + cropForSimilarity[1]/2.) 
+ roi = Roi(x1, y1, x2 - x1, y2 - y1) + + im1 = fc.crop(im1,roi) + im2 = fc.crop(im2,roi) + + # corr = Math.exp((4-getCC(im1,im2)) * 2) + corr = 1./getCC(im1,im2) + corrMat_Crop[pair[0]][pair[1]] = corr + + im1.close() + im2.close() + + # # I can remove at the end + # os.remove(transformedPath) + # os.remove(transPath) + + + logMessage = 'Pair number ' + str(k) + ' (' + str(pair[0]) + ',' + str(pair[1]) + ') has a CC value of ' + str(corr) + IJ.log(logMessage) + # print logMessage + +def orderDistance(order1, order2): + ''' order1 is the reference ''' + allCosts = [] + for id, section in enumerate(order2[:-1]): + nextSection = order2[id + 1] + + sectionPosition = order1.index(section) + nextSectionPosition = order1.index(nextSection) + + distance = min(abs(nextSectionPosition - sectionPosition) - 1, 10) + + allCosts.append(distance) + totalCost = sum(allCosts) + print Counter(allCosts) + return totalCost + +################################# +###### Parameters to enter ###### +################################# +# inputFolder = os.path.normpath(os.path.join(r'E:\Users\Thomas\Thesis\B6\B6_Wafer_203_Beads_WorkingFolder\AllBeads', '')) +inputFolder = os.path.normpath(os.path.join(r'E:\Users\Thomas\Thesis\C1\C1_Beads_Reordering\AllBeads', '')) + +# twoStepsReordering = True +twoStepsReordering = False +peakDecay = 0.8 + +beadChannels = ['488', '546'] +mosaicLayout = [1,1] +overlap = 50 +# stitchingChannel = 'brightfield' +stitchingChannel = '488' # use one of the bead channels if mosaicLayout = [1,1] and that there is no brightfield + +firstSectionFolder = os.path.join(inputFolder, os.walk(inputFolder).next()[1][0]) +firstImagePath = os.path.join(firstSectionFolder, os.walk(firstSectionFolder).next()[2][0]) +im0 = IJ.openImage(firstImagePath) +width = im0.getWidth() +height = im0.getHeight() +im0.close() + +# width, height = 2048, 2048 +# width, height = 1388, 1040 + +# refSectionOffset = -22.5 * PI / 180 # change this only if there is no target_highres_landmarks 
+refSectionOffset = 58 * PI / 180 # for A7_200 + + +# The scaling factor between the magnification of the wafer overview and the magnification of the bead imaging +# beadsToWaferFactor = 13 # Leica 630/52 and 1180/88 +# beadsToWaferFactor = 630/float(155) # (ZeissZ1, 5x) to (Nikon, 20x) 155 630 +# beadsToWaferFactor = 630/float(155) * 1 # (ZeissZ1, 5x) to (Z1, 20x) + +# beadsResolution = +# waferResolution = 1804000/1388. +# beadsToWaferFactor = beadsResolution/float(waferResolution) +beadsToWaferFactor = 4.01409 # from Visitron20x to ZeissZ1 5x + +# parameter for maxima finder +maximaNoiseTolerance = 200 +if 'C1' in inputFolder: + noiseTolerance = 150 + + +################################# +################################# + +####### matching parameters, probably not optimal yet ####### +dp = DescriptorParameters() +# dp.model = RigidModel2D() # old, not good, use affine now +dp.model = AffineModel2D() +dp.dimensionality = 2 +dp.fuse = 2 # no overlay +dp.brightestNPoints = 2000 +dp.redundancy = 1 +# dp.ransacThreshold = 50 # 37 pairs with neighbors = 3 +dp.ransacThreshold = 10 # 37 pairs with neighbors = 3 +# dp.ransacThreshold = 1 # 29 pairs with 3 neighbors +# dp.ransacThreshold = 1000 # 42 pairs with 3 neighbors +dp.lookForMaxima = True + +dp.minSimilarity = 100 +# dp.numNeighbors = 6 # 10 pairs for 5 sections +# dp.numNeighbors = 4 # too few matches in A7_200 +# dp.numNeighbors = 3 # 600 pairs for 200 sections in A7_200 ? 37 pairs for 5 sections +# dp.numNeighbors = 3 # pairs for 5 sections +# dp.numNeighbors = 4 # pairs for 5 sections +# dp.numNeighbors = 8 +dp.numNeighbors = 3 + +print 'dp.minSimilarity', dp.minSimilarity +# dp.minSimilarity = 10 +print 'minInlierFactor', dp.minInlierFactor +print 'sigma1', dp.sigma1 +print 'sigma2', dp.sigma2 +print 'threshold', dp.threshold +print 'filterRANSAC', dp.filterRANSAC +print 'redundancy', dp.redundancy # 1 is ok +print 'dp.significance', dp.significance +# dp.significance = 10 +# 8/0 + + +# Older parameters. 
Keep in case +# dp.numNeighbors = 3 +# dp.brightestNPoints = 3 +# dp.maxIterations = 1000 +# dp.iterations = 1000 +# dp.max = 255 +# dp.ransacThreshold = 1000 + +# parameters for DoGs +radius = 10 +threshold = 0.7 +doSubpixel = True +doMedian = False + +################################# +###### Folder initializations ### +################################# +# Experiment-279_b0s1c2x920-1388y360-1040m0 + + +# # /!\TO COMMENT +# ##################################################### +# ##################################################### +# ##################################################### +# # For Zeiss BIB manual experiment: preprocess the files to the right format +# wrongIndexes = [1,2,3,20,4,6,5,19,7,16,15,17,8,18,11,9,13,12,14,10] +# for idChannel, beadChannel in enumerate(beadChannels): + # imageNames = filter(lambda x: ('c' + str(idChannel + 1) + 'x' in x) and (os.path.splitext(x)[1] =='.tif'), os.listdir(inputFolder)) + # for imageName in imageNames: + # id = int(imageName.split('s')[1].split('c')[0]) + # sectionFolder = fc.mkdir_p(os.path.join(inputFolder, 'section_' + str(wrongIndexes[id]-1).zfill(4))) + # shutil.copyfile(os.path.join(inputFolder, imageName), os.path.join(sectionFolder, 'section_' + str(wrongIndexes[id]-1).zfill(4) + '_channel_' + str(beadChannel) + '_tileId_00-00-mag.tif')) +# ##################################################### +# ##################################################### +# ##################################################### + +if stitchingChannel in beadChannels: + allChannels = beadChannels +else: + allChannels = beadChannels + [stitchingChannel] + +baseFolder = os.path.dirname(inputFolder) +sectionsCoordinates = fc.readSectionCoordinates(os.path.join(baseFolder, 'preImaging', 'source_sections_mag.txt')) + +nSections = len(os.walk(inputFolder).next()[1]) +IJ.log('nSections: ' + str(nSections)) + +rawFolder = fc.mkdir_p(os.path.join(baseFolder, 'rawSections')) +preprocessedFolder = fc.mkdir_p(os.path.join(baseFolder, 
'preprocessedSections')) +preprocessedMosaicsFolder = fc.mkdir_p(os.path.join(baseFolder, 'preprocessedMosaics')) +peaksFolder = fc.mkdir_p(os.path.join(baseFolder, 'peaks')) +blobizedFolder = fc.mkdir_p(os.path.join(baseFolder, 'blobizedSections')) +forCCFolder = fc.mkdir_p(os.path.join(baseFolder, 'forCCSections')) +calculationFolder = fc.mkdir_p(os.path.join(baseFolder, 'calculations')) +ccCalculationFolder = fc.mkdir_p(os.path.join(baseFolder, 'ccCalculations')) + +# calculate the offset based on first section and section template +targetLandmarksPath = os.path.join(baseFolder, 'preImaging', 'target_highres_landmarks.txt') +if os.path.isfile(targetLandmarksPath): + targetLandmarks = readPoints(targetLandmarksPath) + # targetLandmarks = [ [-point[0], point[1]] for point in targetLandmarks ] # is flip necessary for Visitron ? + sourceLandmarks = readPoints(os.path.join(baseFolder, 'preImaging', 'source_landmarks.txt')) + affWaferOverviewToLeica = fc.getModelFromPoints(sourceLandmarks, targetLandmarks).createAffine() + angleWaferOverviewToLeica = Math.atan2(affWaferOverviewToLeica.getShearY(), affWaferOverviewToLeica.getScaleY()) # in radian + IJ.log('angleWaferOverviewToLeica ' + str(angleWaferOverviewToLeica)) + refSectionOffset = angleWaferOverviewToLeica # (14.5 + 35) * PI/float(180) # maybe a 35 offset remaining ? +print refSectionOffset +print refSectionOffset * 180/float(PI) +# 8/0 + +effectiveChannels = beadChannels + ['-'.join(beadChannels)] # the raw channels plus the merged channel (no need to make all possible merge configurations, I just take the max merger) + +mosaicX = int(width * mosaicLayout[0] - (mosaicLayout[0] - 1) * (overlap/100. * width)) +mosaicY = int(height * mosaicLayout[1] - (mosaicLayout[1] - 1) * (overlap/100. 
* height)) +IJ.log('The mosaic dimensions is ' + str(mosaicX) + ' ; ' + str(mosaicY)) + +templateMag = fc.readSectionCoordinates(os.path.join(baseFolder, 'preImaging', 'source_tissue_mag_description.txt'))[1] + +safetyFactor = 1.2 # factor to extend the cropping box further to make sure that the section is not overcropped in case the FOV is not well centered on the section + +templateMagBBox = sectionToPoly(templateMag).getBounds() + +widthTemplateMag = templateMagBBox.width # The template mag is already turned: I can simply take the width of the bounding box +heightTemplateMag = templateMagBBox.height + +# section crop parameters used when exporting the stitched images +widthCropMag = int(min(beadsToWaferFactor * widthTemplateMag, mosaicX) * safetyFactor) # the min is used for when the mosaic is smaller than the mag area +heightCropMag = int(min(beadsToWaferFactor * heightTemplateMag, mosaicY) * safetyFactor) + +# Crop parameter used during the pairwise screening +matchingShrinkFactor = 1 # the bounding box for matching will actually be originalSize * safetyFactor * matchingShrinkFactor, currently almost 1 ... 
+ + +if 'B6' in inputFolder: + cropForMatching = [400, 400] + # cropForMatching = [800, 800] +else: + cropForMatching = [int(beadsToWaferFactor * widthTemplateMag * matchingShrinkFactor), int(beadsToWaferFactor * heightTemplateMag * matchingShrinkFactor)] +IJ.log('cropForMatching: ' + str(cropForMatching)) + +# Crop parameter used after alignment of a match: this is the box in which the events are counted +# similarityShrinkFactor = 0.8 +similarityShrinkFactor = 0.8 + +if 'B6' in inputFolder: + cropForSimilarity = [400, 400] + cropForSimilarity = [800, 800] + cropForSimilarity = [1200, 1200] +else: + cropForSimilarity = [int(beadsToWaferFactor * widthTemplateMag * similarityShrinkFactor), int(beadsToWaferFactor * heightTemplateMag * similarityShrinkFactor)] +IJ.log('cropForSimilarity: ' + str(cropForSimilarity)) + +####################### +# 8-biting everything (optionally flipping if x-axis inversion) +####################### +firstSectionFolder = os.path.join(inputFolder, os.walk(inputFolder).next()[1][0]) +firstImagePath = os.path.join(firstSectionFolder, os.walk(firstSectionFolder).next()[2][0]) +im0 = IJ.openImage(firstImagePath) +bitDepth = im0.getBitDepth() +im0.close() +if bitDepth != 8: + IJ.log('8-biting (optionally flipping if x-axis inversion)') + downFactor, vFlip, hFlip = 1, False, False + for channel in allChannels: + theMeanMin, theMeanMax = 0, 0 + counter = 0 + imagePaths = [] + for id, sectionFolderName in enumerate(os.walk(inputFolder).next()[1]): # not using shutil.copytree as it yields an uncatchable error 20047 + sectionIndex = int(sectionFolderName.split('_')[1]) + for tileName in os.walk(os.path.join(inputFolder, sectionFolderName)).next()[2]: + if os.path.splitext(tileName)[1] == '.tif': + if ('channel_' + channel + '_') in tileName: + imagePath = os.path.join(inputFolder, sectionFolderName, tileName) + imagePaths.append(imagePath) + + im = IJ.openImage(imagePath) + stats = im.getStatistics(Measurements.MIN_MAX) + theMeanMax = theMeanMax + 
stats.max + theMeanMin = theMeanMin + stats.min + counter = counter + 1 + im.close() + theMeanMax = int(theMeanMax/counter * 1.1) + theMeanMin = int(theMeanMin/counter * 0.9) + IJ.log('MinMax for channel ' + str(channel) + ' is ' + str([theMeanMin, theMeanMax])) + + atomicI = AtomicInteger(0) + fc.startThreads(convertTo8Bit, fractionCores = 1, wait = 0, arguments = (atomicI, imagePaths, imagePaths, [theMeanMin, theMeanMax], downFactor, vFlip, hFlip)) +####################### + +####################### +# Creating the merged channel +####################### +mergedChannel = effectiveChannels[-1] +ic = ImageCalculator() + +if len(filter(lambda x: 'channel_' + mergedChannel + '_' in x, [filename for root, dirnames, filenames in os.walk(inputFolder) for filename in filenames])) != nSections * mosaicLayout[0] * mosaicLayout[1]: + IJ.log('Creating the merged channel ...') + for id, sectionFolderName in enumerate(os.walk(inputFolder).next()[1]): + sectionIndex = int(sectionFolderName.split('_')[1]) + for idX in range(mosaicLayout[0]): + for idY in range(mosaicLayout[1]): + imagePaths = [] # the paths of the images to merge + imsToMerge = [] # the images to merge + tileTag = str(idX).zfill(2) + '-' + str(idY).zfill(2) + for tileName in os.walk(os.path.join(inputFolder, sectionFolderName)).next()[2]: + for chan in beadChannels: + if ('channel_' + chan + '_tileId_' + tileTag) in tileName: + imagePaths.append(os.path.join(inputFolder, sectionFolderName, tileName)) + for imagePath in imagePaths: + im = IJ.openImage(imagePath) + imsToMerge.append(im) + mergedIm = imsToMerge[0] + for imToMerge in imsToMerge: + mergedIm = ic.run('Max create', mergedIm, imToMerge) + + mergedPath = imagePaths[0].replace('channel_' + beadChannels[0] + '_', 'channel_' + mergedChannel + '_') + IJ.save(mergedIm, mergedPath) + IJ.log('Merged channel created...') +##################################### +# Computing the stitching (and rotating based on section orientation) transforms for the mosaics based 
on the reference stitching channel (e.g. brightfield) and exporting the sections (section = stitched mosaic) +##################################### +sectionAngles = getSectionsAngles(sectionsCoordinates) +mosaicAffineTransformsPath = os.path.join(baseFolder, 'stitchingTransforms') + +worldSize = 5 * width +IJ.log('worldSize ' + str(worldSize)) +boxOffset = 2 * width +IJ.log('boxOffset ' + str(boxOffset)) + +if (not os.path.isfile(mosaicAffineTransformsPath)): + IJ.log('Computing the stitching and rotation transforms for the mosaics based on the reference stitching channel and the preImaging, respectively.') + p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(baseFolder, nSections)) + layerset.setDimensions(0, 0, worldSize, worldSize) + for sectionFolderName in fc.naturalSort(os.walk(inputFolder).next()[1]): + sectionFolder = os.path.join(inputFolder, sectionFolderName) + sectionIndex = int(sectionFolderName.split('_')[1]) + IJ.log('Stitching/Rotating section ' + str(sectionIndex) + ' ...') + layer = layerset.getLayers().get(sectionIndex) + # rotationAff = AffineTransform().getRotateInstance(- sectionAngles[sectionIndex] + refSectionOffset, boxOffset + mosaicX/2., boxOffset + mosaicY/2.) + rotationAff = AffineTransform().getRotateInstance(sectionAngles[sectionIndex] + refSectionOffset, boxOffset + mosaicX/2., boxOffset + mosaicY/2.) 
+ # rotationAff = AffineTransform().getRotateInstance(0) # no angle for debug + + if mosaicLayout != [1,1]: # stitching the layer with the stitchingChannel patches + tileConfigurationPath = os.path.join(sectionFolder, 'TileConfiguration.registered.txt') + if not os.path.isfile(tileConfigurationPath): + IJ.run('Grid/Collection stitching', 'type=[Filename defined position] order=[Defined by filename ] grid_size_x=' + str(mosaicLayout[0]) + ' grid_size_y=' + str(str(mosaicLayout[0])) + ' tile_overlap=' + str(overlap) + ' first_file_index_x=0 first_file_index_y=0 directory=' + sectionFolder + ' file_names=section_' + str(sectionIndex).zfill(4) + '_channel_' + stitchingChannel + '_tileId_{xx}-{yy}-mag.tif output_textfile_name=TileConfiguration.txt fusion_method=[Do not fuse images (only write TileConfiguration)] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap subpixel_accuracy computation_parameters=[Save computation time (but use more RAM)] image_output=[Write to disk] output_directory=' + sectionFolder) + + f = open(tileConfigurationPath, 'r') + lines = f.readlines()[4:] # trimm the heading + f.close() + + for line in lines: + imPath = os.path.join(sectionFolder, line.replace('\n', '').split(';')[0]) + x = int(float(line.replace('\n', '').split(';')[2].split(',')[0].split('(')[1])) + y = int(float(line.replace('\n', '').split(';')[2].split(',')[1].split(')')[0])) + + IJ.log('Inserting patch ' + str(imPath)) + patch = Patch.createPatch(p, imPath) + layer.add(patch) + patch.updateBucket() + patch.setLocation(x + boxOffset, y + boxOffset) + patch.updateBucket() + else: # simply inserting the [1,1] patch in the project + IJ.log('The mosaicLayout is actually only [1,1]') + imName = 'section_' + str(sectionIndex).zfill(4) + '_channel_' + stitchingChannel + '_tileId_00-00-mag.tif' + imPath = os.path.join(sectionFolder, imName) + IJ.log('Inserting patch ' + str(imPath)) + patch = Patch.createPatch(p, imPath) + 
patch.setLocation(boxOffset, boxOffset) + layer.add(patch) + patch.updateBucket() + + for patch in layer.getDisplayables(Patch): # it should work both with [1,1] and other mosaics + currentAff = patch.getAffineTransform() + IJ.log('currentAff' + str(currentAff)) + + currentAff.preConcatenate(rotationAff) + patch.setAffineTransform(currentAff) + + fc.writeAllAffineTransforms(p, mosaicAffineTransformsPath) + p.saveAs(os.path.join(baseFolder, 'stitchingProject.xml'),True) + fc.closeProject(p) + + with (open(mosaicAffineTransformsPath, 'r')) as f: + transforms = f.readlines() + + if stitchingChannel in beadChannels: + channelsToExport = effectiveChannels + else: + channelsToExport = effectiveChannels + [stitchingChannel] + + for channel in channelsToExport: # insert all channels with coordinates computed previously with the stitching and export + p, loader, layerset, _ = fc.getProjectUtils(fc.initTrakem(baseFolder, nSections)) + layerset.setDimensions(0, 0, worldSize, worldSize) + paths = [] + locations = [] + layers = [] + + # for i in range(0, len(transforms), 8)[:5 * mosaicLayout[0] * mosaicLayout[1]]: + for i in range(0, len(transforms), 8): + alignedPatchPath = transforms[i] + alignedPatchName = os.path.basename(alignedPatchPath) + toAlignPatchName = alignedPatchName.replace('channel_' + stitchingChannel, 'channel_' + channel) # otherwise problem with section 488 ... + toAlignPatchPath = os.path.join(os.path.dirname(alignedPatchPath), toAlignPatchName) + toAlignPatchPath = toAlignPatchPath[:-1] # why is there a trailing something !? 
+ IJ.log('toAlignPatchPath ' + toAlignPatchPath) + + l = int(transforms[i+1]) + paths.append(toAlignPatchPath) + locations.append([0,0]) + layers.append(l) + + importFilePath = fc.createImportFile(baseFolder, paths, locations, layers = layers, name = 'channel_' + channel) + IJ.log('Inserting all patches into a trakem project ...') + task = loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 1, 0) + task.join() + layerset.setDimensions(0, 0, worldSize, worldSize) + + IJ.log('Applying the transforms to all patches ...') + for i in range(0, len(transforms), 8): + alignedPatchPath = transforms[i] + alignedPatchName = os.path.basename(alignedPatchPath) + toAlignPatchName = alignedPatchName.replace('channel_' + stitchingChannel, 'channel_' + channel) + toAlignPatchPath = os.path.join(os.path.dirname(alignedPatchPath), toAlignPatchName) + toAlignPatchPath = toAlignPatchPath[:-1] # why is there a trailing something !? + IJ.log('toAlignPatchPath ' + toAlignPatchPath) + + l = int(transforms[i+1]) + aff = AffineTransform([float(transforms[i+2]), float(transforms[i+3]), float(transforms[i+4]), float(transforms[i+5]), float(transforms[i+6]), float(transforms[i+7])]) + layer = layerset.getLayers().get(l) + patches = layer.getDisplayables(Patch) + thePatch = filter(lambda x: os.path.normpath(loader.getAbsolutePath(x)) == os.path.normpath(toAlignPatchPath), patches)[0] + thePatch.setAffineTransform(aff) + thePatch.updateBucket() + + fc.resizeDisplay(layerset) + p.saveAs(os.path.join(baseFolder, 'exportingProject' + channel + '.xml'),True) + Blending.blendLayerWise(layerset.getLayers(), True, None) + + for l, layer in enumerate(layerset.getLayers()): + # # without cropping + # cropRectangle = layerset.get2DBounds() + + # with cropping + center = [layerset.get2DBounds().width/2., layerset.get2DBounds().height/2.] 
+ cropRectangle = Rectangle(int(center[0] - widthCropMag/2.), int(center[1] - heightCropMag/2.), widthCropMag, heightCropMag) + + im = loader.getFlatImage(layer, cropRectangle, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + # IJ.save(im, os.path.join(rawFolder, 'rawStitchedSection_' + channel + '_' + str(l).zfill(4) + '.tif')) + IJ.save(im, os.path.join(rawFolder, 'stitchedRotatedSection_channel_' + channel + '_' + str(l).zfill(4) + '.tif')) + fc.closeProject(p) + + # 8/0 + +##################################### +# Create the trakem project for large wafer overview +##################################### + +sectionAngles = getSectionsAngles(sectionsCoordinates) +projectPath0 = os.path.join(baseFolder, 'waferOverviewProject_channel_' + beadChannels[0] + '.xml') + +if not os.path.isfile(projectPath0): + for channel in beadChannels: + IJ.log('Creating the trakem project for large wafer overview') + projectPath = os.path.join(baseFolder, 'waferOverviewProject_channel_' + channel + '.xml') + + p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + layer = layerset.getLayers().get(0) + sectionPaths = [os.path.join(inputFolder, 'section_' + str(l).zfill(4), 'section_' + str(l).zfill(4) + '_channel_' + channel + '_tileId_00-00-mag.tif') for l in range(nSections)] + + maxX = 0 + maxY = 0 + + for id, sectionPath in enumerate(sectionPaths): + magCenterWafer = barycenter(sectionsCoordinates[id]) + aff = AffineTransform() + + rotationAff = AffineTransform().getRotateInstance(-refSectionOffset, width/2., height/2.) + + transX = magCenterWafer[0] * beadsToWaferFactor -width/2. + transY = magCenterWafer[1] * beadsToWaferFactor -height/2. 
+ + translationAff = AffineTransform().getTranslateInstance(transX, transY) + + maxX = max(maxX, transX) + maxY = max(maxY, transY) + + aff.concatenate(translationAff) + aff.concatenate(rotationAff) + + # a flip needs to be introduced because there is a flip between Visitron and ZeissZ1 + flippedSectionPath = os.path.splitext(sectionPath)[0] + '_flipped' + os.path.splitext(sectionPath)[1] + + im = IJ.openImage(sectionPath) + IJ.run(im, 'Flip Horizontally', '') + IJ.save(im, flippedSectionPath) + im.close() + + IJ.log('Inserting patch ' + str(flippedSectionPath)) + patch = Patch.createPatch(p, flippedSectionPath) + layer.add(patch) + + patch.setAffineTransform(aff) + + patch.updateBucket() + + layerset.setDimensions(0, 0, maxX * 1.1, maxY * 1.1) + + p.saveAs(projectPath, True) + fc.closeProject(p) + +8/0 +##################################### +# Processing each channel +##################################### +for channel in effectiveChannels: # warning, channel is used as a global parameter to call functions +# for channel in [effectiveChannels[-1]]: # warning, channel is used as a global parameter to call functions + IJ.log('Processing channel ' + channel) + + ####################### + # Defining paths + ####################### + retrievalProjectPath = os.path.join(baseFolder, 'retrieval_Project_' + channel + '.xml') # trackem project + + SIFTMatchesPath = os.path.join(calculationFolder, 'SIFTMatches_' + channel) + SIFTMatchesPicklePath = os.path.join(calculationFolder, 'SIFTMatchesPickle_' + channel) + + CCMatchesPath = os.path.join(calculationFolder, 'CCMatches_' + channel) + CCMatchesPicklePath = os.path.join(calculationFolder, 'CCMatchesPickle_' + channel) + + CCCorrMatPath = os.path.join(calculationFolder, 'CCCorrMat_' + channel) + CCCorrMatPicklePath = os.path.join(calculationFolder, 'CCCorrMatPickle_' + channel) + + CCCorrMatPathRaw = os.path.join(calculationFolder, 'highResCorrMatRaw_' + channel) + CCCorrMatPicklePathRaw = os.path.join(calculationFolder, 
'highResCorrMatPickleRaw_' + channel) + + SIFTOrderPath = os.path.join(calculationFolder, 'SIFTOrder_' + channel) + SIFTOrderPicklePath = os.path.join(calculationFolder, 'SIFTOrderPickle_' + channel) + + sumSIFTOrderPath = os.path.join(calculationFolder, 'sumSIFTOrder_' + channel) + sumSIFTOrderPicklePath = os.path.join(calculationFolder, 'sumSIFTOrderPickle_' + channel) + + CCOrderPath = os.path.join(calculationFolder, 'CCOrder_' + channel) + CCOrderPicklePath = os.path.join(calculationFolder, 'CCOrderPickle_' + channel) + + CCOrderPathRaw = os.path.join(calculationFolder, 'CCOrderRaw_' + channel) + CCOrderPicklePathRaw = os.path.join(calculationFolder, 'CCOrderPickleRaw_' + channel) + + SIFTReorderedProjectPath = os.path.join(baseFolder, 'SIFTReorderedProject_' + channel + '.xml') + CCReorderedProjectPath = os.path.join(baseFolder, 'CCReorderedProject_' + channel + '.xml') + + affineAlignedSIFTReorderedProjectPath = os.path.normpath(os.path.join(baseFolder, 'affineAlignedSIFTReorderedProject_' + channel + '.xml')) + affineAlignedCCReorderedProjectPath = os.path.join(baseFolder, 'affineAlignedCCReorderedProject_' + channel + '.xml') + elasticAlignedCCReorderedProjectPath = os.path.join(baseFolder, 'elasticAlignedCCReorderedProject_' + channel + '.xml') + + # raw paths + rawPaths = [os.path.join(rawFolder, 'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') for id in range(nSections)] + rawProjectPath = os.path.join(baseFolder, 'raw_Project_' + channel + '.xml') + SIFTReorderedRawProjectPath = os.path.join(baseFolder, 'SIFTReorderedRawProject_' + channel + '.xml') + affineAlignedRawProjectPath = os.path.join(baseFolder, 'affineAlignedRawProject_' + channel + '.xml') + + # preprocessed paths + preprocessedPaths = [os.path.join(preprocessedFolder, 'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') for id in range(nSections)] + + # blobized paths + blobizedPaths = [os.path.join(blobizedFolder, 
'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') for id in range(nSections)] + blobizedProjectPath = os.path.join(baseFolder, 'blobized_Project_' + channel + '.xml') + SIFTReorderedBlobizedProjectPath = os.path.join(baseFolder, 'SIFTReorderedBlobizedProject_' + channel + '.xml') + affineAlignedBlobizedProjectPath = os.path.join(baseFolder, 'affineAlignedBlobizedProject_' + channel + '.xml') + + # forCC paths + forCCPaths = [os.path.join(forCCFolder, 'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') for id in range(nSections)] + + + # sum project paths + sumProjectPath = os.path.join(baseFolder, 'sum_Project_' + channel + '.xml') + SIFTReorderedSumProjectPath = os.path.join(baseFolder, 'SIFTReorderedSumProject_' + channel + '.xml') + affineAlignedSumProjectPath = os.path.join(baseFolder, 'affineAlignedSumProject_' + channel + '.xml') + + # sum raw project paths + sumRawProjectPath = os.path.join(baseFolder, 'sum_Raw_Project_' + channel + '.xml') + SIFTReorderedSumRawProjectPath = os.path.join(baseFolder, 'SIFTReorderedSumRawProject_' + channel + '.xml') + affineAlignedSumRawProjectPath = os.path.join(baseFolder, 'affineAlignedSumRawProject_' + channel + '.xml') + + # sum blobized project paths + sumBlobizedProjectPath = os.path.join(baseFolder, 'sum_Blobized_Project_' + channel + '.xml') + SIFTReorderedSumBlobizedProjectPath = os.path.join(baseFolder, 'SIFTReorderedSumBlobizedProject_' + channel + '.xml') + affineAlignedSumBlobizedProjectPath = os.path.join(baseFolder, 'affineAlignedSumBlobizedProject_' + channel + '.xml') + + # getting list of paths to know whether parts of the script have already been executed + # nRawCheckStitchedSections = len(filter(lambda x: 'rawStitchedSection_' + channel + '_' in x, os.listdir(rawCheckFolder))) # these sections are raw + nStitchedSections = len(filter(lambda x: 'stitchedRotatedSection_channel_' + channel + '_' in x, os.listdir(preprocessedFolder))) # these 
sections are preprocessed + nRawStitchedSections = len(filter(lambda x: 'stitchedRotatedSection_channel_' + channel + '_' in x, os.listdir(rawFolder))) # these sections are raw rotated + nDogs = len(filter(lambda x: 'peaks_channel_' + channel + '_' in x, os.listdir(peaksFolder))) + + stitchedSectionPaths = [os.path.join(preprocessedFolder, 'stitchedRotatedSection_channel_' + channel + '_' + str(id).zfill(4) + '.tif') for id in range(nSections)] + + nBlobized = sum([os.path.isfile(blobizedPath) for blobizedPath in blobizedPaths]) + nforCC = sum([os.path.isfile(forCCPath) for forCCPath in forCCPaths]) + + ####################### + # Copying current channel and preprocessing (imToPeak) + ####################### + if len(filter(lambda x: 'channel_' + channel + '_' in x, [filename for root, dirnames, filenames in os.walk(inputFolder) for filename in filenames])) != len(filter(lambda x: 'channel_' + channel + '_' in x, [filename for root, dirnames, filenames in os.walk(preprocessedMosaicsFolder) for filename in filenames])): # is the number of copied files the same in the destination folder ? 
+ + IJ.log('Copying input folder for preprocessing ...') + imPaths = [] + for id, sectionFolderName in enumerate(os.walk(inputFolder).next()[1]): # not using shutil.copytree as it yields an uncatchable error 20047 + sectionIndex = int(sectionFolderName.split('_')[1]) + preprocessedSectionMosaicsFolder = fc.mkdir_p(os.path.join(preprocessedMosaicsFolder, sectionFolderName)) + IJ.log('B Copying section ' + str(id) + ' ...') + for tileName in os.walk(os.path.join(inputFolder, sectionFolderName)).next()[2]: + if 'channel_' + channel + '_' in tileName: + sourcePath = os.path.join(inputFolder, sectionFolderName, tileName) + targetPath = os.path.join(preprocessedSectionMosaicsFolder, tileName) + shutil.copyfile(sourcePath, targetPath) + imPaths.append(targetPath) + + IJ.log('Preprocessing the mosaics ...') + atomicI = AtomicInteger(0) + IJ.log('Preprocessing ...') + fc.startThreads(preprocessImToPeak, fractionCores = 1, wait = 0, arguments = (imPaths, atomicI, 50, 50, 3, True, True, 2, [120,255]) ) # + + ####################### + # Stitching, rotating (according to section orientation), and exporting the stitched preprocessed sections to single files + ####################### + # # My numbering is a bit inconsistent. Why would it happen that there are sections with names different from range(nSections) ? 
+ + if nStitchedSections != nSections: # + IJ.log('Stitching and rotating channel ' + channel) + sectionAngles = getSectionsAngles(sectionsCoordinates) + + with (open(mosaicAffineTransformsPath, 'r')) as f: + transforms = f.readlines() + + p, loader, layerset, _ = fc.getProjectUtils(fc.initTrakem(baseFolder, nSections)) + layerset.setDimensions(0, 0, worldSize, worldSize) + paths = [] + locations = [] + layers = [] + + for i in range(0, len(transforms), 8): + alignedPatchPath = transforms[i] # D:\ThomasT\Thesis\A7\A7_REORDERING\A7_Beads_100\A7_100\section_0000\section_0000_channel_brightfield_tileId_00-00-mag.tif + alignedPatchName = os.path.basename(alignedPatchPath) # section_0000_channel_brightfield_tileId_00-00-mag.tif + sectionPrefix = alignedPatchName.split('_')[0] + '_' + alignedPatchName.split('_')[1] + + toAlignPatchPath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(alignedPatchPath))), 'preprocessedMosaics', sectionPrefix, alignedPatchName.replace('channel_' + stitchingChannel, 'channel_' + channel)) + toAlignPatchPath = toAlignPatchPath[:-1] # why is there a trailing something !? 
+ IJ.log('toAlignPatchPath ' + toAlignPatchPath) + + l = int(transforms[i+1]) + paths.append(toAlignPatchPath) + locations.append([0,0]) + layers.append(l) + + importFilePath = fc.createImportFile(baseFolder, paths, locations, layers = layers, name = 'channel_' + channel) + IJ.log('Inserting all patches into a trakem project ...') + task = loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 1, 0) + task.join() + layerset.setDimensions(0, 0, worldSize, worldSize) + + IJ.log('Applying the transforms to all patches ...') + for i in range(0, len(transforms), 8): + alignedPatchPath = transforms[i] + alignedPatchName = os.path.basename(alignedPatchPath) + sectionPrefix = alignedPatchName.split('_')[0] + '_' + alignedPatchName.split('_')[1] + + toAlignPatchPath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(alignedPatchPath))), 'preprocessedMosaics', sectionPrefix, alignedPatchName.replace('channel_' + stitchingChannel, 'channel_' + channel)) + toAlignPatchPath = toAlignPatchPath[:-1] # why is there a trailing something !? 
+ IJ.log('os.path.normpath(toAlignPatchPath) ' + os.path.normpath(toAlignPatchPath)) + + l = int(transforms[i+1]) + aff = AffineTransform([float(transforms[i+2]), float(transforms[i+3]), float(transforms[i+4]), float(transforms[i+5]), float(transforms[i+6]), float(transforms[i+7])]) + layer = layerset.getLayers().get(l) + patches = layer.getDisplayables(Patch) + IJ.log('list of patch names --- ' + str([os.path.normpath(loader.getAbsolutePath(x)) for x in patches])) + thePatch = filter(lambda x: os.path.normpath(loader.getAbsolutePath(x)).replace(os.sep + os.sep, os.sep) == os.path.normpath(toAlignPatchPath), patches)[0] + thePatch.setAffineTransform(aff) + thePatch.updateBucket() + + fc.resizeDisplay(layerset) + Blending.blendLayerWise(layerset.getLayers(), True, None) + + for l, layer in enumerate(layerset.getLayers()): + # # without cropping + # cropRectangle = layerset.get2DBounds() + + # with cropping + center = [layerset.get2DBounds().width/2., layerset.get2DBounds().height/2.] + cropRectangle = Rectangle(int(center[0] - widthCropMag/2.), int(center[1] - heightCropMag/2.), widthCropMag, heightCropMag) + + im = loader.getFlatImage(layer, cropRectangle, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + # # # # # IJ.run(im, 'Invert', '') + # # # # # im = fc.minMax(im, 200, 200) + IJ.save(im, os.path.join(preprocessedFolder, 'stitchedRotatedSection_channel_' + channel + '_' + str(l).zfill(4) + '.tif')) + fc.closeProject(p) + + ####################### + # Creating base trakem project + ####################### + if not os.path.isfile(retrievalProjectPath): + IJ.log('Creating the trakEM project for channel ' + channel + ' with all stitched sections ...') + p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(baseFolder, nSections)) + p.saveAs(retrievalProjectPath, True) + importFilePath = fc.createImportFile(baseFolder, stitchedSectionPaths, [[0,0]] * nSections, layers = range(nSections)) + 
loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 1, 0).join() + p.save() + time.sleep(5) + fc.closeProject(p) + time.sleep(5) + ####################### + # Compute all peaks in parallel + ####################### + if nDogs != nSections: + p1, loader1, layerset1, nLayers1 = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + if channel != effectiveChannels[-1]: + IJ.log('Computing all dogs in parallel') + atomicI = AtomicInteger(0) + fc.startThreads(getPeaks, fractionCores = 0, arguments = (atomicI, stitchedSectionPaths)) # to check whether ok with more than 1 core ? + threads = [] + else: # this is the merged channel, simply add the peaks from the single channels + for i in range(nSections): + allPeaks = [] + for beadChannel in beadChannels: + peaks = loader1.deserialize(os.path.join(peaksFolder, 'peaks_channel_' + beadChannel + '_' + str(i).zfill(4))) + allPeaks = allPeaks + peaks + loader1.serialize(allPeaks, os.path.join(peaksFolder, 'peaks_channel_' + channel + '_' + str(i).zfill(4))) + fc.closeProject(p1) + IJ.log('extracting dogs done') + ####################### + # Create the blobized images + ####################### + if nBlobized != nSections: + + newMosaicSize = getNewMosaicSize() + + ### Load all peaks + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + IJ.log('Loading all peaks ...') + allPeaks = [ loaderZ.deserialize(os.path.join(peaksFolder, 'peaks_channel_' + channel + '_' + str(i).zfill(4))) for i in range(nSections)] + # allPeaks = [loaderZ.deserialize(os.path.join(peaksFolder, name)) for name in fc.naturalSort([fileName for fileName in os.listdir(peaksFolder) if ('peaks_channel_' + channel + '_') in fileName])] + IJ.log('All peaks have been loaded') + fc.closeProject(pZ) + IJ.log('allPeaks --- ' + str(len(allPeaks))) + # IJ.log('allPeaks *** ' + str(allPeaks[:])) + + for id, sectionFolderName in enumerate(os.walk(preprocessedMosaicsFolder).next()[1]): + sectionIndex = 
int(sectionFolderName.split('_')[1]) + blobizedPath = os.path.join(blobizedFolder, 'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') + # IJ.log('newMosaicSize --- ' + str(newMosaicSize)) + # IJ.log('allPeaks[id]' + str(allPeaks[id])) + blobizedIm = createBlobs(newMosaicSize[0], newMosaicSize[1], allPeaks[id]) + IJ.save(blobizedIm, blobizedPath) + blobizedIm.close() + del allPeaks + + # 8/0 + + # ####################### + # # Create images for CC + # ####################### + # IJ.log('forCC ?') + # if nforCC != nSections: + # newMosaicSize = getNewMosaicSize() + # for id in range(nSections): + # rawPath = os.path.join(rawFolder, 'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') + # forCCPath = os.path.join(forCCFolder, 'stitchedRotatedSection_channel_' + str(channel) + '_' + str(id).zfill(4) + '.tif') + # rawIm = IJ.openImage(rawPath) + # forCCIm = fc.normLocalContrast(rawIm, 50, 50, 3, True, True) + # forCCIm = fc.minMax(forCCIm, 150, 255) + # # IJ.run(forCCIm, 'Median...', 'radius=' + str(2)) + # forCCIm = fc.blur(forCCIm, 3) + # IJ.save(forCCIm, forCCPath) + # forCCIm.close() + + ####################### + # Compute pairwise events + ####################### + if not os.path.isfile(SIFTMatchesPath): + IJ.log('Calculating low resolution matches') + corrMat = initMat(nSections, initValue = 50000) + affineDict = {} # writing to a dic is thread safe + matchPointsDict = {} + + newMosaicSize = getNewMosaicSize() + cropBBoxMatching = crop([newMosaicSize[0], newMosaicSize[1]], cropForMatching) # used for the pairwise initial screening + cropBBoxSimilarity = crop([newMosaicSize[0], newMosaicSize[1]], cropForSimilarity) + + pairs = [] + for id1 in range(nSections): + # for id1 in [10,20,30,40,50]: + for id2 in range(id1 + 1, nSections, 1): + pairs.append([id1, id2]) + + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + IJ.log('Loading allDogs ...') + # allPeaks = [ 
loaderZ.deserialize(os.path.join(peaksFolder, 'peaks_' + channel + '_' + str(i).zfill(4))) for i in range(nSections)] + allPeaks = [loaderZ.deserialize(os.path.join(peaksFolder, name)) for name in fc.naturalSort([fileName for fileName in os.listdir(peaksFolder) if ('peaks_channel_' + channel + '_') in fileName])] # sorry for whoever reads that + IJ.log('All peaks have been loaded') + + allDogs = [pointsToDogs(peaks) for peaks in allPeaks] + allCropedDogs = [pointsToDogs(cropPeaks(peaks, cropBBoxMatching)) for peaks in allPeaks] + + IJ.log('allDogs allCropedDogs allPeaks ' + str(len(allDogs)) + ', ' + str(len(allCropedDogs)) + ', ' + str(len(allPeaks))) + + atomN = AtomicInteger(0) + fc.startThreads(getEvents, fractionCores = 0.9, arguments = [atomN, pairs, allPeaks, allCropedDogs, cropBBoxSimilarity, corrMat, affineDict, matchPointsDict]) + + loaderZ.serialize([corrMat, affineDict, matchPointsDict], SIFTMatchesPath) + fc.closeProject(pZ) + pickleSave(matToList(corrMat), SIFTMatchesPicklePath) + # 8/0 + # ####################### + # # Compute CC similarity matrix + # ####################### + + # # with lock: # only one trakem working at a time + + # if not os.path.isfile(CCMatchesPath): + # pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 2)) + # lock = threading.Lock() # for operations with the trakem project pZ + + # corrMat, affineDict, matchPointsDict = loaderZ.deserialize(SIFTMatchesPath) + # IJ.log('The loaded SIFT corrMat: ' + str(corrMat)) + # ccMat = initMat(nSections, initValue = 50000) + # atom = AtomicInteger(0) + # print 'affineDict.keys()', affineDict.keys() + # IJ.log('There are ' + str(len(affineDict.keys())) + ' pairs ') + # # 8/0 + # # fc.startThreads(getCCCorrMat, fractionCores = 0, arguments = [atom, affineDict.keys(), affineDict, preprocessedPaths, ccMat]) + # # fc.startThreads(getCCCorrMat, fractionCores = 0, arguments = [atom, affineDict.keys(), affineDict, rawPaths, ccMat]) + # fc.startThreads(getCCCorrMat, 
fractionCores = 0, arguments = [atom, affineDict.keys(), affineDict, forCCPaths, ccMat]) + # # getHighResCorrMat(atom, affineDict.keys(), affineDict, preprocessedPaths, ccMat) + + # IJ.log('The final ccMat: ' + str(ccMat)) + # loaderZ.serialize([ccMat, affineDict, matchPointsDict], CCMatchesPath) + # pickleSave(matToList(ccMat), CCMatchesPicklePath) + + # loaderZ.serialize([ccMat, affineDict, matchPointsDict], SIFTMatchesPath) + # pickleSave(matToList(ccMat), SIFTMatchesPicklePath) + + # fc.closeProject(pZ) + # # 8/0 + ####################### + # Compute order + ####################### + if not os.path.isfile(SIFTOrderPicklePath): + IJ.log('Computing order ...') + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + corrMat, affineDict, matchPointsDict = loaderZ.deserialize(SIFTMatchesPath) + SIFTOrder, SIFTCosts = orderFromMat(corrMat, calculationFolder, solutionName = channel) + pickleSave([SIFTOrder, SIFTCosts], SIFTOrderPicklePath) + IJ.log('The SIFT order is: ' + str(SIFTOrder)) + fc.closeProject(pZ) + # 8/0 + ####################### + # Save the corrMat as a 32-bit 0-1 image for Fiji + ####################### + corrMatImagePath = os.path.join(calculationFolder, 'corrMatImage_' + channel + '.tif') + if not os.path.isfile(corrMatImagePath): + IJ.log('Saving the corrMat as a 32-bit 0-1 image for Fiji ...') + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + corrMat, affineDict, matchPointsDict = loaderZ.deserialize(SIFTMatchesPath) + + # symmetrize + for a in range(nSections): + for b in range(a, nSections): + corrMat[b][a] = corrMat[a][b] + + SIFTOrder = pickleLoad(SIFTOrderPicklePath)[0] + corrMat = reorderM(pythonToJamaMatrix(corrMat), SIFTOrder) + + im = ImagePlus('mm', FloatProcessor(nSections, nSections)) + ip = im.getProcessor() + theArray = ip.getFloatArray() + + theMax = 0 + for a in range(nSections): + for b in range(nSections): + val = int(corrMat.get(a, b)) + if val != 50000: + 
theMax = max(theMax, val) + + for x in range(nSections): + for y in range(nSections): + val = corrMat.get(x, y) + if x == y: + theArray[x][x] = 1 + elif int(val) == 50000: + theArray[x][y] = Float.NaN + else: + theArray[x][y] = 1 - float(val/float(theMax)) + + ip.setFloatArray(theArray) + IJ.save(im, corrMatImagePath) + fc.closeProject(pZ) + + ####################### + # Reorder and align project + ####################### + if not os.path.isfile(SIFTReorderedProjectPath): + IJ.log('Reordering the SIFT aligned project ...') + SIFTOrder = pickleLoad(SIFTOrderPicklePath)[0] + fc.reorderProject(retrievalProjectPath, SIFTReorderedProjectPath, SIFTOrder) + if not os.path.isfile(affineAlignedSIFTReorderedProjectPath): + IJ.log('Realigning the SIFT aligned project ...') + affineRealignProject(SIFTReorderedProjectPath, affineAlignedSIFTReorderedProjectPath, SIFTMatchesPath) + + ####################### + # Create, reorder, and align the blobized project + ####################### + if not os.path.isfile(blobizedProjectPath): # create + IJ.log('Creating the trakEM blobized project for channel ' + channel + ' with all stitched blobized sections ...') + p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(baseFolder, nSections)) + p.saveAs(blobizedProjectPath, True) + importFilePath = fc.createImportFile(baseFolder, blobizedPaths, [[0,0]] * nSections, layers = range(nSections)) + loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 1, 0).join() + p.save() + time.sleep(5) + fc.closeProject(p) + time.sleep(5) + if not os.path.isfile(SIFTReorderedBlobizedProjectPath): # reorder + IJ.log('Reordering the SIFT aligned blobized project ...') + SIFTOrder = pickleLoad(SIFTOrderPicklePath)[0] + fc.reorderProject(blobizedProjectPath, SIFTReorderedBlobizedProjectPath, SIFTOrder) + if not os.path.isfile(affineAlignedBlobizedProjectPath): # align + IJ.log('Realigning the SIFT aligned blobized project ...') + 
affineRealignProject(SIFTReorderedBlobizedProjectPath, affineAlignedBlobizedProjectPath, SIFTMatchesPath) + + ####################### + # Create, reorder, and align the raw project + ####################### + if not os.path.isfile(rawProjectPath): # create + IJ.log('Creating the trakEM raw project for channel ' + channel + ' with all stitched raw sections ...') + p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(baseFolder, nSections)) + p.saveAs(rawProjectPath, True) + importFilePath = fc.createImportFile(baseFolder, rawPaths, [[0,0]] * nSections, layers = range(nSections)) + loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 1, 0).join() + p.save() + time.sleep(5) + fc.closeProject(p) + time.sleep(5) + if not os.path.isfile(SIFTReorderedRawProjectPath): # reorder + IJ.log('Reordering the SIFT aligned raw project ...') + SIFTOrder = pickleLoad(SIFTOrderPicklePath)[0] + fc.reorderProject(rawProjectPath, SIFTReorderedRawProjectPath, SIFTOrder) + if not os.path.isfile(affineAlignedRawProjectPath): # align + IJ.log('Realigning the SIFT aligned raw project ...') + affineRealignProject(SIFTReorderedRawProjectPath, affineAlignedRawProjectPath, SIFTMatchesPath) + + +####################### +# Calculate the sum order +####################### +# # # print 555, fc +# # # pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) +# # # fc.closeProject(pZ) + +if not os.path.isfile(sumSIFTOrderPicklePath): +# if True: + IJ.log('Calculating the sumChannel event order') + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + + # Average of the matrices + allMats = [] + for channel in beadChannels: + SIFTMatchesPath = os.path.join(calculationFolder, 'SIFTMatches_' + channel) + [corrMat, affineDict, matchPointsDict] = loaderZ.deserialize(SIFTMatchesPath) + allMats.append(corrMat) + + sumMat = matAverage(allMats) + + sumOrder, sumCosts = orderFromMat(sumMat, calculationFolder, 
'sum') + pickleSave([sumOrder, sumCosts], sumSIFTOrderPicklePath) + IJ.log('The sum order is: ' + str(sumOrder)) + fc.closeProject(pZ) +# 8/0 +# # # # ####################### +# # # # # Two-steps reordering: recalculate the order comparing only the 5-neighborhood with all peaks (large crop) +# # # # ####################### +# # # # if twoStepsReordering: # + # # # # [sumOrder, sumCosts] = pickleLoad(sumSIFTOrderPicklePath) + + # # # # if 'B6' in inputFolder: + # # # # channel = '488' + # # # # SIFTMatchesPath = os.path.join(calculationFolder, 'SIFTMatches_2ndStep_' + channel) + # # # # SIFTMatchesPicklePath = os.path.join(calculationFolder, 'SIFTMatchesPickle_2ndStep_' + channel) + # # # # # SIFTOrderPicklePath = os.path.join(calculationFolder, 'SIFTOrderPickle_2ndStep_' + channel) # actually no, I can simply override the sum order + + # # # # cropBoxFactor = 3 + # # # # cropForMatching = [cropBoxFactor * cropForMatching[0], cropBoxFactor * cropForMatching[0]] # use a larger cropping window with many beads + # # # # cropForSimilarity = [cropBoxFactor * cropForSimilarity[0], cropBoxFactor * cropForSimilarity[1]] + # # # # neighborhood = 5 + + # # # # corrMat = initMat(nSections, initValue = 50000) + # # # # affineDict = {} # writing to a dic is thread safe + # # # # matchPointsDict = {} + + # # # # newMosaicSize = getNewMosaicSize() + # # # # cropBBoxMatching = crop([newMosaicSize[0], newMosaicSize[1]], cropForMatching) # used for the pairwise initial screening + # # # # cropBBoxSimilarity = crop([newMosaicSize[0], newMosaicSize[1]], cropForSimilarity) + + # # # # pairs = [] + # # # # for id1 in range(nSections): + # # # # for id2 in range(id1 + 1, min(id1 + 1 + neighborhood, nSections), 1): # /!\ make only pairs in the neighborhood + # # # # pairs.append([sumOrder[id1], sumOrder[id2]]) + + # # # # pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + # # # # IJ.log('Loading allDogs ...') + # # # # # allPeaks = [ 
loaderZ.deserialize(os.path.join(peaksFolder, 'peaks_' + channel + '_' + str(i).zfill(4))) for i in range(nSections)] + # # # # allPeaks = [loaderZ.deserialize(os.path.join(peaksFolder, name)) for name in fc.naturalSort([fileName for fileName in os.listdir(peaksFolder) if ('peaks_channel_' + channel + '_') in fileName])] # sorry for whoever reads that + # # # # IJ.log('All peaks have been loaded') + + # # # # allDogs = [pointsToDogs(peaks) for peaks in allPeaks] + # # # # allCropedDogs = [pointsToDogs(cropPeaks(peaks, cropBBoxMatching)) for peaks in allPeaks] + # # # # print 'cropForMatching', cropForMatching + + # # # # IJ.log('allDogs allCropedDogs allPeaks ' + str(len(allDogs)) + ', ' + str(len(allCropedDogs)) + ', ' + str(len(allPeaks))) + + # # # # atomN = AtomicInteger(0) + # # # # # fc.startThreads(getEvents, fractionCores = 0.9, arguments = [atomN, pairs, allPeaks, allCropedDogs, cropBBoxSimilarity, corrMat, affineDict, matchPointsDict]) + + # # # # # loaderZ.serialize([corrMat, affineDict, matchPointsDict], SIFTMatchesPath) + # # # # fc.closeProject(pZ) + # # # # # pickleSave(matToList(corrMat), SIFTMatchesPicklePath) + + # # # # ####################### + # # # # # Compute 2nd step order + # # # # ####################### + # # # # # if os.path.isfile(sumSIFTOrderPicklePath): + # # # # IJ.log('Computing order in 2nd step...') + # # # # pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + # # # # corrMat, affineDict, matchPointsDict = loaderZ.deserialize(SIFTMatchesPath) + # # # # SIFTOrder, SIFTCosts = orderFromMat(corrMat, calculationFolder, solutionName = channel) + # # # # pickleSave([SIFTOrder, SIFTCosts], sumSIFTOrderPicklePath) + # # # # IJ.log('The SIFT order is: ' + str(SIFTOrder)) + # # # # fc.closeProject(pZ) + +####################### +# Create, reorder, and align the sum project +####################### +if not os.path.isfile(affineAlignedSumProjectPath): + IJ.log('Creating, reordering, and aligning the sum 
project') + IJ.log('Creating ... ') + shutil.copyfile(os.path.join(baseFolder, 'retrieval_Project_' + effectiveChannels[-1] + '.xml'), sumProjectPath) + time.sleep(3) + p, loader, layerset, nLayers = fc.openTrakemProject(sumProjectPath) + time.sleep(3) + print p.saveAs(sumProjectPath, True) + time.sleep(3) + fc.closeProject(p) + + IJ.log('Reordering ...') + [sumOrder, sumCosts] = pickleLoad(sumSIFTOrderPicklePath) + fc.reorderProject(sumProjectPath, SIFTReorderedSumProjectPath, sumOrder) + + IJ.log('Affine aligning ...') + mergedChannelMatchesPath = os.path.join(calculationFolder, 'SIFTMatches_' + effectiveChannels[-1]) + singleChannelMatchesPaths = [os.path.join(calculationFolder, 'SIFTMatches_' + channel) for channel in beadChannels] + + affineRealignProject(SIFTReorderedSumProjectPath, affineAlignedSumProjectPath, mergedChannelMatchesPath, optionalMatchesPath = singleChannelMatchesPaths) + +####################### +# Create, reorder, and align the sum raw project (using the affine transforms of the mergedChannel) +# WARNING: it seems to hang after Creating ... +####################### +if not os.path.isfile(affineAlignedSumRawProjectPath): + IJ.log('Creating, reordering, and aligning the sum project') + IJ.log('Creating ... 
') + + shutil.copyfile(os.path.join(baseFolder, 'raw_Project_' + effectiveChannels[-1] + '.xml'), sumRawProjectPath) + p, loader, layerset, nLayers = fc.openTrakemProject(sumRawProjectPath) + time.sleep(2) + p.saveAs(sumRawProjectPath, True) + time.sleep(2) + fc.closeProject(p) + time.sleep(2) + + IJ.log('Reordering ...') + [sumOrder, sumCosts] = pickleLoad(sumSIFTOrderPicklePath) + fc.reorderProject(sumRawProjectPath, SIFTReorderedSumRawProjectPath, sumOrder) + + IJ.log('Affine aligning ...') + mergedChannelMatchesPath = os.path.join(calculationFolder, 'SIFTMatches_' + effectiveChannels[-1]) + singleChannelMatchesPaths = [os.path.join(calculationFolder, 'SIFTMatches_' + channel) for channel in beadChannels] + affineRealignProject(SIFTReorderedSumRawProjectPath, affineAlignedSumRawProjectPath, mergedChannelMatchesPath, optionalMatchesPath = singleChannelMatchesPaths) + +####################### +# Create the blobized merged images (should look different compared to the merged blobized images) +####################### +if len(filter(lambda x: 'stitchedRotatedSection_Merged_' in x, os.listdir(blobizedFolder))) != nSections: + IJ.log('Create the blobized merged images') + newMosaicSize = getNewMosaicSize() + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + for l in range(nSections): + mergedBeads = [] + for channel in beadChannels: + mergedBeads = mergedBeads + loaderZ.deserialize(os.path.join(peaksFolder, 'peaks_channel_' + str(channel) + '_' + str(l).zfill(4))) + # IJ.log('mergedBeads --- ' + str(mergedBeads)) + + blobizedPath = os.path.join(blobizedFolder, 'stitchedRotatedSection_Merged_' + str(effectiveChannels[-1]) + '_' + str(l).zfill(4) + '.tif') + blobizedIm = createBlobs(newMosaicSize[0], newMosaicSize[1], mergedBeads) + IJ.save(blobizedIm, blobizedPath) + blobizedIm.close() + fc.closeProject(pZ) + del mergedBeads + +####################### +# Compute the consecutive affine transforms on the sumReordered merged blobized 
images +####################### +sumAffineFolder = fc.mkdir_p(os.path.join(baseFolder, 'sumAffine')) +if len(os.listdir(sumAffineFolder)) != (nSections-1): + IJ.log('Compute the affine transforms on the sumReordered merged blobized images') + [sumOrder, sumCosts] = pickleLoad(sumSIFTOrderPicklePath) + pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(baseFolder, 1)) + + for i, index1 in enumerate(sumOrder[:-1]): + index2 = sumOrder[i+1] + + mergedBeads1 = [] + mergedBeads2 = [] + for channel in beadChannels: + mergedBeads1 = mergedBeads1 + loaderZ.deserialize(os.path.join(peaksFolder, 'peaks_channel_' + str(channel) + '_' + str(index1).zfill(4))) + mergedBeads2 = mergedBeads2 + loaderZ.deserialize(os.path.join(peaksFolder, 'peaks_channel_' + str(channel) + '_' + str(index2).zfill(4))) + + dogsContainer = ArrayList() + dogs1 = pointsToDogs(mergedBeads1) + dogs2 = pointsToDogs(mergedBeads2) + dogsContainer.add(dogs1) + dogsContainer.add(dogs2) + comparePairs = Matching.descriptorMatching(dogsContainer, 2, dp, 0) + inliers = comparePairs[0].inliers + affineT = AffineTransform() + affineT.setToIdentity() + + if len(inliers) > 0: # it should because it is sum reordered + affineT = comparePairs[0].model.createAffine().createInverse() + else: + IJ.log('*** WARNING *** No matching found in the separately merged channel for pair (' + str(index1) + ',' + str(index2) + ')') + # is there a matching in the single channels ? 
+ # lowResMatchesPickle_488-546 # the matches have been saved, should be easy to look it up + # if not, try to loosen the matching parameters + + thereIsATransform = False + for channel in reversed(effectiveChannels): # reversed to start with the merged channel which is likely to have a better transform + if not thereIsATransform: + SIFTMatchesPath = os.path.join(calculationFolder, 'SIFTMatches_' + channel) + corrMat, affineDict, matchPointsDict = loaderZ.deserialize(SIFTMatchesPath) + if (index1, index2) in affineDict: + IJ.log(str((index1, index2)) + ' found in channel ' + str(channel)) + thereIsATransform = True + affineT = affineDict[(index1, index2)].createInverse() + elif (index2, index1) in affineDict: + IJ.log(str((index2, index1)) + ' found in channel ' + str(channel)) + thereIsATransform = True + affineT = affineDict[(index2, index1)] + if not thereIsATransform: #still no transform found + IJ.log('*** Urg *** Absolutely no matching found for pair (' + str(index1) + ',' + str(index2) + '). 
A manual affine would be needed.') + + loaderZ.serialize(affineT, os.path.join(sumAffineFolder, 'sumAffine_' + str(i).zfill(4))) # write a transform in any case, even if no match is found, in which case an identity is written + + fc.closeProject(pZ) +####################### +# Create sumAffineRealigned projects +####################### +[sumOrder, sumCosts] = pickleLoad(sumSIFTOrderPicklePath) +consecAffineTransformPaths = [os.path.join(sumAffineFolder, 'sumAffine_' + str(l).zfill(4)) for l in range(nSections-1)] +for channel in effectiveChannels: + projectPath = os.path.join(baseFolder, 'sumAffineRealignedRaw_' + channel + '.xml') + orderedImagePaths = [os.path.join(rawFolder, 'stitchedRotatedSection_channel_' + channel + '_' + str(sumOrder[l]).zfill(4) + '.tif') for l in range(nSections)] + if not os.path.isfile(projectPath): + sumAffineProject(projectPath, orderedImagePaths, consecAffineTransformPaths) + +for channel in effectiveChannels: + projectPath = os.path.join(baseFolder, 'sumAffineRealignedBlobized_' + channel + '.xml') + orderedImagePaths = [os.path.join(blobizedFolder, 'stitchedRotatedSection_channel_' + channel + '_' + str(sumOrder[l]).zfill(4) + '.tif') for l in range(nSections)] + if not os.path.isfile(projectPath): + sumAffineProject(projectPath, orderedImagePaths, consecAffineTransformPaths) + +for channel in effectiveChannels: + projectPath = os.path.join(baseFolder, 'sumAffineRealigned_' + channel + '.xml') + orderedImagePaths = [os.path.join(preprocessedFolder, 'stitchedRotatedSection_channel_' + channel + '_' + str(sumOrder[l]).zfill(4) + '.tif') for l in range(nSections)] + if not os.path.isfile(projectPath): + sumAffineProject(projectPath, orderedImagePaths, consecAffineTransformPaths) + +IJ.log('SOR has run entirely.') \ No newline at end of file diff --git a/alignElastic_EM.py b/alignElastic_EM.py new file mode 100644 index 0000000..fc31d73 --- /dev/null +++ b/alignElastic_EM.py @@ -0,0 +1,159 @@ +#align rigid least square affine for a 
TrakEM project +from __future__ import with_statement +import ij +from ij import IJ +from ij import Macro +import os, time +# import subprocess +import fijiCommon as fc + +from mpicbg.trakem2.align import ElasticLayerAlignment +from java.awt.geom import AffineTransform +from java.awt import Rectangle +from java.util import HashSet +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch + +from ini.trakem2.utils import Filter + +namePlugin = 'alignElastic_EM' +MagCFolder = fc.startPlugin(namePlugin) +ControlWindow.setGUIEnabled(False) + +MagCParameters = fc.readMagCParameters(MagCFolder) + +preProjectPath = fc.cleanLinuxPath(os.path.join(MagCFolder, 'MagC_EM', 'ToBeElasticAlignedEMProject.xml')) +projectPath = fc.cleanLinuxPath(os.path.join(MagCFolder, 'MagC_EM', 'ElasticAlignedEMProject.xml')) +saveExistingProjectInCasePath = fc.cleanLinuxPath(os.path.join(MagCFolder, 'MagC_EM', 'ElasticAlignedEMProject_ReplacedByLastRun.xml')) + +IJ.log('preProjectPath :' + str(preProjectPath)) + +# check whether the pipeline had been interrupted +if os.path.isfile(preProjectPath): + IJ.log('preProjectPath exists: ' + str(preProjectPath)) + if os.path.isfile(projectPath): + IJ.log('projectPath exists: ' + str(projectPath)) + if os.path.isfile(saveExistingProjectInCasePath): + os.remove(saveExistingProjectInCasePath) + shutil.copyfile(projectPath, saveExistingProjectInCasePath) + os.remove(projectPath) + os.rename(preProjectPath, projectPath) # to have consistency in file naming when the pipeline is interrupted +nLayers = len(os.listdir(os.path.join(MagCFolder, 'MagC_EM', 'export_stitchedEMForAlignment'))) + +layerOverlap = MagCParameters[namePlugin]['layerOverlap'] +nLayersAtATime = MagCParameters[namePlugin]['nLayersAtATime'] + +currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt') +currentLayer = fc.incrementCounter(currentLayerPath, increment = (nLayersAtATime - layerOverlap)) # increment = 1 + + +# # 
# # TO DELETE UNLESS PARALLEL ELASTIC NEEDED AT SOME POINT +# # # # pluginsFolder = IJ.getDirectory('plugins') +# # # # l = 0 +# # # # fijiPath = os.path.join(IJ.getDirectory('imagej'), 'ImageJ-win64.exe') +# # # # pluginPath = os.path.join(pluginsFolder, 'alignElastic_EMBash.bsh') +# # # # IJ.log('WARNING should first allow multiple fiji instances') +# # # # while l < nLayers: + # # # # command = fijiPath + \ + # # # # " -Dl1=" + str(l) + \ + # # # # " -Dl2=" + str(min(l + nLayersAtATime, nLayers - 1)) + \ + # # # # " -Dprojectpath=" + projectPath + \ + # # # # " -- " + pluginPath + # # # # IJ.log(str(command)) + # # # # result = subprocess.call(command, shell=True) + # # # # l = l + (nLayersAtATime - layerOverlap) +# # # # fc.terminatePlugin(namePlugin, MagCFolder) + +############################ +# Transformation parameters +############################ +p = {} + +# Block Matching +p[27] = ['layerScale', 0.2] +p[40] = ['searchRadius', 100] +p[25] = ['blockRadius', 500] +p[38] = ['resolutionSpringMesh', 32] + +# Correlation filters +p[37] = ['minR', 0.1] +p[30] = ['maxCurvatureR', 100] +p[39] = ['rodR', 1] + +# Local smoothness filter +# p[42] = ['useLocalSmoothnessFilter', False] +p[42] = ['useLocalSmoothnessFilter', True] +p[28] = ['localModelIndex', 3] +p[29] = ['localRegionSigma', 1000] +p[32] = ['maxLocalEpsilon', 1000] +p[33] = ['maxLocalTrust', 100] + +# Miscellaneous +p[12] = ['isAligned', True] +# p[16] = ['maxNumNeighbors', 2] # should be changed for larger stacks +p[16] = ['maxNumNeighbors', 1] # should be changed for larger stacks + +# Approximate optimizer +p[9] = ['desiredModelIndex', 3] # unsure +p[10] = ['expectedModelIndex', 3] # unsure + +p[14] = ['maxIterationsOptimize', 1000] +p[18] = ['maxPlateauwidthOptimize', 200] + +# Spring mesh +p[41] = ['stiffnessSpringMesh', 0.1] +p[36] = ['maxStretchSpringMesh', 2000] +p[31] = ['maxIterationsSpringMesh', 1000] +p[34] = ['maxPlateauwidthSpringMesh', 200] +p[35] = ['useLegacyOptimizer', True] + + +p[0] 
= ['SIFTfdBins',8] +p[1] = ['SIFTfdSize', 8] +p[2] = ['SIFTinitialSigma', 1.6] +p[3] = ['SIFTmaxOctaveSize', 1024] +p[4] = ['SIFTminOctaveSize', 64] +p[5] = ['SIFTsteps', 1] +p[6] = ['clearCache', True] +p[7] = ['maxNumThreadsSift', 56] +p[8] = ['rod', 0.92] +p[11] = ['identityTolerance', 1] +p[13] = ['maxEpsilon', 30] +p[15] = ['maxNumFailures', 3] +p[17] = ['maxNumThreads', 56] # should be changed for workstations (or automatically infered ?) +p[19] = ['minInlierRatio', 0] +p[20] = ['minNumInliers', 12] +p[21] = ['multipleHypotheses', False] +p[22] = ['widestSetOnly', False] +p[23] = ['rejectIdentity', False] +p[24] = ['visualize', False] + +p[26] = ['dampSpringMesh', 0.9] #/!\ attention, value copied from internet, not tested + +params = ElasticLayerAlignment.Param( *[ a[1] for a in [p[i] for i in range(len(p))] ] ) + +IJ.log('4. Opening the real scale EM project for elastic alignment') +project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) + +IJ.log('4. Starting alignment') +IJ.log('The layer range is: ' + str(currentLayer) + '-' + str(min(currentLayer + nLayersAtATime - 1, nLayers -1)) ) +IJ.log('The fixed layers are: ' + str(0) + '-' + str(currentLayer - layerOverlap + 1)) + +layerRange = layerset.getLayers(currentLayer, min(currentLayer + nLayersAtATime - 1, nLayers -1)) # nLayers -1 because starts at 0 +fixedLayers = HashSet(layerset.getLayers(0, currentLayer - layerOverlap + 1)) +emptyLayers = HashSet() +propagateTransformBefore = False +propagateTransformAfter = False +thefilter = None + +ElasticLayerAlignment().exec(params, project, layerRange, fixedLayers, emptyLayers, layerset.get2DBounds(), propagateTransformBefore, propagateTransformAfter, thefilter) +# ElasticLayerAlignment().exec(paramElastic, project, layerRange, fixedLayers, emptyLayers, layerset.get2DBounds(), propagateTransformBefore, propagateTransformAfter, thefilter) +IJ.log('Elastic layer alignment is done') +fc.resizeDisplay(layerset) +time.sleep(2) +project.save() 
+time.sleep(2) + +fc.closeProject(project) + +fc.shouldRunAgain(namePlugin, currentLayer, nLayers, MagCFolder, '', increment = (nLayersAtATime - layerOverlap)) diff --git a/alignRigid_EM.py b/alignRigid_EM.py new file mode 100644 index 0000000..fcdf7c3 --- /dev/null +++ b/alignRigid_EM.py @@ -0,0 +1,101 @@ +#align rigid least square affine for a TrakEM project +from __future__ import with_statement +from ij import IJ, Macro, WindowManager +import os, time +import fijiCommon as fc +# from mpicbg.trakem2.align import RegularizedAffineLayerAlignment + +from register_virtual_stack import Transform_Virtual_Stack_MT +from register_virtual_stack import Register_Virtual_Stack_MT + +from java.awt.geom import AffineTransform +from java.awt import Rectangle +from java.util import HashSet, ArrayList +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch +from java.util.concurrent.atomic import AtomicInteger + +namePlugin = 'alignRigid_EM' +MagCFolder = fc.startPlugin(namePlugin) +ControlWindow.setGUIEnabled(False) +MagC_EM_Folder = os.path.join(MagCFolder, 'MagC_EM','') +MagCParameters = fc.readMagCParameters(MagCFolder) + + +inputFolder = fc.findFoldersFromTags(MagCFolder, ['export_stitchedEMForAlignment'])[0] +imagePaths = filter(lambda x: os.path.splitext(x)[1] == '.tif', fc.naturalSort([os.path.join(inputFolder, x) for x in os.listdir(inputFolder)])) + +regParams = Register_Virtual_Stack_MT.Param() +regParams.minInlierRatio = 0 +regParams.registrationModelIndex = 3 # 1-Rigid, 2-Similarity, 3-Affine +regParams.featuresModelIndex = 3 + +exportForRigidAlignmentFolder = inputFolder +resultRigidAlignmentFolder = fc.mkdir_p(os.path.join(MagC_EM_Folder, 'resultRigidAlignment')) + +################################################ +# rigid alignment outside trakem2 with register virtual stack plugin because bug in trakem2 +transformsPath = os.path.join(MagC_EM_Folder, 'rigidAlignmentTransforms_' + namePlugin + '.txt') +referenceName = 
# The reference image (first file in natural-sort order) anchors the stack.
referenceName = fc.naturalSort(os.listdir(exportForRigidAlignmentFolder))[0]
use_shrinking_constraint = 0
IJ.log('Rigid alignment with register virtual stack')
# Runs Register Virtual Stack Slices on the exported downsampled EM images;
# writes both the registered images and per-slice transform .xml files into
# resultRigidAlignmentFolder.
Register_Virtual_Stack_MT.exec(exportForRigidAlignmentFolder, resultRigidAlignmentFolder, resultRigidAlignmentFolder, referenceName, regParams, use_shrinking_constraint)
time.sleep(2)
# IJ.getImage().close()
WindowManager.closeAllWindows()
IJ.log('Rigid Alignment done')
################################################

###########################################
# Apply the transforms computed above to the low-resolution EM TrakEM2 project.
IJ.log('Aligning the lowEM with the new rigid transforms')
projectPath = fc.cleanLinuxPath(fc.findFilesFromTags(MagCFolder,['EMProject_'])[0]) # this is the low res EM
project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) # the low res EM
for l, layer in enumerate(layerset.getLayers()):
    # one RVS transform file per layer, matched by zero-padded index
    transformPath = os.path.join(resultRigidAlignmentFolder, 'stitchedDownsampledEM_' + str(l).zfill(4) + '.xml')
    theList = ArrayList(HashSet())  # NOTE(review): theList appears unused — candidate for removal
    aff = fc.getAffFromRVSTransformPath(transformPath)

    for patch in layer.getDisplayables(Patch):
        # NOTE(review): setLocation(getX(), getY()) looks like a no-op —
        # presumably it forces an internal update; confirm before removing.
        patch.setLocation(patch.getX(), patch.getY()) # compensate for the extracted bounding box
        # prepend the layer-wide rigid transform to the patch's own transform
        currentAff = patch.getAffineTransform()
        currentAff.preConcatenate(aff)
        patch.setAffineTransform(currentAff)

fc.resizeDisplay(layerset)
project.save()
fc.closeProject(project)
IJ.log('All LM layers have been aligned in: ' + projectPath)

time.sleep(1)

# High EM
################################################
# Apply the same per-layer transforms to the full-resolution EM project,
# rescaling only the translation component by the LM/EM downsampling factor.
IJ.log('Aligning the highEM with the new rigid transforms')
downsamplingFactor = MagCParameters['downsample_EM']['downsamplingFactor']
IJ.log('The LM/EM pixel scale factor is ' + str(downsamplingFactor))

projectPath = fc.cleanLinuxPath(os.path.join(MagCFolder, 'MagC_EM', 'EMProject.xml')) # the high res EM project

afterProjectPath = fc.cleanLinuxPath(projectPath.replace('EMProject.', 'ToBeElasticAlignedEMProject.'))
IJ.log('afterProjectPath ' + str(afterProjectPath))

project, loader, layerset, nLayers = fc.openTrakemProject(projectPath)

for l, layer in enumerate(layerset.getLayers()):
    transformPath = os.path.join(resultRigidAlignmentFolder, 'stitchedDownsampledEM_' + str(l).zfill(4) + '.xml')
    aff = fc.getAffFromRVSTransformPath(transformPath)
    # aff.scale(float(downsamplingFactor), float(downsamplingFactor)) # cannot be used because it is not a simple scaling
    # keep the linear part, scale only the translation to full resolution
    aff = AffineTransform(aff.getScaleX(), aff.getShearY(), aff.getShearX(), aff.getScaleY(), aff.getTranslateX() * float(downsamplingFactor), aff.getTranslateY() * float(downsamplingFactor))

    for patch in layer.getDisplayables(Patch):
        currentAff = patch.getAffineTransform()
        currentAff.preConcatenate(aff)
        patch.setAffineTransform(currentAff)
IJ.log('The real EM project is now rigidly aligned. Saving and closing the project.')
fc.resizeDisplay(layerset)
project.save()
# saved a second time under the name expected by the elastic alignment step
project.saveAs(afterProjectPath, True)
fc.closeProject(project)
time.sleep(2)

fc.terminatePlugin(namePlugin, MagCFolder)

# ===== alignRigid_LM.py =====
from ij import IJ
from ij import Macro
import os
import time
import fijiCommon as fc
from mpicbg.trakem2.align import RegularizedAffineLayerAlignment
from java.awt.geom import AffineTransform
from java.awt import Rectangle
from java.util import HashSet
from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch

from register_virtual_stack import Register_Virtual_Stack_MT

namePlugin = 'alignRigid_LM'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

# get mosaic size
MagCParameters = fc.readMagCParameters(MagCFolder)
executeAlignment = MagCParameters[namePlugin]['executeAlignment']
boxFactor = MagCParameters[namePlugin]['boxFactor'] # e.g.
0.5, use only the center part of the layer to compute alignment: 0.5 divides the x and y dimensions by 2 + +projectPath = fc.findFilesFromTags(MagCFolder,['LMProject'])[0] + +# alignment parameters +regParams = Register_Virtual_Stack_MT.Param() +regParams.minInlierRatio = 0 +regParams.registrationModelIndex = 1 +regParams.featuresModelIndex = 1 + +regParams.sift.fdBins = 8 +regParams.sift.fdSize = 4 +regParams.sift.initialSigma = 1.6 +regParams.sift.maxOctaveSize = 1024 +regParams.sift.minOctaveSize = 64 +regParams.sift.steps = 6 + +regParams.interpolate = True +regParams.maxEpsilon = 25 +regParams.minInlierRatio = 0 +regParams.rod = 0.92 + +# perform alignment +if executeAlignment: + fc.rigidAlignment(projectPath, regParams, name = namePlugin, boxFactor = boxFactor) + +# open the project and save all transforms of all tiles in all sections +IJ.log('Saving the coordinates transforms of each patch of each layer') +project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) +transformsPath = os.path.join(os.path.dirname(projectPath), namePlugin + 'transforms.txt') +fc.writeAllAffineTransforms(project,transformsPath) +fc.closeProject(project) + +fc.terminatePlugin(namePlugin, MagCFolder) diff --git a/assembly_EM.py b/assembly_EM.py new file mode 100644 index 0000000..95ca5a6 --- /dev/null +++ b/assembly_EM.py @@ -0,0 +1,92 @@ +#this script puts the acquired EM tiles into Trakem at the right positions +from __future__ import with_statement +import os, re, errno, string, shutil, time + +from java.awt.event import MouseAdapter, KeyEvent, KeyAdapter +from jarray import zeros, array +from java.util import HashSet, ArrayList +from java.awt.geom import AffineTransform +from java.awt import Color + +import ij +from ij import IJ, Macro +from fiji.tool import AbstractTool +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch, Display +from ini.trakem2.utils import Utils +from mpicbg.trakem2.align import Align, AlignTask + +import 
import fijiCommon as fc

namePlugin = 'assembly_EM'
MagCFolder = fc.startPlugin(namePlugin)
MagCParameters = fc.readMagCParameters(MagCFolder)

ControlWindow.setGUIEnabled(False)
MagC_EM_Folder = os.path.join(MagCFolder, 'MagC_EM','')

# reading transforms from the low-resolution montage
IJ.log('reading the affine transform parameters from the affine montage')
transformsPath = os.path.join(MagC_EM_Folder, 'assembly_lowEM_Transforms.txt')
with (open(transformsPath, 'r')) as f:
    transforms = f.readlines()
# The transforms file is a repeating 8-line record:
# [0] patch path, [1] layer index, [2..7] the 6 affine coefficients.
nLayers = max([int(transforms[i]) for i in range(1, len(transforms),8) ]) + 1 # why not, could simply read the EM_Metadata parameters ...

# getting downsamplingFactor
downsamplingFactor = MagCParameters['downsample_EM']['downsamplingFactor']
factorString = str(int(1000000*downsamplingFactor)).zfill(8)

# create the normal resolution project
IJ.log('Creating project with the full size EM images')
projectPath = fc.cleanLinuxPath(os.path.join(MagC_EM_Folder,'EMProject.xml'))
project, loader, layerset, _ = fc.getProjectUtils(fc.initTrakem(fc.cleanLinuxPath(os.path.dirname(projectPath)), 1))
project.saveAs(projectPath, True)

def _fullResPatchPath(transformLine):
    # Map the path of a downsampled, aligned patch back to the path of the
    # original full-resolution tile under EMData.
    # FIX: rstrip('\n') replaces the previous blind [:-1], which stripped the
    # newline kept by readlines() but would eat a real filename character if
    # the last line of the transforms file is not newline-terminated.
    alignedPatchPath = transformLine.rstrip('\n')
    alignedPatchName = os.path.basename(alignedPatchPath)
    toAlignPatchName = alignedPatchName.replace('_' + factorString, '').replace('_resized', '')
    return os.path.join(MagCFolder, 'EMData', os.path.basename(os.path.dirname(alignedPatchPath)), toAlignPatchName)

# Assembling all images in the project with the transforms computed on the low res and adjusted with the scale factor
IJ.log('Assembling all images in the project with the transforms computed on the low res and adjusted with the scale factor')
paths = []
locations = []
layers = []
for i in range(0, len(transforms), 8):
    toAlignPatchPath = fc.cleanLinuxPath(_fullResPatchPath(transforms[i]))
    IJ.log('toAlignPatchPath ' + toAlignPatchPath)
    l = int(transforms[i+1])
    paths.append(toAlignPatchPath)
    locations.append([0,0])  # placed at origin; real positions come from the affines below
    layers.append(l)

importFilePath = fc.createImportFile(MagC_EM_Folder, paths, locations, layers = layers, name = namePlugin)

# insert the tiles in the project
IJ.log('I am going to insert many files at factor ' + str(downsamplingFactor) + ' ...')
task = loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 1, 0)
task.join()

# apply the transforms
for i in range(0, len(transforms), 8):
    toAlignPatchPath = _fullResPatchPath(transforms[i])
    if toAlignPatchPath[:2] == os.sep + os.sep:
        toAlignPatchPath = toAlignPatchPath[1:]
    IJ.log('toAlignPatchPath ' + toAlignPatchPath)
    l = int(transforms[i+1])
    # keep the linear part as computed; scale only the translation back up
    # to full resolution
    aff = AffineTransform([float(transforms[i+2]), float(transforms[i+3]), float(transforms[i+4]), float(transforms[i+5]), float(transforms[i+6])*float(downsamplingFactor), float(transforms[i+7])*float(downsamplingFactor) ])
    layer = layerset.getLayers().get(l)
    patches = layer.getDisplayables(Patch)
    # find the imported patch whose absolute path matches this record
    thePatch = filter(lambda x: os.path.normpath(loader.getAbsolutePath(x)) == os.path.normpath(toAlignPatchPath), patches)[0]
    thePatch.setAffineTransform(aff)
    thePatch.updateBucket()

time.sleep(2)
fc.resizeDisplay(layerset)
time.sleep(2)
project.save()
fc.closeProject(project)
fc.terminatePlugin(namePlugin, MagCFolder)
import fijiCommon as fc
from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch
from java.util.concurrent.atomic import AtomicInteger
from ij.measure import Measurements

def contrastImage():
    # Worker run by fc.startThreads: repeatedly claims the next index from the
    # shared atomic counter and contrasts that image.
    # Reads script globals: atomicI, nPaths, toContrastPaths (list of
    # [inputPath, outputPath]) and normLocalContrastSize.
    while atomicI.get() < nPaths:
        k = atomicI.getAndIncrement()
        if k < nPaths:  # another thread may have claimed the last index first
            im = IJ.openImage(toContrastPaths[k][0])
            im = fc.normLocalContrast(im, normLocalContrastSize, normLocalContrastSize, 3, True, True)
            IJ.save(im,toContrastPaths[k][1])
            im.close()

namePlugin = 'assembly_LM'
MagCFolder = fc.startPlugin(namePlugin)

ControlWindow.setGUIEnabled(False)

# get some parameters
MagCParameters = fc.readMagCParameters(MagCFolder)
normLocalContrastSize = MagCParameters[namePlugin]['normLocalContrastSize'] # size of the neighborhood for local contrast for the brightfield channel
overlap = MagCParameters[namePlugin]['overlap'] # overlap between tiles, typically 0.1
refChannelIdentifier = str(MagCParameters[namePlugin]['refChannelIdentifier'])

normLocalContrastSizeFluo = MagCParameters[namePlugin]['normLocalContrastSizeFluo'] # for contrasting the fluo channels
minMaxFluo = MagCParameters[namePlugin]['minMaxFluo'] # for thresholding the fluo channels
flipHorizontally = MagCParameters[namePlugin]['flipHorizontally'] # flip horizontally the LM tiles

# initialize folders
LMDataFolder = os.path.join(MagCFolder, 'LMDataReordered')
LMFolder = fc.mkdir_p(os.path.join(MagCFolder, 'MagC_LM'))

# read metadata
LMMetadataPath = os.path.join(LMDataFolder, 'LM_Metadata.txt')
width, height, nChannels, xGrid, yGrid, scaleX, scaleY, channels = fc.readSessionMetadata(MagCFolder)

# get reference channel name
IJ.log('The reference channel identifier is: ' + refChannelIdentifier)
# first channel whose name contains the identifier is the reference channel
refChannel = filter(lambda x: refChannelIdentifier in x, channels)[0]
IJ.log('channels ' + str(channels))
IJ.log('refChannel: ' + refChannel)


# Preprocess the reference brightfield channel: 8-bit with mean sensible range pulled from all images
+# find thresholding range +IJ.log('Reading all images of the channel to find a good intensity range: ' + str(refChannel) ) +mins, maxs, imPaths = [], [], [] +for (dirpath, dirnames, filenames) in os.walk(LMDataFolder): + for filename in filenames: + if (os.path.splitext(filename)[1] == '.tif') and (refChannel in filename): + IJ.log('Reading to determine 8-biting thresholding range: ' + filename) + imPath = os.path.join(dirpath, filename) + imPaths.append(imPath) + im = IJ.openImage(imPath) + stats = im.getStatistics(Measurements.MIN_MAX) + mins.append(stats.min) + maxs.append(stats.max) +meanMin = sum(mins)/float(len(mins)) +meanMax = sum(maxs)/float(len(maxs)) +IJ.log('The channel min/max is ' + str(meanMin) + ' - ' + str(meanMax)) +# apply thresholding +for imPath in imPaths: + im = IJ.openImage(imPath) + im = fc.minMax(im, meanMin, meanMax) + IJ.run(im, '8-bit', '') + IJ.run(im, 'Flip Horizontally', '') # {Leica DMI, NikonTiEclipse} to Merlin + IJ.save(im, imPath) + IJ.log('Image ' + imPath + ' thresholded 8-bited') + +# process all other channels that are not refChannel: normLocalContrast, threshold, 8-bit +for (dirpath, dirnames, filenames) in os.walk(LMDataFolder): + for filename in filenames: + if (os.path.splitext(filename)[1] == '.tif') and not(refChannel in filename): + imPath = os.path.join(dirpath, filename) + im = IJ.openImage(imPath) + im = fc.normLocalContrast(im, normLocalContrastSizeFluo, normLocalContrastSizeFluo, 3, True, True) + im = fc.minMax(im, minMaxFluo[0], minMaxFluo[1]) + IJ.run(im, '8-bit', '') + if flipHorizontally: + IJ.run(im, 'Flip Horizontally', '') # Leica DMI to Merlin + IJ.save(im, imPath) + IJ.log('Image ' + imPath + ' processed') + +# add a contrasted reference channel (e.g., contrast the brightfield channel) +IJ.log('Adding a contrasted channel') +contrastedChannel = 'contrasted' + refChannel +channels.append(contrastedChannel) +toContrastPaths = [] + +for (dirpath, dirnames, filenames) in os.walk(LMDataFolder): + for 
filename in filenames: + IJ.log('ToContrast: ' + str(filename)) + if (os.path.splitext(filename)[1] == '.tif') and (refChannel in filename): + imagePath = os.path.join(dirpath, filename) + contrastedPath = os.path.join(dirpath, filename.replace(refChannel, contrastedChannel) ) + toContrastPaths.append([imagePath, contrastedPath]) +IJ.log('toContrastPaths : ' + str(toContrastPaths)) +nPaths = len(toContrastPaths) +atomicI = AtomicInteger(0) +fc.startThreads(contrastImage) + +# Update metadata with the new contrasted channel +f = open(LMMetadataPath, 'r') +lines = f.readlines() +for idLine, line in enumerate(lines): + if 'nChannels' in line: + lines[idLine] = 'nChannels = ' + str(nChannels + 1) + if 'channels' in line: + lines[idLine] = 'channels = [' + ','.join( map(lambda x: "'" + x + "'", channels) ) + ']' +f.close() +f = open(LMMetadataPath, 'w') +for line in lines: + f.write(line + '\n') +f.close() +IJ.log('addContrastedChannel done') + +# Create LM project with the contrastedChannel +nLayers = len(next(os.walk(LMDataFolder))[1]) +IJ.log('nLayers is ' + str(nLayers)) +IJ.log('Creating trakem project "LMProject" ') + +project, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(LMFolder, nLayers)) +projectPath = os.path.join(os.path.normpath(LMFolder) , 'LMProject.xml') +project.saveAs(projectPath, True) + +# determining tiles locations taking into account the overlap +paths, locations, layers = [], [], [] +widthEffective = int((1-overlap) * width) +heightEffective = int((1-overlap) * height) + +for channel in [contrastedChannel]: + IJ.log('Assembling LM sections from all layers') + for l in range(nLayers): + IJ.log('Each section consists of ' + str(xGrid) + ' x ' + str(yGrid) + ' patches') + for y in range(yGrid): + for x in range(xGrid): + sectionFolder = os.path.join(LMDataFolder, 'section_' + str(l).zfill(4)) + patchName = 'section_' + str(l).zfill(4) + '_channel_' + channel + '_tileId_' + str(x).zfill(2) + '-' + str(y).zfill(2) + '-tissue.tif' + 
patchPath = os.path.join(sectionFolder, patchName) + paths.append(patchPath) + locations.append([x*widthEffective, y*heightEffective]) + layers.append(l) +# import all tiles +importFile = fc.createImportFile(LMFolder, paths, locations, layers = layers) +task = loader.importImages(layerset.getLayers().get(0), importFile, '\t', 1, 1, False, 1, 0) +task.join() + +# resize display and save +fc.resizeDisplay(layerset) +time.sleep(5) +project.save() +time.sleep(1) +fc.closeProject(project) +IJ.log('Assembling the LM project done and saved into ' + projectPath) + +fc.terminatePlugin(namePlugin, MagCFolder) diff --git a/assembly_LMProjects.py b/assembly_LMProjects.py new file mode 100644 index 0000000..b921d60 --- /dev/null +++ b/assembly_LMProjects.py @@ -0,0 +1,90 @@ +from __future__ import with_statement + +from mpicbg.imglib.algorithm.correlation import CrossCorrelation +from mpicbg.imglib.image import ImagePlusAdapter + +from ij import IJ, Macro, ImagePlus +from ij.process import ByteProcessor +import os, shutil, time +from java.awt import Rectangle, Color +from java.util import HashSet, ArrayList + +from java.awt.geom import AffineTransform +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch +from bunwarpj.bUnwarpJ_ import computeTransformationBatch, elasticTransformImageMacro +from bunwarpj import MiscTools +import fijiCommon as fc + +from register_virtual_stack import Transform_Virtual_Stack_MT + +from ini.trakem2.io import CoordinateTransformXML + + +namePlugin = 'assembly_LMProjects' +MagCFolder = fc.startPlugin(namePlugin) +# MagCFolder = r'E:\Users\Thomas\MixturesClean\MinimalPipelineTest5Sections_OneMoreTest04_07_17' +ControlWindow.setGUIEnabled(False) + +EMFolder = os.path.join(MagCFolder, 'MagC_EM','') +LMFolder = os.path.join(MagCFolder, 'MagC_LM', '') + +width, height, nChannels, xGrid, yGrid, scaleX, scaleY, channels = fc.readSessionMetadata(MagCFolder) +# channels = ['Brightfield', 'GFP', 'DsRed', 
'contrastedBrightfield'] + +projectPath = fc.findFilesFromTags(MagCFolder,['EM', 'Project'])[0] # should I make 2 projects ? One for rigid, one for warped ? + +exportedEMFolder = fc.findFoldersFromTags(MagCFolder, ['export_alignedEMForRegistration'])[0] +nLayers = len(os.listdir(exportedEMFolder)) + +registrationFolder = os.path.join(os.path.dirname(projectPath), 'LMEMRegistration') + + +for idChannel, channel in enumerate(channels): + affineCroppedFolder = os.path.join(LMFolder, 'affineCropped_' + channel) + + # the dimensions of the first affineCropped determine the size of the layerset of the trakem project (and for the export) + firstImagePath = os.path.join(affineCroppedFolder, os.walk(affineCroppedFolder).next()[2][0]) + im0 = IJ.openImage(firstImagePath) + width0 = im0.getWidth() + height0 = im0.getHeight() + im0.close() + + roiExport = Rectangle(0, 0, width0, height0) + + projectPath = os.path.join(EMFolder, 'LMProject_' + channel + '.xml') + p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(LMFolder, nLayers)) + p.saveAs(projectPath, True) + layerset.setDimensions(0, 0, width0, height0) + + + for l, layer in enumerate(layerset.getLayers()): + layerFolder = os.path.join(registrationFolder, 'layer_' + str(l).zfill(4)) + registeredFolder = os.path.join(layerFolder, 'registered') + MLSTPath = os.path.join(registeredFolder, 'MLST.xml') + if os.path.isfile(MLSTPath): + MLSTransform = CoordinateTransformXML().parse(MLSTPath) + affineCroppedImPath = os.path.join(affineCroppedFolder, 'affineCropped_' + channel + '_' + str(l).zfill(4) + '.tif') + patch = Patch.createPatch(p, affineCroppedImPath) + layer.add(patch) + patch.setCoordinateTransform(MLSTransform) # does the order matter ? 
apparently yes, but I have to be sure that it is not an offset problem + IJ.log('Setting the mlsTransform in layer ' + str(l) + ' ' + str(MLSTransform)) + patch.updateBucket() + + if idChannel < len(channels)-2: # if it is a fluochannel + MLSTransformedFolder = fc.mkdir_p(os.path.join(LMFolder, 'MLS_Transformed_' + str(channel), '')) + imp = loader.getFlatImage(layer, roiExport, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + impPath = os.path.join(MLSTransformedFolder, 'MLSTransformed_' + channel + '_' + str(l).zfill(4) + '.tif') + IJ.save(imp, impPath) + + + IJ.log('Project ' + channel + ' assembled') + + # # Warning ! Should I resize or not ? does this not create an offset ? + # fc.resizeDisplay(layerset) + + p.save() + fc.closeProject(p) + +IJ.log('Done') +fc.terminatePlugin(namePlugin, MagCFolder) diff --git a/assembly_lowEM.py b/assembly_lowEM.py new file mode 100644 index 0000000..6b7a8b8 --- /dev/null +++ b/assembly_lowEM.py @@ -0,0 +1,119 @@ +#this script puts the acquired EM tiles into Trakem at the right positions +from __future__ import with_statement +import os, re, errno, string, shutil, time + +from java.awt.event import MouseAdapter, KeyEvent, KeyAdapter +from jarray import zeros, array +from java.util import HashSet, ArrayList +from java.awt.geom import AffineTransform +from java.awt import Color + +from ij import IJ, Macro +from ij.io import Opener, FileSaver +from fiji.tool import AbstractTool +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch +from ini.trakem2.utils import Utils +from ini.trakem2.display import Display, Patch +from mpicbg.trakem2.align import Align, AlignTask + +import fijiCommon as fc + +from java.lang import Thread +from java.util.concurrent.atomic import AtomicInteger +from java.lang import Runtime + +def parallelStitch(atom, foldersToStitch, allPatchCoordinates): + while atom.get() < len(foldersToStitch): + k = atom.getAndIncrement() + if (k < 
len(foldersToStitch)): + sectionFolder = foldersToStitch[k] + + tileConfigurationPath = os.path.join(sectionFolder, 'TileConfiguration_' + str(k).zfill(4) + '.registered.txt') + + stitchCommand = 'type=[Filename defined position] order=[Defined by filename ] grid_size_x=' + str(numTilesX) + ' grid_size_y=' + str(numTilesY) + ' tile_overlap=' + str(100 * (tileOverlapX + tileOverlapY)/2.) + ' first_file_index_x=0 first_file_index_y=0 directory=' + sectionFolder + ' file_names=Tile_{x}-{y}_resized_' + factorString + '.tif output_textfile_name=TileConfiguration_' + str(k).zfill(4) +'.txt fusion_method=[Do not fuse images (only write TileConfiguration)] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap subpixel_accuracy computation_parameters=[Save computation time (but use more RAM)] image_output=[Write to disk] output_directory=' + sectionFolder + IJ.log(stitchCommand) + IJ.run('Grid/Collection stitching', stitchCommand) + + f = open(tileConfigurationPath, 'r') + lines = f.readlines()[4:] # trimm the heading + f.close() + + for line in lines: + # paths + path = os.path.join(sectionFolder, line.replace('\n', '').split(';')[0]) + #locations + x = float(line.replace('\n', '').split(';')[2].split(',')[0].split('(')[1]) + y = float(line.replace('\n', '').split(';')[2].split(',')[1].split(')')[0]) + + allPatchCoordinates.append([path, [x,y], k]) + +namePlugin = 'assembly_lowEM' +MagCFolder = fc.startPlugin(namePlugin) + +ControlWindow.setGUIEnabled(False) +MagC_EM_Folder = fc.mkdir_p(os.path.join(MagCFolder, 'MagC_EM','')) +MagCParameters = fc.readMagCParameters(MagCFolder) + +downsamplingFactor = MagCParameters['downsample_EM']['downsamplingFactor'] +factorString = str(int(1000000*downsamplingFactor)).zfill(8) + +# read some metadata +EMMetadataPath = fc.findFilesFromTags(MagCFolder,['EM', 'Metadata'])[0] +EMMetadata = fc.readParameters(EMMetadataPath) +numTilesX = EMMetadata['numTilesX'] +numTilesY = 
EMMetadata['numTilesY'] +xPatchEffectiveSize = EMMetadata['xPatchEffectiveSize'] +yPatchEffectiveSize = EMMetadata['yPatchEffectiveSize'] +tileOverlapX = EMMetadata['tileOverlapX'] +tileOverlapY = EMMetadata['tileOverlapY'] +nbLayers = EMMetadata['nSections'] +IJ.log('There are ' + str(nbLayers) + ' EM layers') + +########################################## +# Stitching of the low-res EM project +########################################## +allPatchCoordinates = [] + +downsampledFolder = os.path.join(MagC_EM_Folder, 'MagC_EM_' + factorString) +IJ.log('downsampledFolder ' + downsampledFolder) + +foldersToStitch = [os.path.join(downsampledFolder, folderName) for folderName in os.walk(downsampledFolder).next()[1]] + +# stitching should be done in parallel but the stitching plugin does not seem to run in parallel, so fractionCores=0 -> only one core used ... +atom = AtomicInteger(0) +fc.startThreads(parallelStitch, fractionCores = 0, wait = 0, arguments = (atom, foldersToStitch, allPatchCoordinates)) + +paths = [coordinates[0] for coordinates in allPatchCoordinates] +locations = [coordinates[1] for coordinates in allPatchCoordinates] +layers = [coordinates[2] for coordinates in allPatchCoordinates] + +# create the low-res trakem project with the computed stitching coordinates +projectName = 'EMProject_' + factorString + '.xml' +projectPath = fc.cleanLinuxPath(os.path.join(MagC_EM_Folder , projectName)) +IJ.log('Creating the Trakem project ' + projectName) + +project, loader, layerset, nbLayers = fc.getProjectUtils(fc.initTrakem(fc.cleanLinuxPath(MagC_EM_Folder), nbLayers)) +project.saveAs(projectPath, True) +time.sleep(1) # probably useless +loader.setMipMapsRegeneration(False) + +importFilePath = fc.createImportFile(MagC_EM_Folder, paths, locations, layers = layers, name = namePlugin + factorString) + +IJ.log('I am going to insert many files at factor ' + str(downsamplingFactor)) +task = loader.importImages(layerset.getLayers().get(0), importFilePath, '\t', 1, 1, False, 
1, 0) +task.join() + +time.sleep(2) +fc.resizeDisplay(layerset) +time.sleep(2) +project.save() +time.sleep(2) + +# save all transforms into one file +transformsPath = os.path.join(MagC_EM_Folder , namePlugin + '_Transforms.txt') +fc.writeAllAffineTransforms(project, transformsPath) + +fc.closeProject(project) +IJ.log('Assembling the low EM project done and saved into ' + projectPath) +fc.terminatePlugin(namePlugin, MagCFolder) \ No newline at end of file diff --git a/compute_RegistrationMovingLeastSquares.py b/compute_RegistrationMovingLeastSquares.py new file mode 100644 index 0000000..1f47ee6 --- /dev/null +++ b/compute_RegistrationMovingLeastSquares.py @@ -0,0 +1,282 @@ +from __future__ import with_statement +from ij import IJ +from ij import ImagePlus +from ij import WindowManager +from ij.process import ByteProcessor +import os, time, pickle, threading +import fijiCommon as fc +from java.awt import Rectangle, Color +from java.awt.geom import AffineTransform +from java.util import HashSet, ArrayList +from java.lang import Runtime +from java.util.concurrent.atomic import AtomicInteger +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch +from ini.trakem2.imaging import Blending +from mpicbg.trakem2.align import RegularizedAffineLayerAlignment +from bunwarpj.bUnwarpJ_ import computeTransformationBatch, elasticTransformImageMacro +from bunwarpj import MiscTools + +from mpicbg.ij import SIFT +from mpicbg.imagefeatures import FloatArray2DSIFT + +from register_virtual_stack import Transform_Virtual_Stack_MT +from register_virtual_stack import Register_Virtual_Stack_MT + +from mpicbg.models import PointMatch +from mpicbg.ij import FeatureTransform +from mpicbg.models import RigidModel2D, AffineModel2D +from mpicbg.models import NotEnoughDataPointsException + +from mpicbg.trakem2.transform import MovingLeastSquaresTransform +from mpicbg.trakem2.transform import CoordinateTransform +from mpicbg.trakem2.transform import 
CoordinateTransformList +from mpicbg.trakem2.transform import TranslationModel2D +from mpicbg.trakem2.transform import TransformMesh + +import java +from Jama import Matrix +from Jama import SingularValueDecomposition +import jarray + + +def pythonToJamaMatrix(m): + a = Matrix(jarray.array([[0]*len(m) for id in range(len(m))], java.lang.Class.forName("[D"))) + for x, col in enumerate(m): + for y, val in enumerate(col): + a.set(x, y, m[x][y]) + return a + +def getSIFTMatchingParameters(steps, initialSigma, minOctaveSize, maxOctaveSize, fdBins, fdSize): + p = FloatArray2DSIFT.Param().clone() + p.steps = steps + p.initialSigma = initialSigma + p.minOctaveSize = minOctaveSize + p.maxOctaveSize = maxOctaveSize + p.fdBins = fdBins + p.fdSize = fdSize + return p + +def getFeatures(imPath, p): + features = HashSet() + im = IJ.openImage(imPath) + SIFT(FloatArray2DSIFT(p)).extractFeatures(im.getProcessor(), features) + IJ.log(str(features.size()) + ' features extracted' ) + im.close() + return features + +def getMatchingResults(features1, features2): + candidates = ArrayList() + inliers = ArrayList() + FeatureTransform.matchFeatures(features1, features2, candidates, 0.92) + # FeatureTransform.matchFeatures(features1, features2, candidates, 0.95) + model = AffineModel2D() + try: + modelFound = model.filterRansac(candidates, inliers, 1000, 10, 0, 7) # (candidates, inliers, iterations, maxDisplacement, ratioOfConservedFeatures, minNumberOfConservedFeatures) + except NotEnoughDataPointsException, e: + modelFound = False + IJ.log('NotEnoughDataPointsException') + return None + if not modelFound: + IJ.log('model not found ') + return None + else: + IJ.log('model found') + return [model, inliers] + +def getScalingFactors(aff): + m = pythonToJamaMatrix([[aff.getScaleX(), aff.getShearX()], [aff.getShearY(), aff.getScaleY()]]) + SVD = SingularValueDecomposition(m) + S = SVD.getS().getArrayCopy() + return S[0][0], S[1][1] + +def computeRegistration(): + while atomicI.get() < nSections: 
def computeRegistration():
	# Thread worker: each thread claims layer indices from the shared atomic
	# counter atomicI and computes the LM->EM registration for that layer.
	# Reads module-level globals: imPaths, registrationFolder, nOctaves,
	# matchingThreshold, widthEM/heightEM, pZ/loaderZ/layerZ, lock,
	# registrationStats.
	while atomicI.get() < nSections:
		k = atomicI.getAndIncrement()
		if k < nSections:
			l = k
			IJ.log('Computing EM/LM registration for layer ' + str(l).zfill(4))

			# per-layer working folders: inputs in toRegister/, results in registered/
			layerFolder = fc.mkdir_p(os.path.join(registrationFolder, 'layer_' + str(l).zfill(4)))
			toRegisterFolder = fc.mkdir_p(os.path.join(layerFolder, 'toRegister'))
			registeredFolder = fc.mkdir_p(os.path.join(layerFolder, 'registered'))

			# Applying appropriate filters to make lowresEM and LM look similar for layer l
			imLM = IJ.openImage(imPaths['LM'][l])
			imLM = fc.localContrast(imLM)
			imLMPath = os.path.join(toRegisterFolder, 'imLM_' + str(l).zfill(4) + '.tif')
			IJ.save(imLM, imLMPath)

			imEM = IJ.openImage(imPaths['EM'][l])
			imEM = fc.localContrast(imEM)
			imEMPath = os.path.join(toRegisterFolder, 'imEM_' + str(l).zfill(4) + '.tif')
			IJ.save(imEM, imEMPath)

			# Compute first a simple affine registration on the non-cropped images
			IJ.log('Computing affine and moving least squares alignment for layer ' + str(l).zfill(4))
			firstStepRegistered = False

			# registration at first step with 1step/octave (less features)
			pLowRes = getSIFTMatchingParameters(nOctaves[0], 1.6, 16, 4000, 8, 4)

			featuresLM = getFeatures(imLMPath, pLowRes)
			featuresEM = getFeatures(imEMPath, pLowRes)

			matchingResults = getMatchingResults(featuresLM, featuresEM)
			if matchingResults is None:
				IJ.log('No registration matching at low resolution matching step 1 in layer ' + str(l).zfill(4))
			else:
				model, inliers = matchingResults
				distance = PointMatch.meanDistance(inliers) # mean displacement of the remaining matching features
				IJ.log('---Layer ' + str(l).zfill(4) + ' distance ' + str(distance) + ' px with ' + str(len(inliers)) + ' inliers')
				if distance > matchingThreshold[0]:
					IJ.log('Matching accuracy is lower than the threshold at the low resolution step 1 - ' + str(l).zfill(4) + ' - distance - ' + str(distance))
				else:
					affTransform = model.createAffine()
					s1, s2 = getScalingFactors(affTransform)
					IJ.log('Layer ' + str(l).zfill(4) + ' scaling factors - step 1 - ' + str(s1) + ' - ' + str(s2) + '--' + str(s1*s2) + ' affDeterminant ' + str(affTransform.getDeterminant()) + ' nInliers ' + str(len(inliers)))
					if (abs(s1-1) < 0.2) and (abs(s2-1) < 0.2): # scaling in both directions should be close to 1
						IJ.log('First step ok - layer ' + str(l).zfill(4))
						firstStepRegistered = True
						loaderZ.serialize(affTransform, os.path.join(registeredFolder, 'affineSerialized'))

			if not firstStepRegistered:
				IJ.log('First step registration in layer ' + str(l).zfill(4) + ' with few features has failed. Trying with more features.')
				# registration at first step with 3steps/octave (more features)
				# pLowRes = getSIFTMatchingParameters(3, 1.6, 64, 4000, 8, 4)
				# NOTE(review): this retry uses the same parameters as step 1 ('for BIB'),
				# so it is not actually 'more features' as the log claims — confirm intent
				pLowRes = getSIFTMatchingParameters(nOctaves[0], 1.6, 16, 4000, 8, 4) # for BIB

				featuresLM = getFeatures(imLMPath, pLowRes)
				featuresEM = getFeatures(imEMPath, pLowRes)

				matchingResults = getMatchingResults(featuresLM, featuresEM)
				if matchingResults is None:
					IJ.log('No registration matching at low resolution matching step 1bis in layer ' + str(l).zfill(4))
				else:
					model, inliers = matchingResults
					distance = PointMatch.meanDistance(inliers) # mean displacement of the remaining matching features
					IJ.log('---Layer ' + str(l).zfill(4) + ' distance ' + str(distance) + ' px with ' + str(len(inliers)) + ' inliers')
					if distance > matchingThreshold[0]:
						# NOTE(review): log says 'high resolution' but this is still the low-res step 1bis
						IJ.log('Matching accuracy is lower than the threshold at the high resolution step 1bis - ' + str(l).zfill(4) + ' - distance - ' + str(distance))
					else:
						affTransform = model.createAffine()
						s1, s2 = getScalingFactors(affTransform)
						IJ.log('Layer ' + str(l).zfill(4) + ' scaling factors - step 1bis - ' + str(s1) + ' - ' + str(s2) + '--' + str(s1*s2) + ' affDeterminant ' + str(affTransform.getDeterminant()) + ' nInliers ' + str(len(inliers)))
						if (abs(s1-1) < 0.2) and (abs(s2-1) < 0.2): # scaling in both directions should be close to 1
							IJ.log('First step 1bis ok - layer ' + str(l).zfill(4))
							firstStepRegistered = True
							loaderZ.serialize(affTransform, os.path.join(registeredFolder, 'affineSerialized'))

			if not firstStepRegistered:
				IJ.log('The two first step trials in layer ' + str(l).zfill(4) + ' have failed')
			else:
				# Affine transform and crop the LM, and compute a high res MLS matching
				with lock: # only one trakem working at a time
					# apply affTransform
					patch = Patch.createPatch(pZ, imLMPath)
					layerZ.add(patch)
					patch.setAffineTransform(affTransform)
					patch.updateBucket()

					# crop and export
					bb = Rectangle(0, 0, widthEM, heightEM)
					affineCroppedIm = loaderZ.getFlatImage(layerZ, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerZ.getAll(Patch), True, Color.black, None)
					affineCroppedImPath = os.path.join(toRegisterFolder, 'affineCroppedLM_' + str(l).zfill(4) + '.tif')
					IJ.save(affineCroppedIm, affineCroppedImPath)
					affineCroppedIm.close()

					layerZ.remove(patch)
					layerZ.recreateBuckets()

				# NOTE(review): the SIFT/MLS computation below is assumed to be OUTSIDE
				# the lock (only the shared dummy trakem needs serializing) — confirm
				# against the original indentation
				pHighRes = getSIFTMatchingParameters(nOctaves[1], 1.6, 64, 4096, 8, 4)
				featuresLM = getFeatures(affineCroppedImPath, pHighRes)
				featuresEM = getFeatures(imEMPath, pHighRes)

				# get the MLS
				matchingResults = getMatchingResults(featuresLM, featuresEM)
				if matchingResults is None:
					IJ.log('It cannot be, there should be a good match given that an affine was computed. Layer ' + str(l).zfill(4))
				else:
					model, inliers = matchingResults
					affTransform = model.createAffine()
					s1, s2 = getScalingFactors(affTransform)
					IJ.log('Second step determinant - layer ' + str(l).zfill(4) + ' - determinant - ' + str(affTransform.getDeterminant()) + ' nInliers ' + str(len(inliers)) + 'Scaling factors - step 2 - ' + str(s1) + ' - ' + str(s2))
					if (abs(s1-1) < 0.2) and (abs(s2-1) < 0.2) and len(inliers) > 50: # scaling in both directions should be close to 1
						distance = PointMatch.meanDistance(inliers) # mean displacement of the remaining matching features
						if distance > matchingThreshold[1]:
							IJ.log('Weird: matching accuracy is lower than the threshold at the high resolution step 2 - ' + str(l).zfill(4) + ' - distance - ' + str(distance))
						else:
							# build and persist the moving-least-squares transform
							mlst = MovingLeastSquaresTransform()
							mlst.setModel(AffineModel2D)
							mlst.setAlpha(1)
							mlst.setMatches(inliers)

							xmlMlst = mlst.toXML('\t')
							with open(os.path.join(registeredFolder, 'MLST.xml'), 'w') as f:
								f.write(xmlMlst)

							loaderZ.serialize(mlst, os.path.join(registeredFolder, 'mlstSerialized'))

							# record [layer, residual, inlier count] for the stats pickle
							registrationStats.append([l, distance, len(inliers)])
+exportedLMFolder = fc.findFoldersFromTags(MagCFolder,['exported_downscaled_1_' + channels[-1] ])[0] # finds the brightfield contrasted channel +temporaryFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), 'temporary_LMEMRegistration')) # to save contrasted images +registrationFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), 'LMEMRegistration')) # to save contrasted images + + +imPaths = {} +imPaths['EM'] = [os.path.join(exportedEMFolder, imageName) for imageName in fc.naturalSort(os.listdir(exportedEMFolder)) if os.path.splitext(imageName)[1] == '.tif'] +imPaths['LM'] = [os.path.join(exportedLMFolder, imageName) for imageName in fc.naturalSort(os.listdir(exportedLMFolder)) if os.path.splitext(imageName)[1] == '.tif'] + +# surfaceIds = [0,16,32,48,65,81,97,113,129,145,162,179,195,211,227,243,260,276,293,310] # optimal 16-17 +# imPaths['EM'] = [imPaths['EM'][i] for i in surfaceIds] + +# get the dimensions of the EM layerset by looking at the dimensions of the first EM image, save for next script +imEM0 = IJ.openImage(imPaths['EM'][0]) +widthEM = imEM0.width +heightEM = imEM0.height +imEM0.close() +f = open(os.path.join(registrationFolder, 'lowResEMBounds'), 'w') +pickle.dump([widthEM, heightEM], f) +f.close() + +registrationStatsPath = os.path.join(registrationFolder, 'registrationStats') +registrationStats = [] + +# create dummy trkem for applying affine and cropping LM in the first registration step +pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(temporaryFolder, 1)) +layersetZ.setDimensions(0, 0, widthEM * 5, heightEM * 5) +layerZ = layersetZ.getLayers().get(0) + +lock = threading.Lock() + +# Setting up the parallel threads and starting them +atomicI = AtomicInteger(0) +fc.startThreads(computeRegistration, fractionCores = 1, wait = 0.5) + +fc.closeProject(pZ) # close dummy trakem + +# save some stats on the registration +with open(registrationStatsPath, 'w') as f: + pickle.dump(registrationStats, f) + 
def resizeAndSave(filePaths, l):
	# Thread worker: claim tile indices from the shared AtomicInteger l and,
	# for each tile of the current batch, (1) crop/contrast-normalize the raw
	# EM tile in place, (2) write a downsampled copy next to it.
	# Reads module-level globals: currentWrittenLayer, nTilesAtATime,
	# downSampledEMFolder, tileHeight, cropRoi, normLocalContrastSize,
	# scaleFactor, factorString.
	while l.get() < min(len(filePaths), currentWrittenLayer + nTilesAtATime + 1) :
		k = l.getAndIncrement()
		if k < min(len(filePaths), currentWrittenLayer + nTilesAtATime):

			filePath = filePaths[k]

			imageName = os.path.basename(filePath)
			resizedImageName = os.path.splitext(imageName)[0] + '_resized_' + factorString + os.path.splitext(imageName)[1]

			# keep the per-section folder structure in the downsampled tree
			imageFolderName = os.path.basename(os.path.dirname(filePath))

			resizedFilePath = fc.cleanLinuxPath(os.path.join(downSampledEMFolder, imageFolderName, resizedImageName))

			im = Opener().openImage(filePath)
			IJ.log('Am I going to process the image: im.height = ' + str(im.height) + ' - tileHeight = ' + str(tileHeight) + ' tile number ' + str(k))
			if im.height == tileHeight: # crop a few lines at the top only if it has not already been done (sometimes the pipeline gets rerun)
				im = fc.crop(im,cropRoi)
				im = fc.normLocalContrast(im, normLocalContrastSize, normLocalContrastSize, 3, True, True)
				# IJ.run(im, 'Replace value', 'pattern=0 replacement=1') # only for final waferOverview
				# overwrites the raw tile in place with the cropped/normalized version
				FileSaver(im).saveAsTiff(filePath)

			if not os.path.isfile(resizedFilePath):
				im = fc.resize(im, scaleFactor)
				FileSaver(im).saveAsTiff(resizedFilePath)
				IJ.log('Image resized to ' + resizedFilePath)
			# NOTE(review): close() placed outside the isfile guard so the image is
			# always released — confirm against the original indentation
			im.close()
= fc.startPlugin(namePlugin) +ControlWindow.setGUIEnabled(False) +MagCParameters = fc.readMagCParameters(MagCFolder) + +EMDataFolder = os.path.join(MagCFolder, 'EMData') +MagCEMFolder = os.path.join(MagCFolder, 'MagC_EM') + +# read metadata +EMMetadataPath = fc.findFilesFromTags(MagCFolder,['EM_Metadata'])[0] +EMMetadata = fc.readParameters(EMMetadataPath) +tileWidth = int(EMMetadata['tileWidth']) +tileHeight = int(EMMetadata['tileHeight']) +IJ.log('TileWidth ' + str(tileWidth)) +IJ.log('TileHeight ' + str(tileHeight)) +cropRoi = Roi(100, 20, tileWidth - 2*100, tileHeight-20) # remove first lines because the Zeiss API + +# read downsampling factor +downsamplingFactor = MagCParameters[namePlugin]['downsamplingFactor'] +scaleFactor = 1./downsamplingFactor +factorString = str(int(1000000*downsamplingFactor)).zfill(8) +filePathsPath = os.path.join(MagCEMFolder, 'imagePathsForDownsampling' + factorString + '.txt') + +nTilesAtATime = MagCParameters[namePlugin]['nTilesAtATime'] + +# create or read the file with the paths to process +if not os.path.isfile(filePathsPath): + filePaths = [] + for (dirpath, dirnames, filenames) in os.walk(EMDataFolder): + for filename in filenames: + if filename.endswith('.tif'): + imPath = fc.cleanLinuxPath(os.path.join(dirpath, filename)) + filePaths.append(imPath) + with open(filePathsPath,'w') as f: + for path in filePaths: + f.write(path + '\n') + # pickle.dump(filePaths,f) +else: + filePaths = [] + with open(filePathsPath,'r') as f: + lines = f.readlines() + for line in lines: + filePaths.append(line.replace('\n', '')) + # filePaths = pickle.load(f) + + +#Create all the subfolders +downSampledEMFolder = fc.mkdir_p(os.path.join(MagCEMFolder, 'MagC_EM_' + factorString, '')) +for sectionFolderName in os.walk(EMDataFolder).next()[1]: + fc.mkdir_p(os.path.join(downSampledEMFolder, sectionFolderName)) + +normLocalContrastSize = MagCParameters[namePlugin]['normLocalContrastSize'] +# downsample in parallel +threads = [] +currentLayerPath = 
os.path.join(MagCEMFolder, 'currentLayer_' + namePlugin + '.txt') +currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nTilesAtATime) +IJ.log(namePlugin + ' layer ' + str(currentWrittenLayer)) +atomicI = AtomicInteger(currentWrittenLayer) +fc.startThreads(resizeAndSave, fractionCores = 0.9, wait = 0, arguments = (filePaths, atomicI)) + +# terminate or rerun if more tiles to be processed +time.sleep(1) +fc.shouldRunAgain(namePlugin, atomicI.get(), len(filePaths), MagCFolder, '') \ No newline at end of file diff --git a/export_LMChannels.py b/export_LMChannels.py new file mode 100644 index 0000000..c49b25a --- /dev/null +++ b/export_LMChannels.py @@ -0,0 +1,73 @@ +from __future__ import with_statement +from ij import IJ +from ij import Macro +import os +import time +import fijiCommon as fc +from mpicbg.trakem2.align import RegularizedAffineLayerAlignment +from java.awt.geom import AffineTransform +from java.awt import Rectangle +from java.util import HashSet +from ini.trakem2.display import Patch +from ini.trakem2.imaging import Blending +from ini.trakem2 import Project, ControlWindow + +namePlugin = 'export_LMChannels' +MagCFolder = fc.startPlugin(namePlugin) +ControlWindow.setGUIEnabled(False) + +transformsPath = fc.findFilesFromTags(MagCFolder, ['LM', 'transforms'])[0] +with (open(transformsPath, 'r')) as f: + transforms = f.readlines() +nLayers = max([int(transforms[i]) for i in range(1, len(transforms),8) ]) + 1 +IJ.log('nLayers = ' + str(nLayers)) + +width, height, nChannels, xGrid, yGrid, scaleX, scaleY, channels = fc.readSessionMetadata(MagCFolder) + +MagCParameters = fc.readMagCParameters(MagCFolder) +scaleFactors = MagCParameters[namePlugin]['scaleFactors'] # scale factor for export, typically 1 and 0.1 + +IJ.log('Iterating over the LM channels') +for channel in channels: + IJ.log('Processing channel ' + str(channel)) + IJ.log('Creating a TrakEM project') + + # create trakem project for the channel + trakemFolder = 
os.path.join(os.path.dirname(transformsPath), '') + project = fc.initTrakem(trakemFolder,nLayers) + loader = project.getLoader() + loader.setMipMapsRegeneration(False) # disable mipmaps + layerset = project.getRootLayerSet() + + # insert the tiles according to the transforms computed on the reference brightfield channel + IJ.log('Inserting all patches') + for i in range(0, len(transforms), 8): + alignedPatchPath = transforms[i] + l = int(transforms[i+1]) + alignedPatchName = os.path.basename(alignedPatchPath) + + toAlignPatchPath = fc.cleanLinuxPath(os.path.join(os.path.dirname(alignedPatchPath), alignedPatchName.replace(channels[-1], channel))) + toAlignPatchPath = toAlignPatchPath[:-1] # remove a mysterious trailing character ... + IJ.log('In channel ' + str(channel) + ', inserting this image: ' + str(toAlignPatchPath)) + aff = AffineTransform([float(transforms[a]) for a in range(i+2, i+8)]) + patch = Patch.createPatch(project, toAlignPatchPath) + layer = layerset.getLayers().get(l) + layer.add(patch) + patch.setAffineTransform(aff) + patch.updateBucket() + + time.sleep(1) + IJ.log('Readjusting display') + fc.resizeDisplay(layerset) + IJ.log('Blending all layers') + Blending.blendLayerWise(layerset.getLayers(), True, None) + + IJ.log('Exporting') + for scaleFactor in scaleFactors: + theBaseName = 'exported_downscaled_' + str(int(1/float(scaleFactor))) + '_' + channel + outputFolder = fc.mkdir_p( os.path.join(os.path.dirname(transformsPath), theBaseName)) + fc.exportFlat(project,outputFolder,scaleFactor, baseName = theBaseName, bitDepth = 8) + + fc.closeProject(project) + +fc.terminatePlugin(namePlugin, MagCFolder) \ No newline at end of file diff --git a/export_TransformedCroppedLM.py b/export_TransformedCroppedLM.py new file mode 100644 index 0000000..fe25279 --- /dev/null +++ b/export_TransformedCroppedLM.py @@ -0,0 +1,127 @@ +from __future__ import with_statement +# MLSTWithoutAffine + # wrong, unscaled, contractViolated bug +# StandardMLST + # now completely 
fails on the first 20 slices ... a bit surprising ... +# MLSTAffine + # ok +# AffineFromMLSTXMLRead + # is simply identity + +# how to exclude non-matching registration ? + # size of the transformed LM + # would sometimes fail + # compute correlation of the cropped registered pair + # does not work + # I also do not know trivially where to crop + # rerun a SIFT in parallel and get the displacement + # check the amount of shear in the computed transform + + +from mpicbg.imglib.algorithm.correlation import CrossCorrelation +from mpicbg.imglib.image import ImagePlusAdapter + +from ij import IJ, Macro, ImagePlus +from ij.process import ByteProcessor +import os, shutil, time, pickle +from java.awt import Rectangle, Color +from java.util import HashSet, ArrayList + +from java.awt.geom import AffineTransform +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch +import fijiCommon as fc + +from register_virtual_stack import Transform_Virtual_Stack_MT + +from ini.trakem2.io import CoordinateTransformXML + +from distutils.dir_util import copy_tree + +namePlugin = 'export_TransformedCroppedLM' +MagCFolder = fc.startPlugin(namePlugin) +# MagCFolder = r'E:\Users\Thomas\MixturesClean\MinimalPipelineTest5Sections_OneMoreTest04_07_17' +ControlWindow.setGUIEnabled(False) + +width, height, nChannels, xGrid, yGrid, scaleX, scaleY, channels = fc.readSessionMetadata(MagCFolder) +# channels = ['Brightfield', 'GFP', 'DsRed', 'contrastedBrightfield'] + +LMFolder = os.path.join(MagCFolder, 'MagC_LM') + +projectPath = fc.findFilesFromTags(MagCFolder,['EM', 'Project'])[0] # should I make 2 projects ? One for rigid, one for warped ? 
# --- Affine-crop the LM mosaics into EM space (Fiji/Jython, TrakEM2) ---
# Uses the per-layer affine computed by the LM-EM registration step to paste each
# LM channel mosaic into a dummy single-layer TrakEM2 project and export the crop.
# NOTE(review): channels, LMFolder, projectPath, MagCFolder, namePlugin and fc are
# defined earlier in this script (outside this chunk) — confirm before reuse.

exportedEMFolder = fc.findFoldersFromTags(MagCFolder, ['export_alignedEMForRegistration'])[0]
nLayers = len(os.listdir(exportedEMFolder))

registrationFolder = os.path.join(os.path.dirname(projectPath), 'LMEMRegistration')

# bounds of the low-resolution EM export, pickled by the registration step
f = open(os.path.join(registrationFolder, 'lowResEMBounds'), 'r')
widthEM, heightEM = pickle.load(f)
f.close()

# dummy 1-layer TrakEM2 project used as a pasting canvas; oversized (x5) so the
# transformed LM patch always fits before cropping back to the EM bounds
pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(registrationFolder, 1))
layersetZ.setDimensions(0, 0, widthEM * 5, heightEM * 5)
layerZ = layersetZ.getLayers().get(0)

# create the output folders, one per channel
for channel in channels:
    affineCroppedFolder = fc.mkdir_p(os.path.join(LMFolder, 'affineCropped_' + channel))

for l in range(nLayers):
    layerFolder = os.path.join(registrationFolder, 'layer_' + str(l).zfill(4))
    registeredFolder = os.path.join(layerFolder, 'registered')
    affTransformPath = os.path.join(registeredFolder, 'affineSerialized')
    if os.path.isfile(affTransformPath):  # only layers whose registration succeeded
        affTransform = loaderZ.deserialize(affTransformPath)

        for channel in channels:
            affineCroppedFolder = os.path.join(LMFolder, 'affineCropped_' + channel)

            LMMosaicsPath = fc.cleanLinuxPath(os.path.join(LMFolder, 'exported_downscaled_1_' + channel, 'exported_downscaled_1_' + channel + '_' + str(l).zfill(4) + '.tif'))

            patch = Patch.createPatch(pZ, LMMosaicsPath)
            layerZ.add(patch)
            IJ.log('Setting the affineTransform ' + str(affTransform))
            patch.setAffineTransform(affTransform)
            patch.updateBucket()

            # flatten to the EM bounding box and save the cropped 8-bit image
            bb = Rectangle(0, 0, widthEM, heightEM)
            affineCroppedIm = loaderZ.getFlatImage(layerZ, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerZ.getAll(Patch), True, Color.black, None)
            affineCroppedImPath = os.path.join(affineCroppedFolder, 'affineCropped_' + channel + '_' + str(l).zfill(4) + '.tif')
            IJ.save(affineCroppedIm, affineCroppedImPath)
            affineCroppedIm.close()

            # reuse the single dummy layer for the next channel
            layerZ.remove(patch)
            layerZ.recreateBuckets()
            IJ.log('Has been written: ' + str(affineCroppedImPath))
fc.closeProject(pZ)  # close dummy trakem

# # # # # # create the median folders
# # # # # for channel in channels[:-2]: # the fluorescent channels, excluding the brightfield and contrastedBrightfield channels
    # # # # # affineCroppedFolder = os.path.join(LMFolder, 'affineCropped_' + channel)
    # # # # # finalLMFolder = fc.mkdir_p(os.path.join(LMFolder, 'finalLM_' + channel))
    # # # # # imPaths = [os.path.join(affineCroppedFolder, imName) for imName in fc.naturalSort(os.listdir(affineCroppedFolder))]
    # # # # # imStack = fc.stackFromPaths(imPaths)
    # # # # # IJ.run(imStack, 'Median 3D...', 'x=2 y=2 z=2')
    # # # # # stack = imStack.getImageStack()

    # # # # # for imId, imPath in enumerate(imPaths):
        # # # # # layerId = int(os.path.splitext((os.path.basename(imPath)))[0].split('_')[-1])

        # # # # # tileIndex = imStack.getStackIndex(0, 0, imId + 1) # to access the slice in the stack
        # # # # # finalIm = ImagePlus('finalLM_' + channel + '_' + str(layerId).zfill(4), stack.getProcessor(tileIndex).convertToByteProcessor())
        # # # # # finalImPath = os.path.join(finalLMFolder, 'finalLM_' + channel + '_' + str(layerId).zfill(4) + '.tif')
        # # # # # IJ.save(finalIm, finalImPath)

# # # # # # copy the brightfield and contrasted brightfield channels
# # # # # for channel in channels[-2:]:
    # # # # # affineCroppedFolder = os.path.join(LMFolder, 'affineCropped_' + channel)
    # # # # # finalLMFolder = fc.mkdir_p(os.path.join(LMFolder, 'finalLM_' + channel))
    # # # # # copy_tree(affineCroppedFolder, finalLMFolder)
    # # # # # for imName in os.listdir(finalLMFolder):
        # # # # # imPath = os.path.join(finalLMFolder, imName)
        # # # # # newImName = imName.replace('affineCropped_', 'finalLM_')
        # # # # # newImPath = os.path.join(finalLMFolder, newImName)
        # # # # # os.rename(imPath, newImPath)

# signal the orchestrator that this plugin is done
fc.terminatePlugin(namePlugin, MagCFolder)
diff --git a/export_alignedEMForRegistration.py new file mode 100644 index 0000000..62b76ee --- /dev/null +++
b/export_alignedEMForRegistration.py @@ -0,0 +1,51 @@
# === export_alignedEMForRegistration.py ===
# Fiji/Jython plugin: exports flat, downscaled (by LMEMFactor) 8-bit images of the
# aligned EM stack, a batch of layers per run; the orchestrator reruns the script
# until all layers are written (see fc.shouldRunAgain).
from __future__ import with_statement
from ij import IJ
from ij import Macro
import os
import time
import fijiCommon as fc
from mpicbg.trakem2.align import RegularizedAffineLayerAlignment
from java.awt.geom import AffineTransform
from java.awt import Rectangle
from java.util import HashSet
from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch
from ini.trakem2.imaging import Blending

from java.lang import Runtime, Thread
from java.util.concurrent.atomic import AtomicInteger

def exportLayer():
    # Worker run by fc.startThreads: atomically claims layer indices in
    # [currentWrittenLayer, currentWrittenLayer + nLayersAtATime) and exports each flat.
    while atom.get() < min(nLayers, currentWrittenLayer + nLayersAtATime + 1):
        k = atom.getAndIncrement()
        if k < min(nLayers, currentWrittenLayer + nLayersAtATime):
            IJ.log('Start exporting layer ' + str(k) + ' currentWrittenLayer - ' + str(currentWrittenLayer))
            fc.exportFlat(project, exportFolder, 1/float(LMEMFactor), baseName = 'alignedDownsampledEM', bitDepth = 8, layers = [k])

namePlugin = 'export_alignedEMForRegistration'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

MagCParams = fc.readMagCParameters(MagCFolder)
nLayersAtATime = MagCParams[namePlugin]['nLayersAtATime']
nThreads = MagCParams[namePlugin]['nThreads']

LMEMFactor = fc.getLMEMFactor(MagCFolder)
IJ.log('Exporting with LMEMFactor = ' + str(LMEMFactor))

projectPath = fc.findFilesFromTags(MagCFolder, ['EM', 'Project'])[0]
exportFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), namePlugin))
project, loader, layerset, nLayers = fc.openTrakemProject(projectPath)
temporaryFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), 'temporary_LMEMRegistration')) # to save contrasted images

# currentLayerPath stores in a file the current layer being processed by the script which is run several times
currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt')
currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nLayersAtATime)

atom = AtomicInteger(currentWrittenLayer)
fc.startThreads(exportLayer, wait = 0, nThreads = nThreads)

# project.save() # why do I save the project here ?
time.sleep(3)

fc.shouldRunAgain(namePlugin, currentWrittenLayer, nLayers, MagCFolder, project, increment = nLayersAtATime)
diff --git a/export_stitchedEMForAlignment.py new file mode 100644 index 0000000..f9d410e --- /dev/null +++ b/export_stitchedEMForAlignment.py @@ -0,0 +1,54 @@
# === export_stitchedEMForAlignment.py ===
# Fiji/Jython plugin: exports flat, downscaled 8-bit images of the stitched EM
# stack for the subsequent alignment step; batched and rerun like the script above.
from __future__ import with_statement

import ij
from ij import IJ, Macro
import os, time

import fijiCommon as fc

from mpicbg.trakem2.align import RegularizedAffineLayerAlignment

from java.lang import Runtime, Thread
from java.util.concurrent.atomic import AtomicInteger
from java.util import HashSet
from java.awt import Rectangle
from java.awt.geom import AffineTransform

from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch
from ini.trakem2.imaging import Blending

def exportLayer():
    # Worker: same claim-and-export pattern as export_alignedEMForRegistration,
    # but scaled by the EM downsampling factor.
    while atom.get() < min(nLayers, currentWrittenLayer + nLayersAtATime + 1):
        k = atom.getAndIncrement()
        if k < min(nLayers, currentWrittenLayer + nLayersAtATime):
            IJ.log('Start exporting layer ' + str(k) + ' currentWrittenLayer - ' + str(currentWrittenLayer))
            fc.exportFlat(project, exportFolder, 1/float(downsamplingFactor), baseName = 'stitchedDownsampledEM', bitDepth = 8, layers = [k])

namePlugin = 'export_stitchedEMForAlignment'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

MagCParams = fc.readMagCParameters(MagCFolder)
# cap the batch size at the number of available cores
nLayersAtATime = min(MagCParams[namePlugin]['nLayersAtATime'], Runtime.getRuntime().availableProcessors())
nThreads = MagCParams[namePlugin]['nThreads']

# getting downsamplingFactor
downsamplingFactor = MagCParams['downsample_EM']['downsamplingFactor']

projectPath = fc.cleanLinuxPath(fc.findFilesFromTags(MagCFolder, ['EM', 'Project'])[0])
fc.cleanLinuxPath(fc.findFilesFromTags(MagCFolder,['EM', 'Project'])[0]) +exportFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), namePlugin)) +project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) + +fc.resizeDisplay(layerset) # has been done in previous script but in case ... + +currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt') +currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nLayersAtATime) + +atom = AtomicInteger(currentWrittenLayer) +fc.startThreads(exportLayer, wait = 0, nThreads = nThreads) + +# project.save() # why do I save the project here ? To save mipmaps for subsequent faster processing ? Probably not needed ... +time.sleep(3) + +fc.shouldRunAgain(namePlugin, currentWrittenLayer, nLayers, MagCFolder, project, increment = nLayersAtATime) \ No newline at end of file diff --git a/fijiCommon.py b/fijiCommon.py new file mode 100644 index 0000000..edd63b5 --- /dev/null +++ b/fijiCommon.py @@ -0,0 +1,967 @@ +from __future__ import with_statement +# Python imports +import os, re, errno, string, shutil, ntpath, sys, time, threading +from sets import Set + +# Java imports +from java.util import ArrayList, HashSet +from java.awt import Rectangle, Color +from java.awt.geom import AffineTransform +from jarray import zeros, array +from java.lang import Math, Thread, Runtime +from java.util.concurrent.atomic import AtomicInteger + +# Fiji imports +import ij +from ij import IJ, Macro, ImagePlus, WindowManager, ImageStack +from ij.gui import WaitForUserDialog +from ij.process import ImageStatistics as IS +from ij.process import ImageConverter +import ij.io.OpenDialog +from ij.io import DirectoryChooser, FileSaver +from ij.gui import GenericDialog, NonBlockingGenericDialog +from ij.plugin.filter import GaussianBlur as Blur +from ij.plugin.filter import Filters +from ij.process import ByteProcessor, FloatProcessor + +from mpicbg.ij.clahe import Flat +from 
mpicbg.ij.plugin import NormalizeLocalContrast +from mpicbg.models import RigidModel2D, AffineModel2D, Point, PointMatch +from mpicbg.trakem2.transform import CoordinateTransformList + +from fiji.tool import AbstractTool +from fiji.selection import Select_Bounding_Box + +# TrakEM imports +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Display, Patch +from ini.trakem2.utils import Utils +from ini.trakem2.io import CoordinateTransformXML +from ini.trakem2.tree import LayerTree +from mpicbg.trakem2.align import Align, AlignTask + +from register_virtual_stack import Register_Virtual_Stack_MT +from register_virtual_stack import Transform_Virtual_Stack_MT + + +################ +# File and I/O operations +################ + +def folderFromPath(path): #folders have an ending os.sep + head, tail = ntpath.split(path) + return head + os.sep + +def nameFromPath(path): + head, tail = ntpath.split(path) + return os.path.splitext(tail)[0] + +def folderNameFromFolderPath(path): + head, tail = ntpath.split(path) + head, tail = ntpath.split(head) + return tail + +def mkdir_p(path): + path = os.path.join(path, '') + try: + os.mkdir(path) + IJ.log('Folder created: ' + path) + except Exception, e: + if e[0] == 20047: + # IJ.log('Nothing done: folder already existing: ' + path) + pass + else: + IJ.log('Exception during folder creation :' + str(e)) + return path + +def promptDir(text): + folder = DirectoryChooser(text).getDirectory() + content = naturalSort(os.listdir(folder)) + IJ.log('Prompted for ' + text) + IJ.log('Selected folder :' + folder) + return folder, content + +def makeNeighborFolder(folder, name): + neighborFolder = folderFromPath(folder.rstrip(os.sep)) + name + os.sep + mkdir_p(neighborFolder) + IJ.log('NeighborFolder created: ' + neighborFolder) + return neighborFolder + +def getPath(text): + path = IJ.getFilePath(text) + IJ.log('File selected: ' + path) + return path + +def naturalSort(l): + convert = lambda text: int(text) if 
def getOK(text):
    # Yes/No prompt; returns True when the user clicks Yes/OK.
    gd = GenericDialog('User prompt')
    gd.addMessage(text)
    gd.hideCancelButton()
    gd.enableYesNoCancel()
    gd.showDialog()
    return gd.wasOKed()

def getName(text, defaultName = ''):
    # Prompt for a string; returns None when the user cancels.
    gd = GenericDialog(text)
    gd.addStringField(text, defaultName)
    gd.showDialog()
    if gd.wasCanceled():
        print('User canceled dialog!')
        return
    return gd.getNextString()

def getNumber(text, default = 0, decimals = 3):
    # Prompt for a number; returns None when the user cancels.
    gd = GenericDialog(text)
    gd.addNumericField(text, default, decimals) # show *decimals* decimals
    gd.showDialog()
    if gd.wasCanceled():
        IJ.log('User canceled dialog!')
        return
    return gd.getNextNumber()

def getNamesFromFolderExt(folder, extension = '.tif'):
    # Return the naturally-sorted names (not paths) in *folder* with *extension*.
    list = os.listdir(folder)
    list = filter(lambda x: os.path.splitext(x)[1] == extension, list)
    list = naturalSort(list)
    return list

def findFilesFromTags(folder, tags):
    # Recursively find files whose NAME contains all *tags*; naturally sorted.
    IJ.log('Looking for files in ' + folder + ' that match the following tags: ' + str(tags))
    filePaths = []
    for (dirpath, dirnames, filenames) in os.walk(folder):
        for filename in filenames:
            if (all(map(lambda x: x in filename, tags)) == True):
                path = os.path.join(dirpath, filename)
                filePaths.append(path)
                IJ.log('Found this file: ' + path)
    filePaths = naturalSort(filePaths)
    return filePaths

def findFoldersFromTags(folder, tags):
    # Recursively find folders whose NAME contains all *tags*; naturally sorted,
    # each returned with a trailing separator.
    IJ.log('Looking for folders in ' + folder + ' that match the following tags: ' + str(tags))
    folderPaths = []
    for (dirpath, dirnames, filenames) in os.walk(folder):
        for dirname in dirnames:
            if (all(map(lambda x: x in dirname, tags)) == True):
                path = os.path.join(dirpath, dirname, '')
                folderPaths.append(path)
                IJ.log('Found this folder: ' + path)
    folderPaths = naturalSort(folderPaths)
    return folderPaths

def displayInfoDialog(text, title = 'Info'):
    # Show a non-blocking info dialog; kept in the global so callers can close it later.
    global infoDialog
    infoDialog = NonBlockingGenericDialog(title)
    infoDialog.addMessage(text)
    infoDialog.setLocation(0, 0)
    infoDialog.setSize(400, 800)
    infoDialog.show()
    return infoDialog

def toString(*a):
    # str() and concatenate all arguments.
    return ''.join(map(str, a))

def getRefChannel(channels, text = 'Reference Channel'): #dialog prompt to choose the reference channel
    gd = GenericDialog(text)
    gd.addChoice("output as", channels, channels[0])
    gd.showDialog()
    if gd.wasCanceled():
        print("User canceled dialog!")
        return
    return gd.getNextChoice()

def getMinMaxFor8Bit(minMaxs):
    # Prompt the user to adjust the {channel: [min, max]} display ranges; returns
    # the updated dict, or None on cancel.
    gd = GenericDialog('Enter the min and max for each channel for the 8 bit transformation')
    for channel in minMaxs.keys():
        gd.addNumericField('Min ' + channel, minMaxs[channel][0], 2) # show 2 decimals
        gd.addNumericField('Max ' + channel, minMaxs[channel][1], 2)
    gd.showDialog()
    if gd.wasCanceled():
        IJ.log('User canceled dialog!')
        return
    for channel in minMaxs.keys():
        minMaxs[channel][0] = gd.getNextNumber()
        minMaxs[channel][1] = gd.getNextNumber()
    return minMaxs

def cleanLinuxPath(path):
    # Strip one leading separator from a double-separator ('//...') prefix;
    # other paths are returned unchanged.
    if path[:2] == os.sep + os.sep:
        returnPath = path[1:]
        IJ.log('Path cleaned from ' + path + '\n to \n' + returnPath)
    else:
        returnPath = path
    return returnPath

####################
# Affine Transforms
####################

def readWriteXMLTransform(projectPath, layerIndex, folder):
    '''
    1-Take the TrakEM project 'projectPath'
    2-Read the transformation of the first patch of layer 'layerIndex'
    3-Read the locations of the first patch of layer 'layerIndex' and layer '1-layerIndex'. This is used to calculate the offset of the EM and LM images in the initial transformation file
    4-Write as a simple text file in folder + title of the project + InitialTransform.txt
    '''
    project = Project.openFSProject(projectPath, False)
    layerset = project.getRootLayerSet()
    layers = layerset.getLayers()
    layer = layerset.getLayers().get(layerIndex)
    patches = layer.getDisplayables(Patch)
    t = patches.get(0).getAffineTransform()

    # NOTE(review): the double '.txt.txt' extension looks unintended — confirm
    # against whatever reads this file before changing it.
    transPath = folder + project.getTitle() + 'InitialTransform.txt.txt'
    f = open(transPath, 'w')
    f.write(str(t.getScaleX()) + "\n")
    f.write(str(t.getShearY()) + "\n")
    f.write(str(t.getShearX()) + "\n")
    f.write(str(t.getScaleY()) + "\n")
    f.write(str(t.getTranslateX()) + "\n")
    f.write(str(t.getTranslateY()) + "\n")

    f.write(str(layers.get(layerIndex).getDisplayables(Patch).get(0).getX()) + '\n')
    f.write(str(layers.get(layerIndex).getDisplayables(Patch).get(0).getY()) + '\n')
    f.write(str(layers.get(1-layerIndex).getDisplayables(Patch).get(0).getX()) + '\n')
    f.write(str(layers.get(1-layerIndex).getDisplayables(Patch).get(0).getY()) + '\n')
    f.close()
    IJ.log('Transformation saved in: ' + transPath)
    # read back the parameters of the transformation
    trans = []
    f = open(transPath, 'r')
    while 1:
        line = f.readline()
        if not line: break
        IJ.log(line)
        trans.append(float(line))
    f.close()  # FIX: was 'f.close' without parentheses — the file was never closed
    IJ.log('Transformation: ' + str(trans))
    closeProject(project)
    return trans
This is used to calculate the offset of the EM and LM images in the initial transformation file + 4-Write as a simple text file in folder + title of the project + InitialTransform.txt + ''' + project = Project.openFSProject(projectPath, False) + layerset = project.getRootLayerSet() + layers = layerset.getLayers() + layer = layerset.getLayers().get(layerIndex) + patches = layer.getDisplayables(Patch) + t = patches.get(0).getAffineTransform() + + transPath = folder + project.getTitle() + 'InitialTransform.txt.txt' + f = open(transPath,'w') + f.write( str(t.getScaleX()) + "\n") + f.write( str(t.getShearY())+ "\n") + f.write( str(t.getShearX())+ "\n") + f.write( str(t.getScaleY())+ "\n") + f.write( str(t.getTranslateX())+ "\n") + f.write( str(t.getTranslateY())+ "\n") + + f.write ( str( layers.get(layerIndex).getDisplayables(Patch).get(0).getX() ) + '\n') + f.write ( str( layers.get(layerIndex).getDisplayables(Patch).get(0).getY() ) + '\n') + f.write ( str( layers.get(1-layerIndex).getDisplayables(Patch).get(0).getX() ) + '\n') + f.write ( str( layers.get(1-layerIndex).getDisplayables(Patch).get(0).getY() ) + '\n') + f.close() + IJ.log('Transformation saved in: ' + transPath) + # read the parameters of the transformations + trans = [] + f = open(transPath,'r') + while 1: + line = f.readline() + if not line: break + IJ.log(line) + trans.append(float(line)) + f.close + IJ.log('Transformation: ' + str(trans)) + closeProject(project) + return trans + +def writeAffineTransforms(project,path): + with open(path,'w') as f: + layerset = project.getRootLayerSet() + for k,layer in enumerate(layerset.getLayers()): + patch = layer.getDisplayables(Patch).get(0) + t = patch.getAffineTransform() + f.write( str(t.getScaleX()) + "\n") + f.write( str(t.getShearY())+ "\n") + f.write( str(t.getShearX())+ "\n") + f.write( str(t.getScaleY())+ "\n") + f.write( str(t.getTranslateX())+ "\n") + f.write( str(t.getTranslateY())+ "\n") + return + +def writeAllAffineTransforms(project,path): + 
layerset = project.getRootLayerSet() + loader = project.getLoader() + with open(path,'w') as f: + for l,layer in enumerate(layerset.getLayers()): + for patch in layer.getDisplayables(Patch): + f.write(os.path.normpath(loader.getAbsolutePath(patch)) + '\n') + f.write(str(l) + '\n') + t = patch.getAffineTransform() + f.write( str(t.getScaleX()) + '\n') + f.write( str(t.getShearY())+ '\n') + f.write( str(t.getShearX())+ '\n') + f.write( str(t.getScaleY())+ '\n') + f.write( str(t.getTranslateX())+ '\n') + f.write( str(t.getTranslateY())+ '\n') + IJ.log('All affine Transforms saved in: ' + path) + return + +def readTransform(path): + IJ.log('Reading transformation file: ' + path) + trans = [] + f = open(path,'r') + while 1: + line = f.readline() + if not line: break + IJ.log(line) + try: + line = float(line) + except Exception, e: + pass + trans.append(float(line)) + f.close + return trans + +def readCoordinates(folder,tags): + content = os.listdir(folder) + for i in content: + if (all(map(lambda x:x in i,tags)) == True): + path = folder + i + IJ.log('This file matched the tag --' + str(tags) + '-- in the folder ' + folder + ' : ' + path) + f = open(path,'r') + x = [] + y = [] + for i, line in enumerate(f): + x.append(int(line.split("\t")[0])) + y.append(int(line.split("\t")[1])) + #x,y = map(lambda u: np.array(u),[x,y]) #fiji version, no numpy + f.close() + IJ.log('x = ' + str(x)) + IJ.log('y = ' + str(y)) + return x,y + +def readSectionCoordinates(path): + sections = [] + try: + f = open(path, 'r') + lines = f.readlines() + for line in lines: + points = line.split('\t') + points.pop() + section = [ [int(float(point.split(',')[0])), int(float(point.split(',')[1]))] for point in points ] + sections.append(section) + f.close() + except Exception, e: + IJ.log('Section coordinates not found. 
def writeRectangle(rectangle, path):
    # Serialize a java.awt.Rectangle as four lines: x, y, width, height.
    with open(path, 'w') as f:
        f.write(str(rectangle.x) + '\n')
        f.write(str(rectangle.y) + '\n')
        f.write(str(rectangle.width) + '\n')
        f.write(str(rectangle.height) + '\n')

def readRectangle(path):
    # Inverse of writeRectangle.
    with open(path, 'r') as f:
        res = []
        for i, line in enumerate(f):
            res.append(int(line))
        return Rectangle(res[0], res[1], res[2], res[3])

#################
# Image operations
#################

def crop(im, roi):
    # Return a new ImagePlus cropped to *roi*.
    ip = im.getProcessor()
    ip.setRoi(roi)
    im = ImagePlus(im.getTitle() + '_Cropped', ip.crop())
    return im

def localContrast(im, block = 127, histobins = 256, maxslope = 3):
    # CLAHE local contrast enhancement, in place; returns *im*.
    ipMaskCLAHE = ByteProcessor(im.getWidth(), im.getHeight())
    ipMaskCLAHE.threshold(-1)
    bitDepth = im.getBitDepth()
    # display range: full 8-bit, otherwise assume 12-bit data — TODO confirm
    if bitDepth == 8:
        maxDisp = Math.pow(2, 8) - 1
    else:
        maxDisp = Math.pow(2, 12) - 1

    ip = im.getProcessor()
    ip.setMinAndMax(0, maxDisp)
    if bitDepth == 8:
        ip.applyLut()
    Flat.getFastInstance().run(im, block, histobins, maxslope, ipMaskCLAHE, False)
    del ipMaskCLAHE
    return im

def edges(im):
    # ImageJ 'find edges' filter, in place; returns a renamed ImagePlus.
    filter = Filters()
    ip = im.getProcessor()
    filter.setup('edge', im)
    filter.run(ip)
    im = ImagePlus(os.path.splitext(im.getTitle())[0] + '_Edges', ip)
    return im

def blur(im, sigma):
    # Gaussian blur, in place; returns a renamed ImagePlus.
    blur = Blur()
    ip = im.getProcessor()
    blur.blurGaussian(ip, sigma, sigma, 0.0005)
    im = ImagePlus(os.path.splitext(im.getTitle())[0] + '_Blur', ip)
    return im

def normLocalContrast(im, x, y, stdev, center, stretch):
    # mpicbg local contrast normalization, in place; returns *im*.
    NormalizeLocalContrast().run(im.getProcessor(), x, y, stdev, center, stretch) # something like repaint needed ?
    return im

def resize(im, factor):
    # Bicubic resize by *factor*, in place; returns *im*.
    IJ.run(im, 'Size...', 'width=' + str(int(Math.floor(im.width * factor))) + ' height=' + str(int(Math.floor(im.height * factor))) + ' average interpolation=Bicubic')
    return im

def minMax(im, min, max):
    # Apply a [min, max] display range permanently (Apply LUT); returns *im*.
    IJ.setMinAndMax(im, min, max)
    IJ.run(im, 'Apply LUT', '')
    return im

def to8Bit(*args):
    # to8Bit(im[, min, max]): map [min, max] (default [0, 4095]) to 8-bit.
    im = args[0]
    if len(args) == 1:
        min = 0
        max = 4095
    else:
        min = args[1]
        max = args[2]
    ip = im.getProcessor()
    ip.setMinAndMax(min, max)
    IJ.run(im, '8-bit', '')
    return im

def stackFromPaths(paths):
    # Load all images in *paths* into a single-channel time stack (frames are
    # timepoints, as required by TrackMate).
    firstIm = IJ.openImage(paths[0])
    width = firstIm.getWidth()
    height = firstIm.getHeight()
    firstIm.close()

    ims = ImageStack(width, height) # assemble the ImageStack of the channel
    for path in paths:
        ims.addSlice(IJ.openImage(path).getProcessor())
    imp = ImagePlus('Title', ims)
    imp.setDimensions(1, 1, len(paths)) # these have to be timeframes for trackmate
    return imp

def rawToPeakEnhanced(im, min = 200, max = 255):
    # Invert, locally normalize, median-filter and threshold-stretch, in place.
    im.getProcessor().invert()
    IJ.run(im, "Normalize Local Contrast", "block_radius_x=15 block_radius_y=15 standard_deviations=3 center stretch")
    IJ.run(im, "Median...", "radius=1")
    # FIX: was minMax(im, 200, 255), which ignored the min/max parameters
    minMax(im, min, max)
    return im

def getModelFromPoints(sourcePoints, targetPoints):
    # Fit a rigid 2D model mapping sourcePoints onto targetPoints.
    rigidModel = RigidModel2D()
    pointMatches = HashSet()
    for a in zip(sourcePoints, targetPoints):
        pm = PointMatch(Point([a[0][0], a[0][1]]), Point([a[1][0], a[1][1]]))
        pointMatches.add(pm)
    rigidModel.fit(pointMatches)
    return rigidModel

##############
# Trakem utils
##############

def initTrakem(path, nbLayers, mipmaps = False): #initialize a project
    # Create a blank TrakEM2 project with *nbLayers* layers (not saved yet).
    path = cleanLinuxPath(path)
    ControlWindow.setGUIEnabled(False)
    project = Project.newFSProject("blank", None, path)
    project.getLoader().setMipMapsRegeneration(mipmaps)
    layerset = project.getRootLayerSet()
    for i in range(nbLayers): # create the layers
        layerset.getLayer(i, 1, True)
    project.getLayerTree().updateList(layerset) #update the LayerTree
    Display.updateLayerScroller(layerset) # update the display slider
    IJ.log('TrakEM project initialized with ' + str(nbLayers) + ' layers and stored in ' + path + ' (but not saved yet)')
    return project
layerset.getLayer(i, 1, True) + project.getLayerTree().updateList(layerset) #update the LayerTree + Display.updateLayerScroller(layerset) # update the display slider + IJ.log('TrakEM project initialized with ' + str(nbLayers) + ' layers and stored in ' + path + ' (but not saved yet)') + return project + +def initProject(path, nbLayers, mipmaps = False): #initialize a project + path = cleanLinuxPath(path) + ControlWindow.setGUIEnabled(False) + project = Project.newFSProject("blank", None, path) + loader = project.getLoader() + loader.setMipMapsRegeneration(mipmaps) + layerset = project.getRootLayerSet() + for i in range(nbLayers): # create the layers + layerset.getLayer(i, 1, True) + project.getLayerTree().updateList(layerset) #update the LayerTree + Display.updateLayerScroller(layerset) # update the display slider + IJ.log('TrakEM project initialized with ' + str(nbLayers) + ' layers and stored in ' + path + ' (but not saved yet)') + return project, loader, layerset + +def exportFlat(project,outputFolder,scaleFactor, baseName = '', bitDepth = 8, layers = [], roi = ''): + layerset = project.getRootLayerSet() + loader = project.getLoader() + for l,layer in enumerate(layerset.getLayers()): + if (layers ==[] ) or (l in layers): + IJ.log('Exporting layer ' + str(l)) + if roi == '': + roiExport = layerset.get2DBounds() + else: + roiExport = roi + if bitDepth == 8: + imp = loader.getFlatImage(layer,roiExport,scaleFactor, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + elif bitDepth == 16: + imp = loader.getFlatImage(layer,roiExport,scaleFactor, 0x7fffffff, ImagePlus.GRAY16, Patch, layer.getAll(Patch), True, Color.black, None) + savePath = os.path.join(outputFolder, baseName + '_' + str(l).zfill(4) + '.tif') + IJ.save(imp, savePath) + IJ.log('Layer ' + str(l) +' flat exported to ' + savePath) + imp.close() + +def exportFlatForPresentations(project,outputFolder,scaleFactor,rectangle): + layerset = project.getRootLayerSet() + loader = 
project.getLoader() + for l,layer in enumerate(layerset.getLayers()): # import the patches + IJ.log('Exporting layer ' + str(l) + 'with rectangle ' + str(rectangle) + 'scale factor ' + str(scaleFactor)) + imp = loader.getFlatImage(layer,rectangle,scaleFactor, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + IJ.save(imp,outputFolder + os.sep + nameFromPath(outputFolder.rstrip(os.sep)) + '_' + str(l) + '.tif') #use the name of the outputFolder to name the images + IJ.log('Layer ' + str(l)+' flat exported to ' + outputFolder + os.sep + nameFromPath(outputFolder.rstrip(os.sep)) + '_' + str(l) + '.tif') + imp.close() + +def exportFlatCloseFiji(project,outputFolder,scaleFactor): + #todo: check whether the output file already exists. If yes, skip + for l,layer in enumerate(layerset.getLayers()): + savePath = outputFolder + os.sep + nameFromPath(outputFolder.rstrip(os.sep)) + '_' + str(l) + '.tif' + savePathNext = outputFolder + os.sep + nameFromPath(outputFolder.rstrip(os.sep)) + '_' + str(l+1) + '.tif' + if os.isfile(savePathNext): + IJ.log('Skipping layer ' + str(l) + ': already processed') + else: + IJ.log('Exporting layer ' + str(layer) + '; layer number ' + str(l)) + layerset = project.getRootLayerSet() + loader = project.getLoader() + imp = loader.getFlatImage(layer,layerset.get2DBounds(),scaleFactor, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + IJ.save(imp, savePath) + IJ.log('Layer ' + str(layerCurrent)+' flat exported to ' + savePath) + imp.close() + IJ.log('exportFlatCloseFiji has reached the end') + +def exportFlatRoi(project, scaleFactor, x, y, w, h, layer, saveName): + loader = project.getLoader() + rectangle = Rectangle(x-int(w/2),y-int(h/2),w,h) + patches = layer.find(Patch, x, y) + print patches + # IJ.log('patches' + str(patches)) + + for p, patch in enumerate(patches): + visible = patch.visible + patch.visible = True + tiles = ArrayList() + tiles.add(patch) + print 'tiles',tiles + 
def closeProject(project):
    # Close a TrakEM2 project without the 'save changes?' dialog; tolerates
    # there being no open project.
    try:
        project.getLoader().setChanged(False) #no dialog if there are changes
        project.destroy()
    except Exception, e:
        IJ.log('Was asked to close a project, but failed (probably no open project available)')
        pass

def resizeDisplay(layerset):
    # Shrink the layerset canvas to the minimal bounds of its content.
    # layerset.setDimensions(1,1,1,1)
    # layerset.enlargeToFit(layerset.getDisplayables(Patch))
    layerset.setMinimumDimensions()

def setChannelVisible(project, channel):
    # Make every patch whose file name starts with *channel* visible.
    layerset = project.getRootLayerSet()
    layers = layerset.getLayers()
    for l, layer in enumerate(layers):
        patches = layer.getDisplayables(Patch)
        for patch in patches:
            patchName = nameFromPath(patch.getImageFilePath())
            if patchName[0:len(channel)] == channel:
                patch.setVisible(True, True)
            # else:
                # patch.setVisible(False, True)
    try:
        Display.getFront().updateVisibleTabs()
    except Exception, e:
        IJ.log('Did not succeed in updating the visible tabs')
        pass

def setChannelInvisible(project, channel):
    # Make every patch whose file name starts with *channel* invisible.
    layerset = project.getRootLayerSet()
    layers = layerset.getLayers()
    for l, layer in enumerate(layers):
        patches = layer.getDisplayables(Patch)
        for patch in patches:
            patchName = nameFromPath(patch.getImageFilePath())
            if patchName[0:len(channel)] == channel:
                patch.setVisible(False, True)
    Display.getFront().updateVisibleTabs()

def toggleChannel(project, channel):
    # Toggle the visibility of every patch whose file name starts with *channel*.
    layerset = project.getRootLayerSet()
    layers = layerset.getLayers()
    for l, layer in enumerate(layers):
        patches = layer.getDisplayables(Patch)
        for patch in patches:
            # IJ.log('thepatch' + str(patch))
            patchName = nameFromPath(patch.getImageFilePath())
            # IJ.log(str(patchName[0:len(channel)]) + '=' + str(channel) + ' value' + str(patchName[0:len(channel)] == channel) )
            if patchName[0:len(channel)] == channel:
                # IJ.log(str(patch) + ' toggled')
                patch.setVisible((not patch.visible), True)
    Display.getFront().updateVisibleTabs()

def openTrakemProject(path, mipmap = False):
    # Open an existing TrakEM2 project; returns (project, loader, layerset, nLayers).
    project = Project.openFSProject(cleanLinuxPath(path), False)
    return getProjectUtils(project, mipmap)

def getProjectUtils(project, mipmap = False):
    # Return (project, loader, layerset, nLayers) for an open project.
    loader = project.getLoader()
    loader.setMipMapsRegeneration(mipmap)
    layerset = project.getRootLayerSet()
    nLayers = len(layerset.getLayers())
    return project, loader, layerset, nLayers

def createImportFile(folder, paths, locations, factor = 1, layers = None, name = ''):
    # Write a TrakEM2 bulk-import file: one 'path <tab> x <tab> y <tab> layer'
    # line per image; locations are scaled by *factor*, layer defaults to 0.
    importFilePath = os.path.join(folder, 'trakemImportFile' + name + '.txt')
    with open(importFilePath, 'w') as f:
        for id, path in enumerate(paths):
            xLocation = int(locations[id][0] * factor)
            yLocation = int(locations[id][1] * factor)
            path = cleanLinuxPath(path)
            if layers:
                IJ.log('Inserting image ' + path + ' at (' + str(xLocation) + ' ; ' + str(yLocation) + ' ; ' + str(layers[id]) + ')')
                f.write(str(path) + '\t' + str(xLocation) + '\t' + str(yLocation) + '\t' + str(layers[id]) + '\n')
            else:
                IJ.log('Inserting image ' + path + ' at (' + str(xLocation) + ' ; ' + str(yLocation) + ')' )
                f.write(str(path) + '\t' + str(xLocation) + '\t' + str(yLocation) + '\t' + str(0) + '\n')
    return importFilePath

def rigidAlignment(projectPath, params, name = '', boxFactor = 1):
    # rigid alignment outside trakem2 with register virtual stack plugin because bug in trakem2
    # Exports a centered box (boxFactor of the bounds) of every layer, aligns the
    # exports with Register Virtual Stack, then applies the resulting affines
    # back onto the TrakEM2 patches. Saves and closes the project.
    projectFolder = os.path.dirname(projectPath)
    projectName = os.path.splitext(os.path.basename(projectPath))[0]
    project, loader, layerset, nLayers = openTrakemProject(projectPath)

    exportForRigidAlignmentFolder = mkdir_p(os.path.join(projectFolder, 'exportForRigidAlignment'))
    resultRigidAlignmentFolder = mkdir_p(os.path.join(projectFolder, 'resultRigidAlignment'))

    bb = layerset.get2DBounds()
    roi = Rectangle(int(bb.width/2 * (1 - boxFactor)), int(bb.height/2 * (1 - boxFactor)), int(bb.width*boxFactor), int(bb.height*boxFactor))
    boxPath = os.path.join(resultRigidAlignmentFolder, 'alignmentBox.txt')
    writeRectangle(roi, boxPath)

    exportFlat(project, exportForRigidAlignmentFolder, 1, baseName = 'exportForRigidAlignment', roi = roi)

    referenceName = naturalSort(os.listdir(exportForRigidAlignmentFolder))[0]
    use_shrinking_constraint = 0
    IJ.log('Rigid alignment with register virtual stack')
    Register_Virtual_Stack_MT.exec(exportForRigidAlignmentFolder, resultRigidAlignmentFolder, resultRigidAlignmentFolder, referenceName, params, use_shrinking_constraint)
    time.sleep(2)
    IJ.log('Warning: rigidAlignment closing all existing windows')
    WindowManager.closeAllWindows() # problematic because it also closes the log window

    # IJ.getImage().close()

    for l, layer in enumerate(layerset.getLayers()):
        imagePath = os.path.join(exportForRigidAlignmentFolder, 'exportForRigidAlignment_' + str(l).zfill(4) + '.tif')
        transformPath = os.path.join(resultRigidAlignmentFolder, 'exportForRigidAlignment_' + str(l).zfill(4) + '.xml')
        aff = getAffFromRVSTransformPath(transformPath)

        for patch in layer.getDisplayables(Patch):
            patch.setLocation(patch.getX()-roi.x, patch.getY()-roi.y) # compensate for the extracted bounding box
            # patch.setLocation(roi.x, roi.y) # compensate for the extracted bounding box
            currentAff = patch.getAffineTransform()
            currentAff.preConcatenate(aff)
            patch.setAffineTransform(currentAff)

    resizeDisplay(layerset)
    project.save()
    closeProject(project)
    IJ.log('All LM layers have been aligned in: ' + projectPath)
def getAffFromRVSTransformPath(path):
    # Read a Register Virtual Stack .xml transform and collapse it to a single
    # java AffineTransform.
    theList = ArrayList(HashSet())
    read = CoordinateTransformXML.parse(path)

    if type(read) == CoordinateTransformList:
        read.getList(theList)
    else:
        theList.add(read)

    if theList.size() == 1: # because RVS either stores only one affine or one affine and one translation
        aff = theList.get(0).createAffine()
    else:
        aff = theList.get(0).createAffine()
        aff1 = theList.get(1).createAffine()
        aff.preConcatenate(aff1) # option 2

    return aff

################
# Pipeline utils
################

def saveLog(path):
    # Append the content of the ImageJ Log window to *path*, then close the window.
    logWindows = WindowManager.getWindow('Log')
    textPanel = logWindows.getTextPanel()
    theLogText = textPanel.getText().encode('utf-8')
    with open(path, 'a') as f:
        f.write('The log has been saved at this time: ' + time.strftime('%Y%m%d-%H%M%S') + '\n')
        f.write(theLogText)
    logWindows.close()
    return

# def readSessionMetadata(folder):
    # sessionMetadataPath = findFilesFromTags(folder,['session','metadata'])[0]
    # with open(sessionMetadataPath,'r') as f:
        # lines = f.readlines()
        # width, height, nChannels, xGrid, yGrid, scaleX, scaleY = lines[1].replace('\n','').split('\t')
        # channels = []
        # for i in range(int(nChannels)):
            # l = lines[3+i]
            # l = l.replace('\n','')
            # l = l.split('\t')
            # channels.append(l[0])
    # return int(width), int(height), int(nChannels), int(xGrid), int(yGrid), float(scaleX), float(scaleY), channels

def readSessionMetadata(folder):
    # Read the LM session metadata from the 'LM_Metadata' parameter file; returns
    # [width, height, nChannels, xGrid, yGrid, scaleX, scaleY, channels].
    LMMetadataPath = findFilesFromTags(folder, ['LM_Metadata'])[0]
    LMMetadata = readParameters(LMMetadataPath)
    channels = map(str, LMMetadata['channels'])
    LMMetadata['channels'] = [str(channel.replace('"', '').replace("'", '')) for channel in channels]

    return [LMMetadata[parameter] for parameter in ['width', 'height', 'nChannels', 'xGrid', 'yGrid', 'scaleX', 'scaleY', 'channels']]

def readMagCParameters(MagCFolder):
    # Read the pipeline-wide 'MagC_Parameters' file into a nested dict.
    MagCParametersPath = findFilesFromTags(MagCFolder, ['MagC_Parameters'])[0]
    return readParameters(MagCParametersPath)

def readParameters(path):
    """Parse a MagC parameter file into a dict.

    Lines of the form 'key = v1, v2, ...' become d[key]; values are converted to
    int (or float when they contain '.') when possible, otherwise kept as
    strings; a single value is unwrapped from its list. Lines starting with
    '##### Plugin <name>' open a per-plugin sub-dict d[<name>][key]; other
    '#' lines are comments. Files without plugin headers yield a flat dict.
    """
    key0 = ''
    d = {}
    with open(path, 'r') as f:
        lines = f.readlines()
        for line in lines:
            if (len(line) > 12) and line[:12] == '##### Plugin':
                key0 = line.replace(' ', '').replace('#', '').replace('Plugin', '').replace('\n', '').replace('\r', '')
                if key0 not in d:
                    d[key0] = {}
            elif line[0] != '#' and '=' in line:
                line = line.split('=')
                cleanedLine = line[1].replace(' ', '').replace('\n', '').replace('[', '').replace(']', '').replace('\r', '')
                key = line[0].replace(' ', '')
                values = []
                for value in cleanedLine.split(','):
                    try:
                        if '.' in value:
                            value = float(value)
                        else:
                            value = int(value)
                    except Exception as e:  # non-numeric: keep the raw string
                        pass
                    values.append(value)

                if len(values) == 1: # to return a single element if there is a single value
                    values = values[0]

                if key0 == '': # then this is not the real MagC parameter file with the plugin information. It is a 'normal' parameter file.
                    d[key] = values
                else:
                    d[key0][key] = values
    return d
readParameters(path): + key0 = '' + d = {} + with open(path,'r') as f: + lines = f.readlines() + for line in lines: + if (len(line) > 12) and line[:12] == '##### Plugin': + key0 = line.replace(' ','').replace('#','').replace('Plugin','').replace('\n','').replace('\r','') + if key0 not in d: + d[key0] = {} + elif line[0] != '#' and '=' in line: + line = line.split('=') + cleanedLine = line[1].replace(' ','').replace('\n','').replace('[', '').replace(']', '').replace('\r', '') + key = line[0].replace(' ','') + values = [] + for value in cleanedLine.split(','): + try: + if '.' in value: + value = float(value) + else: + value = int(value) + except Exception, e: + pass + values.append(value) + + if len(values) == 1: # to return a single element if there is a single value + values = values[0] + + if key0 == '': # then this is not the real MagC parameter file with the plugin information. It is a 'normal' parameter file. + d[key] = values + else: + d[key0][key] = values + return d + +def startPlugin(namePlugin): # the argument received is always a folder + IJ.log('Plugin ' + namePlugin + ' started at ' + str(time.strftime('%Y%m%d-%H%M%S'))) + externalArguments = Macro.getOptions().replace('"','').replace(' ','') + externalArguments = os.path.normpath(externalArguments) + return externalArguments + +def terminatePlugin(namePlugin, MagCFolder, signalingMessage = 'kill me'): + signalingPath = os.path.join(MagCFolder, 'signalingFile_' + namePlugin + '.txt') + IJ.log('signalingPath ' + signalingPath) + logFolder = mkdir_p(os.path.join(MagCFolder, 'MagC_Logs')) + logPath = os.path.join(logFolder, 'log_' + namePlugin + '.txt') + IJ.log('Plugin ' + namePlugin + ' terminated at ' + str(time.strftime('%Y%m%d-%H%M%S'))) + saveLog(logPath) + f = open(signalingPath, 'w') + f.write(signalingMessage) + f.close() + # IJ.run('Quit') + +#ToDo: shouldRunAgain should receive the path of the counter file, so that it can delete the file when it is done +def shouldRunAgain(namePlugin, l, 
nLayers, MagCFolder, project, increment = 1): + logFolder = mkdir_p(os.path.join(MagCFolder, 'MagC_Logs')) + logPath = os.path.join(logFolder, 'log_' + namePlugin + '.txt') + if l + increment < nLayers: + IJ.log('Plugin ' + namePlugin + ' still running at ' + str(time.strftime('%Y%m%d-%H%M%S')) + '. ' + str(min(l + increment, nLayers)) + '/' + str(nLayers) + ' done.' ) + time.sleep(3) + closeProject(project) + time.sleep(1) + saveLog(logPath) + terminatePlugin(namePlugin, MagCFolder, signalingMessage = 'kill me and rerun me') + # sys.exit(2) + else: + IJ.log(namePlugin + ' done.') + saveLog(logPath) + time.sleep(2) + closeProject(project) + terminatePlugin(namePlugin, MagCFolder) + # sys.exit(0) + +def incrementCounter(path, increment = 1): + l='' + if not os.path.isfile(path): + l = 0 + else: + with open(path, 'r') as f: + l = int(f.readline()) + with open(path, 'w') as f: + f.write(str(l+increment)) + return l + +def getLMEMFactor(MagCFolder): + LMEMFactorPath = os.path.join(MagCFolder, 'LMEMFactor.txt') + if not os.path.isfile(LMEMFactorPath): + # LMEMFactor = getNumber('What is the scaling factor to go from LM to EM ? \n Open brightfield LM exported_scale_1 and an EM exportForAlignment and compare the scales \n (taking into account the factor 20)', default = 7, decimals = 1) + warningUserInputText = '''********************************************************************** + ********************************************************************** + ****************** WARNING ****************** USER INPUT NEEDED ****************** : + What is the scaling factor to go from LM to EM ? 
\n Open brightfield LM exported_scale_1 and an EM exportForAlignment and compare the scales \n (taking into account the factor 20) \n + Save a file named LMEMFactor.txt containing the factor, e.g., 7.4, in the root MagC folder then rerun the pipeline (probably starting from exportEMForRegistration) + ********************************************************************** + ********************************************************************** + ********************************************************************** + ''' + print warningUserInputText + IJ.log(warningUserInputText) + 8/0 + else: + with open(LMEMFactorPath, 'r') as f: + line = f.readlines()[0] + IJ.log(str(line)) + print line + LMEMFactor = float(line) + return LMEMFactor + + +def getEMLMScaleFactor(MagCFolder): + try: + IJ.log('Reading the EM pixel size') + EMMetadataPath = findFilesFromTags(MagCFolder,['EM', 'Metadata'])[0] + EMPixelSize = readParameters(EMMetadataPath)['pixelSize'] # in meters + IJ.log('The EM pixel size is ' + str(EMPixelSize) + ' m') + + IJ.log('Reading the LM pixel size') + LMMetadataPath = findFilesFromTags(MagCFolder,['LM_Metadata'])[0] + width, height, nChannels, xGrid, yGrid, scaleX, scaleY, channels = readSessionMetadata(MagCFolder) + LMPixelSize = scaleX * 1e-6 # in meters + EMLMScaleFactor = int(round(LMPixelSize / float(EMPixelSize))) + IJ.log('The EMLMScaleFactor is ' + str(EMLMScaleFactor)) + except Exception, e: + EMLMScaleFactor = 20 + IJ.log('Warning: the real EMLM scale factor could not be read. 
Outputing the default value instead: ' + str(EMLMScaleFactor)) + return EMLMScaleFactor + +def startThreads(function, fractionCores = 1, wait = 0, arguments = None, nThreads = None): + threads = [] + if nThreads == None: + threadRange = range(max(int(Runtime.getRuntime().availableProcessors() * fractionCores), 1)) + else: + threadRange = range(nThreads) + IJ.log('ThreadRange = ' + str(threadRange)) + for p in threadRange: + if arguments == None: + thread = threading.Thread(target = function) + else: + IJ.log('These are the arguments ' + str(arguments) + 'III type ' + str(type(arguments))) + thread = threading.Thread(group = None, target = function, args = arguments) + threads.append(thread) + thread.start() + IJ.log('Thread ' + str(p) + ' started') + time.sleep(wait) + for idThread, thread in enumerate(threads): + thread.join() + IJ.log('Thread ' + str(idThread) + 'joined') + +def readOrder(path): # read order from Concorde solution (custom format from script) or from manual solution (1 number per line) + with open(path, 'r') as f: + lines = f.readlines() + order = [] + if len(lines[0].split(' ')) == 2: # concorde TSP format + lines = lines[1:] + for line in lines: + order.append(int(line.split(' ')[0])) + # remove the dummy city 0 and apply a -1 offset + order.remove(0) + for id, o in enumerate(order): + order[id] = o-1 + + # save a human-readable file, will be used by the pipeline + saveFolder = os.path.join(os.path.dirname(os.path.normpath(path)), '') + orderPath = os.path.join(saveFolder, 'sectionOrder.txt') + if not os.path.isfile(orderPath): + with open(orderPath, 'w') as f: + for index in order: + f.write(str(index) + '\n') + else: + IJ.log('That is weird that I was asked to open the TSP solution file while a human-readable section order already exists') + else: # simple format, one index per line + for line in lines: + order.append(int(line.replace('\n', ''))) + IJ.log('Order read: ' + str(order)) + return order + +# def reorderProject(projectPath, 
reorderedProjectPath, order): + # folder = os.path.dirname(os.path.normpath(projectPath)) + + # pReordered, loaderReordered, layersetReordered, nLayers = getProjectUtils( initTrakem(folder, len(order)) ) + # pReordered.saveAs(reorderedProjectPath, True) + + # IJ.log('reorderedProjectPath ' + reorderedProjectPath) + + # project, loader, layerset, nLayers = openTrakemProject(projectPath) + + # for l,layer in enumerate(project.getRootLayerSet().getLayers()): + # IJ.log('Inserting layer ' + str(l) + '...') + # reorderedLayer = layersetReordered.getLayers().get(order.index(l)) + # for patch in layer.getDisplayables(): + # patchPath = loader.getAbsolutePath(patch) + # patchTransform = patch.getAffineTransform() + + # newPatch = Patch.createPatch(pReordered, patchPath) + # reorderedLayer.add(newPatch) + # newPatch.setAffineTransform(patchTransform) + # closeProject(project) + # resizeDisplay(layersetReordered) + # pReordered.save() + # closeProject(pReordered) + # IJ.log('Project reordering done') + +def reorderProject(projectPath, reorderedProjectPath, order): + if os.path.isfile(reorderedProjectPath): + os.remove(reorderedProjectPath) + shutil.copyfile(projectPath, reorderedProjectPath) + + project, loader, layerset, nLayers = openTrakemProject(reorderedProjectPath) + project.saveAs(reorderedProjectPath, True) + + for l,layer in enumerate(project.getRootLayerSet().getLayers()): + layer.setZ(order.index(l)) + project.getLayerTree().updateList(layerset) + + project.save() + closeProject(project) + IJ.log('Project reordering done') + +######### +# Garbage ? 
#########

def readWriteCurrentIndex(outputFolder,text):
    '''Return the current index stored in currentNumber_<text>.txt (1 if absent)
    and persist index+1. NOTE(review): overlaps with incrementCounter -- candidate for removal.'''
    layerFile = outputFolder + os.sep + 'currentNumber_' + text + '.txt'
    if os.path.isfile(layerFile):
        f = open(layerFile,'r')
        layerCurrent = int(f.readline())
        f.close()
    else:
        layerCurrent = 1
    f = open(layerFile,'w')
    f.write(str(layerCurrent + 1))
    f.close()
    IJ.log('Current index called from ' + layerFile + ' : ' + str(layerCurrent))
    return layerCurrent

def from32To8Bit(imPath):
    '''Convert the image at `imPath` to 8-bit (display range 0-255) in place.'''
    im = IJ.openImage(imPath)
    im.getProcessor().setMinAndMax(0,255)
    im = ImagePlus(im.getTitle(),im.getProcessor().convertToByteProcessor())
    IJ.save(im,imPath)
diff --git a/init_EM.py b/init_EM.py
new file mode 100644
index 0000000..1ce0547
--- /dev/null
+++ b/init_EM.py
@@ -0,0 +1,65 @@
#this script puts the acquired EM tiles into Trakem at the right positions
from __future__ import with_statement
import os, re, errno, string, shutil, time
import xml.etree.ElementTree as ET
from os import path

from java.awt.event import MouseAdapter, KeyEvent, KeyAdapter
from jarray import zeros, array
from java.util import HashSet, ArrayList
from java.awt.geom import AffineTransform
from java.awt import Color

from ij import IJ, Macro
from fiji.tool import AbstractTool
from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch
from ini.trakem2.utils import Utils
from ini.trakem2.display import Display, Patch
from mpicbg.trakem2.align import Align, AlignTask

import fijiCommon as fc

namePlugin = 'init_EM'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

EMDataFolder = os.path.join(MagCFolder, 'EMData')
MagCEMFolder = fc.makeNeighborFolder(EMDataFolder, 'MagC_EM')

# one subfolder per acquired section, in natural sort order
imageFolders = [os.path.join(EMDataFolder, sectionFolderName) for sectionFolderName in fc.naturalSort(os.walk(EMDataFolder).next()[1])]

nSections = len(imageFolders)
IJ.log('There are ' + str(nSections) + ' EM layers')

# reading EM metadata
EMMetadataPath = os.path.join(MagCEMFolder, 'EM_Metadata.txt')
try: # old Atlas format
    mosaicMetadata = os.path.join(imageFolders[0] , filter(lambda x: 'Mosaic' in x, os.listdir(imageFolders[0]))[0])
    root = ET.parse(mosaicMetadata).getroot()

    pixelSize = float(root.find('PixelSize').text)
    tileinfo = root.find('TileInfo')
    tileWidth = int(tileinfo.find('TileWidth').text)
    tileHeight = int(tileinfo.find('TileHeight').text)
    tileOverlapX = float(tileinfo.find('TileOverlapXum').text)
    tileOverlapY = float(tileinfo.find('TileOverlapYum').text)
    numTilesX = int(tileinfo.find('NumTilesX').text)
    numTilesY = int(tileinfo.find('NumTilesY').text)
    # effective tile footprint = tile size minus the overlap converted from um to pixels
    xPatchEffectiveSize = tileWidth - float(tileOverlapX * 1000 / float(pixelSize))
    # NOTE(review): uses tileOverlapX here -- likely a copy-paste bug, should
    # presumably be tileOverlapY; confirm before relying on yPatchEffectiveSize.
    yPatchEffectiveSize = tileHeight - float(tileOverlapX * 1000 / float(pixelSize))

    # writing EM metadata
    IJ.log('Writing the EM Metadata file')
    parameterNames = ['pixelSize', 'tileWidth', 'tileHeight', 'tileOverlapX', 'tileOverlapY', 'numTilesX', 'numTilesY', 'xPatchEffectiveSize', 'yPatchEffectiveSize', 'nSections']
    with open(EMMetadataPath, 'w') as f:
        f.write('# EM Metadata' + '\n')
        for parameterName in parameterNames:
            IJ.log('parameterName = ' + str(parameterName))
            parameterEntry = parameterName + ' = ' + str(vars()[parameterName]) + '\n'
            IJ.log(parameterEntry)
            f.write(parameterEntry)
except Exception, e: # standard metadata format from the EM_Imaging.py script
    shutil.copyfile(os.path.join(EMDataFolder, 'EM_Metadata.txt'), EMMetadataPath)

fc.terminatePlugin(namePlugin, MagCFolder)
\ No newline at end of file
diff --git a/mipmapToPrecomputed.py b/mipmapToPrecomputed.py
new file mode 100644
index 0000000..7f2754f
--- /dev/null
+++ b/mipmapToPrecomputed.py
@@ -0,0 +1,209 @@
#This concatenates the render output to get all slices
#convention : [root directory]/[tile width]x[tile height]/[level]/[z]/[row]/[col].[format]

# Inspired from:
# Copyright (c) 2016, 2017, Forschungszentrum Juelich GmbH
# Author: Yann Leprince
#
# This software is made available under the MIT licence, see LICENCE.txt.

from functools import partial
from multiprocessing import Pool
from PIL import Image
import skimage.io
import argparse
import os
import sys
import copy
import json
import gzip
import pickle
import time
import numpy as np


#Get the list of immediate subdirectories
def getSubFoldersNames(folder):
    return sorted([name for name in os.listdir(folder) if os.path.isdir(os.path.join(folder, name))])

#Get all the row-col pairs
def getRowColPairs(folder):
    '''Return the set of (row, col) tile coordinates present in any slice subfolder of `folder`.'''
    pairs = set()
    sliceFoldersNames = getSubFoldersNames(folder)
    for sliceFolderName in sliceFoldersNames:
        sliceFolder = os.path.join(folder, sliceFolderName)
        rowFolderNames = getSubFoldersNames(sliceFolder)
        for rowFolderName in rowFolderNames:
            row = int(rowFolderName)
            rowFolder = os.path.join(sliceFolder, rowFolderName)
            for mipmapName in os.listdir(rowFolder):
                col = int(os.path.splitext(mipmapName)[0])
                pairs.add((row, col))
    print('There are ', len(pairs), 'pairs')
    return pairs

def getMaxRowMaxCol(folder):
    '''Return the largest row index and largest col index over all slice subfolders.'''
    maxRow, maxCol = 0, 0
    sliceFoldersNames = getSubFoldersNames(folder)
    for sliceFolderName in sliceFoldersNames:
        sliceFolder = os.path.join(folder, sliceFolderName)
        rowFolderNames = getSubFoldersNames(sliceFolder)
        maxRow = max([*map(int, rowFolderNames)] + [maxRow])
        for rowFolderName in rowFolderNames:
            rowFolder = os.path.join(sliceFolder, rowFolderName)
            maxCol = max([*map(lambda x: int(os.path.splitext(x)[0]), os.listdir(rowFolder))] + [maxCol])
    return maxRow, maxCol

def getMinRowsMinCols(folder): # to get the offset
    '''Return, per resolution level, [minRow, minCol] over all slices of that level.'''
    allMins = []
    resolutionNames = getSubFoldersNames(folder)
    #print('resolutionNames', resolutionNames)
    for resolutionName in resolutionNames:
        resolutionFolder = os.path.join(folder, resolutionName)
        minRow, minCol = 999999999, 999999999
        sliceFoldersNames = getSubFoldersNames(resolutionFolder)
        #print('sliceFoldersNames', sliceFoldersNames)
        for sliceFolderName in sliceFoldersNames:
            sliceFolder = os.path.join(resolutionFolder, sliceFolderName)
            #print('sliceFolder', sliceFolder)
            rowFolderNames = getSubFoldersNames(sliceFolder)
            minRow = min([*map(int, rowFolderNames)] + [minRow])
            for rowFolderName in rowFolderNames:
                rowFolder = os.path.join(sliceFolder, rowFolderName)
                minCol = min([*map(lambda x: int(os.path.splitext(x)[0]), os.listdir(rowFolder))] + [minCol])
        allMins.append([minRow, minCol])
    print('allMins', allMins)
    return allMins

#Convert slices to raw chunks
def writer(rowColPair, level, mipmapLevelFolder, sliceMax, precomputedFolder, mipmapSize, info, blackImage, visualizationMode, offset):
    '''Convert the z-stack of mipmap tiles at (row, col) into precomputed raw chunks.

    Missing tiles are filled with `blackImage`; all-black chunks are skipped.
    `info` is the parsed Neuroglancer "info" JSON; chunks are gzipped for
    visualizationMode == 'local' and written raw for 'online'.'''
    if visualizationMode == 'local':
        RAW_CHUNK_PATTERN = '{key}/{0}-{1}/{2}-{3}/{4}-{5}.gz' # for local viewing with the HBP docker
    elif visualizationMode == 'online':
        RAW_CHUNK_PATTERN = '{key}/{0}-{1}_{2}-{3}_{4}-{5}' # for online viewing

    row, col = rowColPair

    mipmapLocationRow = row * mipmapSize
    mipmapLocationCol = col * mipmapSize

    chunkSize = info['scales'][level]['chunk_sizes'][0]
    size = info['scales'][level]['size']

    dataType = np.dtype(info['data_type']).newbyteorder('<')

    for sliceStart in range(0, sliceMax+1, chunkSize[2]): # or sliceMax + 1
    # for sliceStart in range(0, 3, chunkSize[2]): # or sliceMax + 1
        sliceEnd = min(sliceStart + chunkSize[2], size[2])

        # load z-stack of mipmaps
        mipmaps = []
        for sliceId in range(sliceStart, sliceEnd):
            mipmapPath = os.path.join(mipmapLevelFolder, str(sliceId), str(row), str(col) + '.png')
            if os.path.isfile(mipmapPath):
                mipmap = skimage.io.imread(mipmapPath)
            else:
                mipmap = blackImage
            # mipmap.T[-10:-5] = 255 # for debugging of the pixel shift
            # theMax = max(theMax, max(mipmap)) # to know whether the complete block is black, in which case I should probably write nothing. But is this case already handled by render ? Render does not render black mipmaps no ?
            mipmaps.append(mipmap)

        block = skimage.io.concatenate_images(mipmaps)

        if np.amax(block) > 0:

            # loop through all the chunks of this mipmap at this z chunk depth
            nChunkRow = mipmapSize//chunkSize[1]
            nChunkCol = mipmapSize//chunkSize[0]

            for chunkRow in range(nChunkRow):
                rowSlicing = np.s_[chunkRow * chunkSize[1] : (chunkRow + 1) * chunkSize[1]]
                chunkLocationRow = mipmapLocationRow + chunkSize[1] * chunkRow

                for chunkCol in range(nChunkCol):
                    chunkLocationCol = mipmapLocationCol + chunkSize[0] * chunkCol

                    colSlicing = np.s_[chunkCol * chunkSize[0] : (chunkCol + 1) * chunkSize[0]]

                    chunk = block[np.s_[:], rowSlicing, colSlicing]
                    if np.amax(chunk) > 0:
                        x_coords = chunkLocationCol, chunkLocationCol + chunkSize[0]
                        y_coords = chunkLocationRow, chunkLocationRow + chunkSize[1]
                        z_coords = sliceStart, sliceEnd

                        #chunk_name = RAW_CHUNK_PATTERN.format(x_coords[0]-offset[0], x_coords[1]-offset[0], y_coords[0]-offset[1], y_coords[1]-offset[1], z_coords[0], z_coords[1], key = info['scales'][level]['key'])
                        chunk_name = RAW_CHUNK_PATTERN.format(x_coords[0], x_coords[1], y_coords[0], y_coords[1], z_coords[0], z_coords[1], key = info['scales'][level]['key'])
                        chunkPath = os.path.join(precomputedFolder, chunk_name)

                        os.makedirs(os.path.dirname(chunkPath), exist_ok=True)
                        if not os.path.isfile(chunkPath):
                            # precomputed raw chunks are Fortran-ordered (x fastest)
                            chunk = np.asfortranarray(chunk) # could be done like this also

                            # for id, ch in enumerate(chunk):
                                # if id<3:
                                    # skimage.io.imshow(ch)
                                    # skimage.io.show()
                                    # 8/0

                            chunkByte = chunk.astype(dataType).tobytes()
                            # 8/0

                            if visualizationMode == 'local':
                                with gzip.open(chunkPath, 'wb') as f:
                                    f.write(chunkByte)
                            elif visualizationMode == 'online':
                                with open(chunkPath, 'wb') as f:
                                    f.write(chunkByte)

#Main function
def mipmapToPrecomputed(mipmapFolder, precomputedFolder, mipmapSize, infoPath, nThreads, threadId, visualizationMode):
    '''Walk every resolution level of `mipmapFolder` and write precomputed chunks.

    Work is split across cooperating processes: this instance handles only the
    (row, col) pairs with index threadId, threadId+nThreads, ...'''
    with open(infoPath) as f:
        info = json.load(f)
    blackImage = np.zeros((mipmapSize, mipmapSize)).astype('uint8')
    # p = Pool(processes = 1)

    allMins = getMinRowsMinCols(mipmapFolder)

    for idLevel, level in enumerate(getSubFoldersNames(mipmapFolder)): # loop through resolution levels
        if idLevel > -1: # for debug
            mipmapLevelFolder = os.path.join(mipmapFolder, level)
            level = int(level)

            offset = [allMins[idLevel][1] * mipmapSize, allMins[idLevel][0] * mipmapSize]

            sliceFolderIds = sorted(map(int, getSubFoldersNames(mipmapLevelFolder)))
            # sliceLimits = (sliceFolderNumbers[0], sliceFolderNumbers[-1] + 1) # take simply 0 and the max. There is a problem when not taking 0, I do not understand why
            # sliceLimits = (0, sliceFolderNumbers[-1] + 1) # take simply 0 and the max. There is a problem when not taking 0, I do not understand why
            sliceMax = max(sliceFolderIds)

            rowColPairs = getRowColPairs(mipmapLevelFolder)
            # maxRow, maxCol = getMaxRowMaxCol(mipmapLevelFolder) # for all sections
            # rowColPairs = list(np.ndindex(maxRow + 1, maxCol + 1)) # all (row,col) pairs to process for current level
            print('There are ', len(rowColPairs), 'pairs at level', level)

            rowColPairs = list(rowColPairs)
            rowColPairs = rowColPairs[threadId::nThreads] # only subset of tasks done by this thread

            for rowColPair in rowColPairs:
                writer(rowColPair, level, mipmapLevelFolder, sliceMax, precomputedFolder, mipmapSize, info, blackImage, visualizationMode, offset)


def parse_command_line(argv):
    '''Parse the positional command-line arguments of this converter script.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('mipmapFolder', help = 'Folder of the render mipmap files')
    parser.add_argument('precomputedFolder', help = 'Where do you want to save the precomputed raw data')
    parser.add_argument('mipmapSize', help = 'mipmapSize')
    parser.add_argument('infoPath', help = 'Where is the info file')
    parser.add_argument('nThreads', help = 'nThreads : tells this thread what to process (only tasks with id%nThreads == i)')
    parser.add_argument('threadId', help = 'threadId : tells this thread what to process (only tasks with id%nThreads == i)')
    parser.add_argument('visualizationMode', help = '"local" for local viewing, "online" for online viewing')
    args = parser.parse_args()
    return args

def main(argv):
    args = parse_command_line(argv)
    return mipmapToPrecomputed(args.mipmapFolder, args.precomputedFolder, int(args.mipmapSize), args.infoPath, int(args.nThreads), int(args.threadId), args.visualizationMode) or 0

if __name__ == "__main__":
    sys.exit(main(sys.argv))
diff --git a/montage_ElasticEM.py b/montage_ElasticEM.py
new file mode 100644
index 0000000..f7de667
--- /dev/null
+++ b/montage_ElasticEM.py
@@ -0,0 +1,102 @@
from __future__ import with_statement
import os, sys, time

import ij
from ij import IJ, Macro

import fijiCommon as fc

from mpicbg.trakem2.align import Align, AlignTask, ElasticMontage
from mpicbg.imagefeatures import FloatArray2DSIFT

from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch

from java.lang import Runtime, Thread
from java.util.concurrent.atomic import AtomicInteger

def elasticMontage():
    '''Worker: pull layer indices from the shared AtomicInteger `l` and montage
    each layer elastically, until the current batch of layers is exhausted.'''
    IJ.log('Thread called **************************')
    while l.get() < min(nLayers, currentWrittenLayer + nLayersAtATime + 1) :
        k = l.getAndIncrement()
        if k < min(nLayers, currentWrittenLayer + nLayersAtATime):
            IJ.log('Start montaging elastically layer ' + str(k))
            if layerset.getLayers().get(k).getNDisplayables() > 1: # some EM projects have a single large tile
                AlignTask().montageLayers(params, layerset.getLayers(k, k))

namePlugin = 'montage_ElasticEM'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

MagCParams = fc.readMagCParameters(MagCFolder)
nLayersAtATime = MagCParams[namePlugin]['nLayersAtATime']
nThreads = MagCParams[namePlugin]['nThreads']

projectPath = fc.cleanLinuxPath(fc.findFilesFromTags(MagCFolder,['EM', 'Project'])[0])

project, loader, layerset, nLayers = fc.openTrakemProject(projectPath)
IJ.log('Sleeping in case the opening of the large project takes some time ...')
time.sleep(20)

# parameters for elastic montage
params = ElasticMontage.Param().clone()
params.bmScale = 0.5
params.bmSearchRadius = 50
params.bmBlockRadius = 50

params.bmMinR = 0.1
params.bmMaxCurvatureR = 100
params.bmRodR = 1

params.bmUseLocalSmoothnessFilter = True
params.bmLocalModelIndex = 3
params.bmLocalRegionSigma = 100
params.bmMaxLocalEpsilon = 12
params.bmMaxLocalTrust = 3

params.isAligned = False
# params.isAligned = True # better to keep it to False
params.tilesAreInPlace = True

params.springLengthSpringMesh = 100
params.stiffnessSpringMesh = 0.1
params.maxStretchSpringMesh = 2000
params.maxIterationsSpringMesh = 1000
params.maxPlateauwidthSpringMesh = 200
params.useLegacyOptimizer = True

# params.dampSpringMesh
# params.maxNumThreads
# params.visualize

# resumable batching: the counter file remembers the first layer of this batch
currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt')
currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nLayersAtATime)
l = AtomicInteger(currentWrittenLayer)

# fc.startThreads(elasticMontage(), wait = 1, nThreads = nThreads) /!\ it does not work I do not understand why. Probably a java6 issue because it works in other scripts in java8 ...

# hand-rolled thread pool using java.lang.Thread (see comment above)
threads = []
for p in range(nThreads):
    thread = Thread(elasticMontage)
    threads.append(thread)
    thread.start()
    time.sleep(0.5)

for thread in threads:
    thread.join()


IJ.log( namePlugin + ' layer ' + str(currentWrittenLayer))
fc.resizeDisplay(layerset)
project.save()

IJ.log('Sleeping in case the saving of the large project takes some time ...')
time.sleep(20)

# save all transforms
transformsPath = os.path.join(os.path.dirname(projectPath) , namePlugin + '_Transforms.txt')
if l.get() > nLayers-1:
    fc.writeAllAffineTransforms(project,transformsPath)

fc.shouldRunAgain(namePlugin, currentWrittenLayer, nLayers, MagCFolder, project, increment = nLayersAtATime)
\ No newline at end of file
diff --git a/montage_LM.py b/montage_LM.py
new file mode 100644
index 0000000..81aa933
--- /dev/null
+++ b/montage_LM.py
@@ -0,0 +1,72 @@
from ij import IJ
from ij import Macro
import os
import time
import fijiCommon as fc
from mpicbg.trakem2.align import Align, AlignTask
from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch
from ini.trakem2.imaging import StitchingTEM
from ini.trakem2.imaging.StitchingTEM import PhaseCorrelationParam
from java.util.concurrent.atomic import AtomicInteger
from mpicbg.trakem2.align import Align, AlignTask
from ini.trakem2.imaging import Blending

# SIFT/optimizer parameters for least-squares montaging of the LM mosaics
params = Align.ParamOptimize().clone()
params.correspondenceWeight = 1
params.desiredModelIndex = 0
params.expectedModelIndex = 0
params.maxEpsilon = 1
params.minInlierRatio = 0.05
params.minNumInliers = 7
params.regularize = False
params.regularizerModelIndex = 0
# NOTE(review): `lambda` is a reserved word in CPython -- this attribute
# assignment only parses under Jython; confirm the target interpreter.
params.lambda = 0.01
params.maxIterations = 2000
params.maxPlateauwidth = 200
params.meanFactor = 3
params.filterOutliers = False
params.sift.fdBins = 8
params.sift.fdSize = 8
params.sift.initialSigma = 1.6
params.sift.maxOctaveSize = 1024
params.sift.minOctaveSize = 60
params.sift.steps = 6
tilesAreInPlaceIn = True
largestGraphOnlyIn = False
hideDisconnectedTilesIn = False
deleteDisconnectedTilesIn = False

def stitchLayers():
    '''Worker: pull layer indices from the shared AtomicInteger `atomicI`,
    montage and blend each layer, saving the project periodically.'''
    while atomicI.get() < nLayers:
        l = atomicI.getAndIncrement()
        if l < nLayers:
            IJ.log('Stitching layer ' + str(l))
            AlignTask().montageLayers(params, layerset.getLayers(l, l), tilesAreInPlaceIn, largestGraphOnlyIn, hideDisconnectedTilesIn, deleteDisconnectedTilesIn)
            IJ.log('Blending layer ' + str(l))
            Blending.blendLayerWise(layerset.getLayers(l, l), True, None)
            if l%10 == 0: # save project every 10 layers
                project.save()

namePlugin = 'montage_LM'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

# get mosaic size
MagCParameters = fc.readMagCParameters(MagCFolder)
mosaic = MagCParameters[namePlugin]['mosaic'] # e.g. [2,2]

# a [1,1] mosaic is a single tile per section: nothing to stitch
if mosaic !=[1,1]:
    projectPath = fc.findFilesFromTags(MagCFolder,['LMProject'])[0]
    projectName = os.path.basename(projectPath)

    project, loader, layerset, nLayers = fc.openTrakemProject(projectPath)

    nLayers = len(layerset.getLayers())

    atomicI = AtomicInteger(0)
    fc.startThreads(stitchLayers, fractionCores = 0.1)
    fc.resizeDisplay(layerset)
    project.save()
    fc.closeProject(project)
fc.terminatePlugin(namePlugin, MagCFolder)
\ No newline at end of file
diff --git a/preprocess_ForPipeline.py b/preprocess_ForPipeline.py
new file mode 100644
index 0000000..4ab6136
--- /dev/null
+++ b/preprocess_ForPipeline.py
@@ -0,0 +1,101 @@
from distutils.dir_util import copy_tree
import os, shutil, sys

import xml.etree.ElementTree as ET
from xml.dom import minidom

import ij
from ij import IJ

import fijiCommon as fc

namePlugin = 'preprocess_ForPipeline'
MagCFolder = fc.startPlugin(namePlugin)

try: # look in priority for sectionOrder, which means that it has already been processed
    orderPath = os.path.join(MagCFolder, filter(lambda x: 'sectionOrder' in x, os.listdir(MagCFolder))[0])
except Exception, e:
    orderPath = os.path.join(MagCFolder, filter(lambda x: 'solution' in x, os.listdir(MagCFolder))[0])

sectionOrder = fc.readOrder(orderPath)
IJ.log('sectionOrder: ' + str(sectionOrder))
print 'sectionOrder', sectionOrder

MagCParams = fc.readMagCParameters(MagCFolder)

executeLM = MagCParams[namePlugin]['executeLM']
executeEM = MagCParams[namePlugin]['executeEM']

##########################
########## LM ##########
##########################
if executeLM:
    sourceLMDataFolder = os.path.join(MagCFolder, 'LMData')
    targetLMDataFolder = os.path.join(MagCFolder, 'LMDataReordered')

    try:
        os.makedirs(targetLMDataFolder)
    except Exception, e:
        print 'Folder not created', targetLMDataFolder
        pass

    # copy the metadata into working folder. Rename the original file so that it is not used later (only the file in the working folder will be found if correct naming is used)
    for fileName in os.listdir(sourceLMDataFolder):
        if ('ata.txt' in fileName) and ('LM' in fileName):
            sourceLMMetadataPath = os.path.join(sourceLMDataFolder, fileName)
            shutil.copyfile(sourceLMMetadataPath, os.path.join(targetLMDataFolder, 'LM_Metadata.txt'))
            os.rename(sourceLMMetadataPath, os.path.join(sourceLMDataFolder, 'LM_Meta_Data.txt')) # the second '_' is important so that this file is not used later

    # copy in the correct oder the LMData
    for sourceId, targetId in enumerate(sectionOrder):
        IJ.log('LM prepipeline: processing section ' + str(sourceId))
        sourceFolder = os.path.join(sourceLMDataFolder, 'section_' + str(targetId).zfill(4))
        targetFolder = os.path.join(targetLMDataFolder, 'section_' + str(sourceId).zfill(4))
        try:
            os.makedirs(targetFolder)
        except Exception, e:
            print 'Folder not created', targetFolder
            pass

        # copy each image, renaming its section index from target to source numbering
        for sourceImageName in os.listdir(sourceFolder):
            targetImageName = sourceImageName.replace('section_' + str(targetId).zfill(4) , 'section_' + str(sourceId).zfill(4))
            sourceImagePath = os.path.join(sourceFolder, sourceImageName)
            targetImagePath = os.path.join(targetFolder, targetImageName)
            shutil.copyfile(sourceImagePath, targetImagePath)

##############################
########## EM ##########
##############################
if executeEM:
    sourceEMDataFolder = os.path.join(MagCFolder, 'EMDataRaw')
    targetEMDataFolder = os.path.join(MagCFolder, 'EMData')
    try:
        os.makedirs(targetEMDataFolder)
    except Exception, e:
        print 'Folder not created', targetEMDataFolder
        pass

    # Copy image files
    for sourceId, targetId in enumerate(sectionOrder):
        IJ.log('EM prepipeline: processing section ' + str(sourceId))
        # sourceSliceFolder = os.path.join(sourceEMDataFolder, 'section_' + str(targetId).zfill(4)) # with reordering
        sourceSliceFolder = os.path.join(sourceEMDataFolder, 'section_' + str(sourceId).zfill(4)) # without reordering
        targetSliceFolder = os.path.join(targetEMDataFolder, 'section_' + str(sourceId).zfill(4))
        try:
            os.makedirs(targetSliceFolder)
        except Exception, e:
            pass
        for fileName in os.listdir(sourceSliceFolder):
            shutil.copy(os.path.join(sourceSliceFolder, fileName), targetSliceFolder)
        # copy_tree(sourceSliceFolder, targetSliceFolder) # copytree is very slow ...

    # Copy metadata
    metadataName = filter(lambda x: 'EM_Metadata' in x, os.listdir(sourceEMDataFolder))[0]
    print 'metadataName', metadataName
    sourceMetaData = os.path.join(sourceEMDataFolder, metadataName)
    targetMetaData = os.path.join(targetEMDataFolder, 'EM_Metadata.txt')
    print 'sourceMetaData', sourceMetaData
    print 'targetMetaData', targetMetaData
    shutil.copyfile(sourceMetaData, targetMetaData)

fc.terminatePlugin(namePlugin, MagCFolder)
\ No newline at end of file
diff --git a/reorder_postElasticMontage.py b/reorder_postElasticMontage.py
new file mode 100644
index 0000000..fe0bde9
--- /dev/null
+++ b/reorder_postElasticMontage.py
@@ -0,0 +1,108 @@
# section reorder after elastic montage
from __future__ import with_statement
from ij import IJ
import os, time, shutil
import fijiCommon as fc
from ini.trakem2 import Project, ControlWindow
from ini.trakem2.display import Patch
from distutils.dir_util import copy_tree

namePlugin = 'reorder_postElasticMontage'
MagCFolder = fc.startPlugin(namePlugin)
ControlWindow.setGUIEnabled(False)

# read the order. If there is no order file, use default order 0,1,2,...
+try: # look in priority for sectionOrder, which means that the solution from concorde has already been processed + orderPath = os.path.join(MagCFolder, filter(lambda x: 'sectionOrder' in x, os.listdir(MagCFolder))[0]) +except Exception, e: + try: + orderPath = os.path.join(MagCFolder, filter(lambda x: 'solution' in x, os.listdir(MagCFolder))[0]) + except Exception, e: + orderPath = '' + +if os.path.isfile(orderPath): + newOrder = fc.readOrder(orderPath) +else: + newOrder = range(10000) + +# Reorder both the low EM and the EM projects +## low EM + +# downsamplingFactor +downsamplingFactor = MagCParameters['downsample_EM']['downsamplingFactor'] +factorString = str(int(1000000*downsamplingFactor)).zfill(8) + +MagCEMFolder = os.path.dirname(fc.cleanLinuxPath(fc.findFilesFromTags(MagCFolder,['montage_ElasticEM_Transforms.txt'])[0])) + +projectPath = os.path.join(MagCEMFolder, 'EMProject_' + factorString + '.xml') # this is the low res EM project +unorderedProjectPath = os.path.join(MagCEMFolder, 'LowEMProjectUnordered.xml') + +# if a reordering had already been made, reinitialize the unordered project +if os.path.isfile(unorderedProjectPath): + if os.path.isfile(projectPath): + os.remove(projectPath) + shutil.copyfile(unorderedProjectPath, projectPath) + project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) + project.saveAs(projectPath, True) + fc.closeProject(project) + os.remove(unorderedProjectPath) + +project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) +project.saveAs(unorderedProjectPath, True) +fc.closeProject(project) +os.remove(projectPath) + +# reorder low-res project +fc.reorderProject(unorderedProjectPath, projectPath, newOrder) + +## high EM +projectPath = fc.cleanLinuxPath(os.path.join(MagCFolder, 'MagC_EM', 'EMProject.xml')) # the high res EM project +unorderedProjectPath = os.path.join(os.path.dirname(projectPath), 'HighEMProjectUnordered.xml') + +# if a reordering had already been made, reinitialize the unordered 
project +if os.path.isfile(unorderedProjectPath): + if os.path.isfile(projectPath): + os.remove(projectPath) + shutil.copyfile(unorderedProjectPath, projectPath) + project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) + project.saveAs(projectPath, True) + fc.closeProject(project) + os.remove(unorderedProjectPath) + +project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) +project.saveAs(unorderedProjectPath, True) +fc.closeProject(project) +os.remove(projectPath) + +# reorder high-res project +fc.reorderProject(unorderedProjectPath, projectPath, newOrder) + +# Reorder the exported files: a few checks to know whether a reordering had already taken place or not. +exportFolder = fc.findFoldersFromTags(MagCFolder, ['export_stitchedEMForAlignment'])[0] +unorderedExportFolder = os.path.join(os.path.dirname(os.path.normpath(exportFolder)), 'unorderedExport_stitchedEMForAlignment') +print 'exportFolder', exportFolder +print 'unorderedExportFolder', unorderedExportFolder + +if os.path.exists(unorderedExportFolder): + IJ.log('### Unordered folder exists') + shutil.rmtree(exportFolder) + time.sleep(3) + copy_tree(unorderedExportFolder, exportFolder) +else: + IJ.log('### Unordered folder does not exist: create it') + copy_tree(exportFolder, unorderedExportFolder) +# at that stage the two folders are identical and unordered +for imName in os.listdir(exportFolder): + imPath = os.path.join(exportFolder, imName) + imIndex = int(imName.split('.')[0][-4:]) + newName = imName.replace(str(imIndex).zfill(4), str(newOrder.index(imIndex)).zfill(5)) + newPath = os.path.join(exportFolder, newName) + os.rename(imPath, newPath) +for imName in os.listdir(exportFolder): + imPath = os.path.join(exportFolder, imName) + imIndex = int(imName.split('.')[0][-5:]) + newName = imName.replace(str(imIndex).zfill(5), str(imIndex).zfill(4)) + newPath = os.path.join(exportFolder, newName) + os.rename(imPath, newPath) + +fc.terminatePlugin(namePlugin, MagCFolder) \ No newline 
at end of file diff --git a/sectionSegmentation.py b/sectionSegmentation.py new file mode 100644 index 0000000..cb15cd4 --- /dev/null +++ b/sectionSegmentation.py @@ -0,0 +1,1927 @@ +# doing simple morphological operations might be easier than WEKA ... + +# What do I want exactly ? + # approximate locations are ok + # CCs are ok, simply need to fine tune a bit more + + # make a double CC on the two channels, and use a weight a.BF + (1-a).Fluo + + # --preprocess correctly at the beginning + # --blend the BF channel + # --get the edge channel yes, that is a useful channel + + # look for salient point in corner neighborhood ? + # (Corners are too difficult, depends on block shape ?) + + +# Fiji script that aligns the patches of the silicon wafer overview. It outputs subpatches x_y_imageName (normal, thresholded, edges). The overlap of the subpatches depends on the size of the template. + +# # ToDos 05/2017 +# --user prompt to offset the channels + # then insert the dapi channel too +# --activate automated montage +# manual section at the end should give the real section, not the mag ? while we're at it, the user gives the exact section ... + # but it is annoying, because the affine mechanism is in the preImaging script, so that either I should transfer the affine mechanism into this first script or I should keep track of which sections are mag sections and which sections are real sections + +# /!\ -- actually I need the affine mechanism in this script because I need to assess the orientation of the sections + # manually ? + # the affine mechanism is almost there. I can already transform sections. I need to find an affine given source and target points. It most probably already exists. + # asymmetric trimming ? + # difficult because otherwise the section terminates with mag resin + +# - understand why so many sections missing that should be easy to get + # -- 0.95 was clearly too high for the area threshold + # clustering issue also ?
+# weka on the brightfield to find the orientation or manual entry with a key press ? + # without systematic detachment it might not be easy to weka the orientation ... + # compare the quantity of edges on the two sides ... should be rather robust +# final GUI: export the sections to landmarks, adjust the landmarks and add new ones + +from __future__ import with_statement +import os, time, pickle, shutil + +# import subprocess # currently broken, use os.system instead +from java.lang import ProcessBuilder + +import threading +import ij +from ij import IJ, ImagePlus, WindowManager +from ij.gui import Roi, PolygonRoi, PointRoi, WaitForUserDialog +from ij.process import ImageStatistics, ImageProcessor +from ij.measure import Measurements +from ij.plugin import ImageCalculator +from ij.plugin.frame import RoiManager +from ij.plugin.tool import RoiRotationTool + +from java.awt import Frame +from java.lang import Double + +import fijiCommon as fc + +from ini.trakem2 import Project, ControlWindow +from ini.trakem2.display import Patch, Display, AreaList, Displayable +from ini.trakem2.imaging import StitchingTEM +from ini.trakem2.imaging import Blending +from ini.trakem2.imaging.StitchingTEM import PhaseCorrelationParam +from ij.plugin.filter import ParticleAnalyzer + +from mpicbg.imglib.algorithm.correlation import CrossCorrelation +from mpicbg.trakem2.align import Align, AlignTask +from mpicbg.imglib.image import ImagePlusAdapter + +from java.awt import Rectangle, Color, Polygon +from java.awt.geom import Area, AffineTransform +from java.awt.event import MouseAdapter, KeyAdapter, KeyEvent +from java.lang import Math, Runtime +from java.lang.Math import hypot, sqrt, atan2, PI, abs +from java.util.concurrent.atomic import AtomicInteger + +from jarray import zeros, array + +from trainableSegmentation import WekaSegmentation + +from xml.dom import minidom + +from operator import itemgetter + + +######################################################################### +# 
BEGIN README +######################################################################### +# 1. Start Fiji +# 2. Run this script in the Script Editor of Fiji (setting language to python) + +######################################################################### +# END README +######################################################################### + + +def xlim(a, lbb): + return max(min(a, lbb.width), 0) + +def ylim(a, lbb): + return max(min(a, lbb.height), 0) + +colors = [Color.red, Color.blue, Color.green, Color.yellow, Color.cyan, Color.magenta, Color.orange] + +def convertTo8BitAndResize(imagePaths, newImagePaths, downFactor, atomicI): + while atomicI.get() < len(imagePaths): + k = atomicI.getAndIncrement() + if (k < len(imagePaths)): + imagePath = imagePaths[k] + newImagePath = newImagePaths[k] + im = IJ.openImage(imagePath) + IJ.run(im, '8-bit', '') + # if 'BF' in os.path.basename(newImagePath): # normalize only the BF channel + # fc.normLocalContrast(im, 500, 500, 3, True, True) + im = fc.resize(im, float(1/float(downFactor))) + IJ.save(im, newImagePath) + IJ.log(str(k) + ' of ' + str(len(imagePaths)) + ' processed') + im.close() + + +######################### +### TrakEM2 operations +######################### +def addLandmarkOverlays(project, landmarks): + layerset = project.getRootLayerSet() + layer = layerset.getLayers().get(0) + layerId = layer.getId() + arealists = [] + for l, landmark in enumerate(landmarks): + ali = AreaList(project, 'landmark' + '_' + str(l), 0, 0) + layerset.add(ali) + lbb = layerset.get2DBounds() + + s = 500 + sw = 30 + + poly = Polygon(map(lambda x: xlim(x, lbb), [landmark[0]-s, landmark[0]+s, landmark[0]+s, landmark[0]-s]), map(lambda x: ylim(x, lbb), [landmark[1]-sw, landmark[1]-sw, landmark[1]+sw, landmark[1]+sw]), 4) + ali.addArea(layerId, Area(poly)) + poly = Polygon(map(lambda x: xlim(x, lbb), [landmark[0]-sw, landmark[0]+sw, landmark[0]+sw, landmark[0]-sw]), map(lambda x: ylim(x, lbb), [landmark[1]-s, landmark[1]-s, 
landmark[1]+s, landmark[1]+s]), 4) + ali.addArea(layerId, Area(poly)) + + ali.alpha = 0.5 + ali.color = colors[l%len(colors)] + ali.visible = True + ali.locked = False + ali.calculateBoundingBox(layer) + arealists.append(ali) + ali.updateBucket() + project.getProjectTree().insertSegmentations([ali]) + + displays = Display.getDisplays() + if displays.isEmpty(): + disp = Display(project, layer) + else: + disp = displays[0] + disp.repaint() + project.getLayerTree().updateList(layerset) + layer.recreateBuckets() + + factor = 1 + canvas = disp.getCanvas() + disp.repaint() + + # disp.show(layer, ali, False, False) + Display.showCentered(layer, ali, False, False) + disp.repaint() + + w = 500 + h = 500 + bb = Rectangle(int(round(landmark[0]-w/2)), int(round(landmark[1]-h/2)), w, h) + bb = bb.createIntersection(layerset.get2DBounds()) + im = project.getLoader().getFlatImage(layer, bb, factor, 0x7fffffff, ImagePlus.COLOR_RGB, Displayable, True) + IJ.save(im, os.path.join(preImagingFolder, 'landmark_' + str(l) + '_zoom_1.png')) + + w = 5000 + h = 5000 + ali.alpha = 0.8 + bb = Rectangle(int(round(landmark[0]-w/2)), int(round(landmark[1]-h/2)), w, h) + bb = bb.createIntersection(layerset.get2DBounds()) + disp.repaint() + im = project.getLoader().getFlatImage(layer, bb, 0.5 * factor, 0x7fffffff, ImagePlus.COLOR_RGB, Displayable, True) + IJ.save(im, os.path.join(preImagingFolder, 'landmark_' + str(l) + '_zoom_2.png')) + + # # # w = 15000 + # # # h = 15000 + # # # ali.alpha = 1 + # # # bb = Rectangle(int(landmark[0]-w/2), int(landmark[1]-h/2), w, h) + # # # bb = bb.createIntersection(layerset.get2DBounds()) + # # # disp.repaint() + # # # im = project.getLoader().getFlatImage(layer, bb, 0.03 * factor, 0x7fffffff, ImagePlus.COLOR_RGB, Displayable, True) + # # # IJ.save(im, os.path.join(preImagingFolder, 'landmark_' + str(l) + '_zoom_3.png')) + # # # ali.visible = False + + for ali in arealists: + ali.visible = True + im = project.getLoader().getFlatImage(layer, 
layerset.get2DBounds(), 0.5 * factor, 0x7fffffff, ImagePlus.COLOR_RGB, Displayable, True) + IJ.save(im, os.path.join(preImagingFolder, 'allLandmarks.png')) + + # project.save() + +def forceAlphas(layerset): + for ali in layerset.getZDisplayables(AreaList): + if int(ali.getFirstLayer().getZ()) == 0: + ali.alpha = 0.5 + elif int(ali.getFirstLayer().getZ()) == 1: + ali.alpha = 1 + +# def addSectionOverlays(project, layers, sections, colors, alphas, name): + # layerset = project.getRootLayerSet() + # for l in layers: + # layer = layerset.getLayers().get(l) + # layerId = layer.getId() + # segmentations = [] + # ali = AreaList(project, name + '_' + str(l), 0, 0) + # layerset.add(ali) + # for id, section in enumerate(sections): + # ali.addArea(layerId, Area(sectionToPoly(section))) + # ali.alpha = alphas[l] + # ali.color = colors[l] + # ali.visible = True + # ali.locked = False + # # ali.setColor(colors[l]) + # # ali.setAlpha(alphas[l]) + # ali.calculateBoundingBox(None) + # print 'alpha', ali.getAlpha(), 'layer', l + # ali.updateBucket() + # segmentations.append(ali) + # project.getProjectTree().insertSegmentations([ali]) + + # displays = Display.getDisplays() + # if displays.isEmpty(): + # disp = Display(project, layer) + # else: + # disp = displays[0] + # disp.repaint() + # # project.getProjectTree().insertSegmentations(segmentations) + + # project.getLayerTree().updateList(layerset) + # layer.recreateBuckets() + +def addSectionOverlays(project, layers, sections, colors, alphas, name): + layerset = project.getRootLayerSet() + for l in layers: + layer = layerset.getLayers().get(l) + layerId = layer.getId() + segmentations = [] + for id, section in enumerate(sections): + ali = AreaList(project, name + '_' + str(id), 0, 0) + layerset.add(ali) + ali.addArea(layerId, Area(sectionToPoly(section))) + ali.alpha = alphas[l] + ali.color = colors[l] + ali.visible = True + ali.locked = False + # ali.setColor(colors[l]) + # ali.setAlpha(alphas[l]) + ali.calculateBoundingBox(None) + 
print 'alpha', ali.getAlpha(), 'layer', l + ali.updateBucket() + segmentations.append(ali) + project.getProjectTree().insertSegmentations(segmentations) + + displays = Display.getDisplays() + if displays.isEmpty(): + disp = Display(project, layer) + else: + disp = displays[0] + disp.repaint() + # project.getProjectTree().insertSegmentations(segmentations) + + project.getLayerTree().updateList(layerset) + layer.recreateBuckets() + + + +def createImportFile(folder, paths, locations, factor = 1, layer = 0): + importFilePath = os.path.join(folder, 'trakemImportFile.txt') + with open(importFilePath, 'w') as f: + for id, path in enumerate(paths): + xLocation = int(round(locations[id][0] * factor)) + yLocation = int(round(locations[id][1] * factor)) + IJ.log('Inserting image ' + path + ' at (' + str(xLocation) + ' ; ' + str(yLocation) + ')' ) + f.write(str(path) + '\t' + str(xLocation) + '\t' + str(yLocation) + '\t' + str(layer) + '\n') + return importFilePath + +def getPointsFromUser(project, l, fov = None, text = 'Select points'): + points = None + layerset = project.getRootLayerSet() + layer = layerset.getLayers().get(l) + displays = Display.getDisplays() + if displays.isEmpty(): + disp = Display(project, layer) + else: + disp = displays[0] + disp.repaint() + disp.showFront(layer) + # print 'disp.getCanvas().getMagnification()', disp.getCanvas().getMagnification() + # disp.getCanvas().center(Rectangle(int(round(fov[0])), int(round(fov[1])), int(round(effectivePatchSize)), int(round(effectivePatchSize))), 0.75) + WaitForUserDialog(text).show() + roi = disp.getRoi() + if roi: + poly = disp.getRoi().getPolygon() + points = [list(a) for a in zip(poly.xpoints, poly.ypoints)] + disp.getCanvas().getFakeImagePlus().deleteRoi() + disp.update(layerset) + return points + +def getTemplates(project): + layer = project.getRootLayerSet().getLayers().get(0) # the brightfield layer + disp = Display(project, layer) + disp.showFront(layer) + + WaitForUserDialog('Select in order : A. 
4 corners of a section. B. 4 corners of the mag region of the same section. Then click OK.').show() + + poly = disp.getRoi().getPolygon() + X = poly.xpoints + Y = poly.ypoints + nSections = len(X)/8 # the script is general enough to let the user give more sections, but only one used here ... + + print 'X', X + print 'Y', Y + print 'nSections', nSections + + # # # From the old pipeline, probably not useful any more + # # 1. Determining the size of the subpatches + # sectionExtent = longestDiagonal([ [X[0], Y[0]] , [X[1], Y[1]], [X[2], Y[2]], [X[3], Y[3]] ]) + # patchSize = int(round(3 * sectionExtent)) + # overlap = int(round(1 * sectionExtent)) + # effectivePatchSize = patchSize - overlap + # print 'effectivePatchSize', effectivePatchSize + # writePoints(patchSizeAndOverlapFullResPath, [[patchSize, overlap]]) + # writePoints(patchSizeAndOverlapLowResPath, [[patchSize/float(downsizingFactor), overlap/float(downsizingFactor)]]) + + templateSections = [] + templateMags = [] + userTemplateInput = [] # the points the user gave: will be used to create template with real images + + for s in range(nSections): # the script is general enough to let the user give more sections, but only one used here ... 
+ # the user has to use the naming convention + templateSection = [ [X[8*s +0], Y[8*s +0]] , [X[8*s +1], Y[8*s +1]], [X[8*s +2], Y[8*s +2]], [X[8*s +3], Y[8*s +3]] ] + userTemplateInput.append(templateSection) + templateMag = [ [X[8*s +4], Y[8*s +4]] , [X[8*s +5], Y[8*s +5]], [X[8*s +6], Y[8*s +6]], [X[8*s +7], Y[8*s +7]] ] + userTemplateInput.append(templateMag) + + print 'templateSection', templateSection + print 'templateMag', templateMag + + # calculate angle of the template section (based on the magnetic part) + angle = getAngle([templateMag[0][0], templateMag[0][1], templateMag[1][0], templateMag[1][1]]) #the angle is calculated on the mag box + rotTransform = AffineTransform.getRotateInstance(-angle) + + # rotate the template so that tissue is on the left, magnetic on the right (could be rotate 90deg actually ... but backward compatibility ...) + templateSection = applyTransform(templateSection, rotTransform) + templateMag = applyTransform(templateMag, rotTransform) + + # after rotation, the template points are likely negative, or at least far from (0,0): translate the topleft corner of the tissue bounding box (when magnetic pointing to right) to (100,100) + bb = sectionToPoly(templateSection).getBounds() # the offset is calculated on the section box + translateTransform = AffineTransform.getTranslateInstance(- bb.x + 100, - bb.y + 100) + + templateSection = applyTransform(templateSection, translateTransform) + templateMag = applyTransform(templateMag, translateTransform) + + print 'templateSection', templateSection + print 'templateMag', templateMag + templateSections.append(templateSection) + templateMags.append(templateMag) + + writeSections(templateSectionsPath, templateSections) + writeSections(templateMagsPath, templateMags) + writeSections(userTemplateInputPath, userTemplateInput) # to create a template image for template matching + writeSections(sourceTissueMagDescriptionPath, [templateSections[0], templateMags[0]]) + + +def getLandmarks(project, 
savePath, text): + landmarks = [] + layer = project.getRootLayerSet().getLayers().get(0) + disp = Display(project, layer) + disp.showFront(layer) + + WaitForUserDialog(text).show() + roi = disp.getRoi() + IJ.log('ROI landmarks' + str(roi)) + if roi: + poly = roi.getPolygon() + landmarks = [list(a) for a in zip(poly.xpoints, poly.ypoints)] + IJ.log('landmarks' + str(landmarks)) + + writePoints(savePath, landmarks) + return landmarks + +def getROIDescription(project): + layer = project.getRootLayerSet().getLayers().get(0) + disp = Display(project, layer) + disp.showFront(layer) + + WaitForUserDialog('Click on the 4 corners of a section. Then click on the 4 corners defining the ROI to be imaged. You can postpone that task to later').show() + roi = disp.getRoi() + if roi: + poly = roi.getPolygon() + section = [list(a) for a in zip(poly.xpoints[:4], poly.ypoints[:4])] + ROI = [list(a) for a in zip(poly.xpoints[4:], poly.ypoints[4:])] + + writeSections(sourceROIDescriptionPath, [section, ROI]) + +######################### +### Section operations +######################### + +def sectionToPoly(l): + return Polygon( [int(round(a[0])) for a in l] , [int(round(a[1])) for a in l], len(l)) + +def writePoints(path, points): + with open(path, 'w') as f: + for point in points: + line = str(int(round(point[0]))) + '\t' + str(int(round(point[1]))) + '\n' + IJ.log(line) + f.write(line) + IJ.log('The point coordinates have been written') + +def readPoints(path): + points = [] + with open(path, 'r') as f: + lines = f.readlines() + for point in lines: + points.append(map(int,point.split('\t'))) + return points + +def readSectionCoordinates(path, downFactor = 1): + sections = [] + if os.path.isfile(path): + f = open(path, 'r') + lines = f.readlines() + for line in lines: + points = line.split('\t') + points.pop() + # print points + section = [ [int(round(float(point.split(',')[0])/float(downFactor))), int(round(float(point.split(',')[1])/float(downFactor)))] for point in points] + 
sections.append(section) + f.close() + return sections + +def readMISTLocations(MISTPath): + patchPaths = [] + patchLocations = [] + f = open(MISTPath, 'r') + lines = f.readlines() + for line in lines: + patchPath = os.path.join(inputFolder8bit, line.split(';')[0].split(':')[1][1:]) + x = int(round(float(line.split(';')[2].split(':')[1].split('(')[1].split(',')[0]))) + y = int(round(float(line.split(';')[2].split(':')[1].split(',')[1][1:].split(')')[0]))) + patchPaths.append(patchPath) + patchLocations.append([x,y]) + f.close() + return patchPaths, patchLocations + +def readStitchedLocations(path): + f = open(path, 'r') + lines = f.readlines()[4:] # trimm the heading + f.close() + + patchPaths = [] + patchLocations = [] + + for line in lines: + patchPath = os.path.join(inputFolder8bit, line.replace('\n', '').split(';')[0]) + x = int(float(line.replace('\n', '').split(';')[2].split(',')[0].split('(')[1])) + y = int(float(line.replace('\n', '').split(';')[2].split(',')[1].split(')')[0])) + patchPaths.append(patchPath) + patchLocations.append([x,y]) + return patchPaths, patchLocations + +def sectionToList(pointList): # [[1,2],[5,8]] to [1,2,5,8] + l = array(2 * len(pointList) * [0], 'd') + for id, point in enumerate(pointList): + l[2*id] = point[0] + l[2*id+1] = point[1] + return l + +def listToSection(l): # [1,2,5,8] to [[1,2],[5,8]] + pointList = [] + for i in range(len(l)/2): + pointList.append([l[2*i], l[2*i+1]]) + return pointList + +def offsetCorners(corners, xOffset, yOffset): + for id, corner in enumerate(corners): + corners[id] = [corner[0] + xOffset, corner[1] + yOffset ] + return corners + +def writeSections(path, sectionList): + with open(path, 'w') as f: + for section in sectionList: + for corner in section: + # print 'corner', corner + f.write( str(int(round(corner[0]))) + ',' + str(int(round(corner[1]))) + '\t') + f.write('\n') + print 'The coordinates of', len(sectionList), 'sections have been written to', path + +######################### +### 
Geometric operations +######################### +def barycenter(points): + xSum = 0 + ySum = 0 + for i,point in enumerate(points): + xSum = xSum + point[0] + ySum = ySum + point[1] + x = int(round(xSum/float(i+1))) + y = int(round(ySum/float(i+1))) + return x,y + +def shrink(section, factor = 0): + ''' + factor = 0 : nothing happens + factor = 1 : complete shrinkage to center + ''' + f = factor + # center = [(section[0][0] + section[1][0] + section[2][0] + section[3][0])/4., (section[0][1] + section[1][1] + section[2][1] + section[3][1])/4.] + center = barycenter(section) + + p0, p1, p2, p3 = section # the 4 points of the section + + p0 = [int(round((1-f) * p0[0] + f * center[0])) , int(round((1-f) * p0[1] + f * center[1]))] + p1 = [int(round((1-f) * p1[0] + f * center[0])) , int(round((1-f) * p1[1] + f * center[1]))] + p2 = [int(round((1-f) * p2[0] + f * center[0])) , int(round((1-f) * p2[1] + f * center[1]))] + p3 = [int(round((1-f) * p3[0] + f * center[0])) , int(round((1-f) * p3[1] + f * center[1]))] + + return [p0, p1, p2, p3] + +def getAngle(line): + diff = [line[0] - line[2], line[1] - line[3]] + theta = Math.atan2(diff[1], diff[0]) + return theta + +def longestDiagonal(corners): + maxDiag = 0 + for corner1 in corners: + for corner2 in corners: + maxDiag = Math.max(Math.sqrt((corner2[0]-corner1[0]) * (corner2[0]-corner1[0]) + (corner2[1]-corner1[1]) * (corner2[1]-corner1[1])), maxDiag) + return int(maxDiag) + +def getArea(section): + bb = sectionToPoly(section).getBounds() + section = [[point[0] - bb.x, point[1]-bb.y] for point in section] + + im = IJ.createImage('', '8-bit', bb.width, bb.height, 1) + ip = im.getProcessor() + + ip.setRoi(sectionToPoly(section)) + area = ImageStatistics.getStatistics(ip, Measurements.MEAN, im.getCalibration()).area + im.close() + return area + +def getConnectedComponents(im, minSize = 0): + IJ.run(im, 'Invert', '') + points = [] + roim = RoiManager(True) + + pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER + 
ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES, Measurements.AREA, None, 0, Double.POSITIVE_INFINITY, 0.0, 1.0) + pa.setRoiManager(roim) + pa.analyze(im) + + for roi in roim.getRoisAsArray(): + # IJ.log(str(len(roi.getContainedPoints())) + '-' + str(minSize)) + if len(roi.getContainedPoints()) > minSize: + points.append(roi.getContourCentroid()) # center of mass instead ? There is nothing better apparently ... + roim.close() + + return points + +def getFastCC(im1,im2): + cc = CrossCorrelation(im1, im2) + cc.process() + return cc.getR() + +def rotate(im, angleDegree): + ip = im.getProcessor() + ip.setInterpolationMethod(ImageProcessor.BILINEAR) + ip.rotate(angleDegree) + +def getCroppedRotatedWindow(im, rDegree, x, y): # warning: uses quite a few global parameters + candidate = im.duplicate() # necessary, I cannot get it to work otherwise (the roi does not reset or something like this ...) + + wCandidate = candidate.getWidth() + hCandidate = candidate.getHeight() + + rotate(candidate, rDegree) + + candidate.setRoi(wCandidate/2 - wTemplate, hCandidate/2 - hTemplate, wTemplate * 2, hTemplate * 2) + croppedRotatedCandidate = candidate.crop() + + # the top left corner of the sliding window: middle - template/2 - neighborhood/2 + advancement + # xWindow = int(wTemplate - wTemplate/2. - neighborhood/2. + xStep * x) + # yWindow = int(hTemplate - hTemplate/2. - neighborhood/2. 
+ yStep * y) + xWindow = x + yWindow = y + + # dapi location of the sliding template patch: topleft corner + templateDapi + newTemplateDapiCenter = [templateDapiCenter[0] + xWindow, templateDapiCenter[1] + yWindow] + + # distance between the dapiCenter of the sliding template and of the candidate (in the middle of the candidate) + dapiDistances = sqrt((newTemplateDapiCenter[0] - wTemplate)*(newTemplateDapiCenter[0] - wTemplate) + (newTemplateDapiCenter[1] - hTemplate)*(newTemplateDapiCenter[1] - hTemplate)) + + # crop the candidate below the sliding template + croppedRotatedCandidate.setRoi(xWindow, yWindow, wTemplate, hTemplate) + return croppedRotatedCandidate.crop() + + +def templateMatchCandidate(atom, candidatePaths, templateMatchingPath, allResults): + template = ImagePlusAdapter.wrap(IJ.openImage(templateMatchingPath)) + + while atom.get() < len(candidatePaths): + k = atom.getAndIncrement() + if k < len(candidatePaths): + IJ.log('Processing section ' + str(k)) + candidatePath = candidatePaths[k] + cand = IJ.openImage(candidatePath) + + wCandidate = cand.getWidth() + hCandidate = cand.getHeight() + + results = [] + for rotationId in range(rotations): + candidate = cand.duplicate() # necessary, I cannot get it to work otherwise (the roi does not reset or something like this ...) + + # rotate the candidate + rotationDegree = rotationStepDegree * rotationId + rotate(candidate, rotationDegree) + + # ip = candidate.getProcessor() + # ip.setInterpolationMethod(ImageProcessor.BILINEAR) + # ip.rotate(rotationDegree) + + # extract a central region of the rotation candidate, size is template*2 - is that really necessary ? 
+ # candidate.setRoi(wCandidate/2 - wTemplate, hCandidate/2 - hTemplate, wTemplate * 2, hTemplate * 2) + # croppedRotatedCandidate = candidate.crop() + + # loops for the brute force search + for x in range(xMatchingGrid): + for y in range(yMatchingGrid): + # the top left corner of the sliding window: middle - template/2 - neighborhood/2 + advancement + xWindow = int(wCandidate/2. - wTemplate/2. - neighborhood/2. + xStep * x) + yWindow = int(hCandidate/2. - hTemplate/2. - neighborhood/2. + yStep * y) + + # dapi location of the sliding template patch: topleft corner + templateDapi + newTemplateDapiCenter = [templateDapiCenter[0] + xWindow, templateDapiCenter[1] + yWindow] + # distance between the dapiCenter of the sliding template and of the candidate (in the middle of the candidate) + dapiDistances = sqrt((newTemplateDapiCenter[0] - wTemplate)*(newTemplateDapiCenter[0] - wTemplate) + (newTemplateDapiCenter[1] - hTemplate)*(newTemplateDapiCenter[1] - hTemplate)) + # IJ.log(str(dapiDistances)) + if dapiDistances < dapiCenterDistanceThreshold: + # if dapiDistances < 99999: + # crop the candidate below the sliding template patch + candidate.setRoi(xWindow, yWindow, wTemplate, hTemplate) + croppedRotatedCandidate = candidate.crop() + # IJ.log('-----' + str(croppedRotatedCandidate.getWidth())) + croppedRotatedCandidate = ImagePlusAdapter.wrap(croppedRotatedCandidate) + # compute CC and append result + cc = getFastCC(template, croppedRotatedCandidate) + results.append([cc , [rotationDegree, x, y]]) + + # close open images + cand.close() + croppedRotatedCandidate.close() + candidate.close() + + # sort results and append to total results + sortedResults = sorted(results, key=itemgetter(0), reverse=True) # maybe sort with the Id instead ? + # sortedResults = sorted(results, key=itemgetter(1), reverse=True) # maybe sort with the Id instead ? 
+ # IJ.log(str(sortedResults[:5])) + bestId = sortedResults[0][1] + allResults.append([k] + sortedResults) + + # # optional display of the best candidates + # im = IJ.openImage(candidatePath) + # rotationDegree = bestId[0] + # rotate(im, rotationDegree) + # im.setRoi(int(wCandidate/2. - wTemplate/2. - neighborhood/2. + xStep * bestId[1]) , int(hCandidate/2. - hTemplate/2. - neighborhood/2. + yStep * bestId[2]), wTemplate, hTemplate) + # im = im.crop() + # im.show() + # 8/0 +######################### +### Affine transform operations +######################### +def applyTransform(section, aff): + sourceList = sectionToList(section) + targetList = array(len(sourceList) * [0], 'd') + aff.transform(sourceList, 0, targetList, 0, len(section)) + targetSection = listToSection(targetList) + return targetSection + +def affineT(sourceLandmarks, targetLandmarks, sourcePoints): + aff = fc.getModelFromPoints(sourceLandmarks, targetLandmarks).createAffine() + return applyTransform(sourcePoints, aff) + +####################### +# Parameters to provide +####################### +inputFolder = os.path.normpath(r'D:\ThomasT\Thesis\B6\B6_Wafer1_203_24_12\AllImages') + + +################### +# Get the mosaic configuration +################### +# mosaicMetadataPath = os.path.join(os.path.dirname(inputFolder), 'Mosaic_Metadata.xml') +try: + # mosaicMetadataPath = os.path.join(os.path.dirname(inputFolder), filter(lambda x: 'metadata' in x, os.listdir(os.path.dirname(inputFolder)))[0]) # ugly ... + mosaicMetadataPath = os.path.join(inputFolder, filter(lambda x: 'Mosaic_Metadata' in x, os.listdir(inputFolder))[0]) # ugly ... 
+ IJ.log('Using grid size from the ZEN metadate file') + xmldoc = minidom.parse(mosaicMetadataPath) + xGrid = int(float(xmldoc.getElementsByTagName('Columns')[0].childNodes[0].nodeValue)) + yGrid = int(float(xmldoc.getElementsByTagName('Rows')[0].childNodes[0].nodeValue)) +except Exception, e: + IJ.log('No metadata file found') + IJ.log('Using manually entered grid size for the mosaic') + xGrid = 1 + yGrid = 1 +IJ.log('Mosaic size: (' + str(xGrid) + ', ' + str(yGrid) + ')') + +channels = ['BF', 'DAPI', '488', '546'] +nChannels = len(channels) + +overlap = 0.1 +downsizingFactor = 3 +shrinkFactor = 2 # for orientation flipping + +calibrationX = 1.302428227746592 +calibrationY = 1.302910064239829 + + +######################### +# Parameters for template matching +######################### +xMatchingGrid = 10 # number of locations tested with the template on the x axis +yMatchingGrid = 10 +neighborhood = 25 # neighborhood around the center tested for matching +xStep = neighborhood/xMatchingGrid +yStep = neighborhood/yMatchingGrid + +rotations = 180 # number of rotations tested within the total 360 degrees +rotationStepDegree = 360/float(rotations) +rotationStepRadian = rotationStepDegree * PI / 180. 
dapiCenterDistanceThreshold = 9999 # during brute force matching, the DAPI of the template and the DAPI of the candidate should be close
tissueShrinkingForEdgeFreeRegion = 0.6

#######################
# Setting up
IJ.log('Setting up')
#######################

#######################
# Getting the template
#######################

ControlWindow.setGUIEnabled(False)  # keep TrakEM2 headless while scripting

# All intermediate results live in a 'workingFolder' sibling of the input folder.
workingFolder = fc.mkdir_p(os.path.join(os.path.dirname(inputFolder), 'workingFolder'))

templatePath = os.path.join(workingFolder,'templateCoordinates.txt')
inputFolderContent = os.listdir(inputFolder)

# downsized 8-bit copies of the raw images
inputFolder8bit = os.path.join(workingFolder, '8bit' + os.path.basename(inputFolder))
fc.mkdir_p(inputFolder8bit)
inputFolder8bitContent = os.listdir(inputFolder8bit)

# full-resolution 8-bit copies
inputFolder8bitFullRes = os.path.join(workingFolder, '8bitFullRes' + os.path.basename(inputFolder))
fc.mkdir_p(inputFolder8bitFullRes)
inputFolder8bitFullResContent = os.listdir(inputFolder8bitFullRes)

fluoOffsetPath = os.path.join(workingFolder, 'fluoOffset.txt')

templateSectionsPath = os.path.join(workingFolder, 'templateSections.txt')
templateMagsPath = os.path.join(workingFolder, 'templateMags.txt')

templateSectionsLowResPath = os.path.join(workingFolder, 'templateSectionsLowRes.txt')
templateMagsLowResPath = os.path.join(workingFolder, 'templateMagsLowRes.txt')

userTemplateInputPath = os.path.join(workingFolder, 'userTemplateInputPath.txt')

preImagingFolder = fc.mkdir_p(os.path.join(workingFolder, 'preImaging'))
landmarksPath = os.path.join(preImagingFolder, 'source_landmarks.txt')
sourceTissueMagDescriptionPath = os.path.join(preImagingFolder, 'source_tissue_mag_description.txt') #2 sections: template tissue and template mag
sourceROIDescriptionPath = os.path.join(preImagingFolder, 'source_ROI_description.txt') #2 sections: template tissue and ROI

patchSizeAndOverlapFullResPath = os.path.join(workingFolder, 'patchSizeAndOverlapFullRes.txt')
patchSizeAndOverlapLowResPath = os.path.join(workingFolder, 'patchSizeAndOverlapLowRes.txt')

# print "filter(lambda x: '_Edges' in x, os.listdir(workingFolder))", filter(lambda x: '_Edges' in x, os.listdir(workingFolder))

magSectionsLowResPath = os.path.join(workingFolder, 'allMagSectionsCoordinatesLowRes.txt')
tissueSectionsLowResPath = os.path.join(workingFolder, 'allTissueSectionsCoordinatesLowRes.txt')

magSectionsHighResPath = os.path.join(workingFolder, 'allMagSectionsCoordinatesFullRes.txt')
tissueSectionsHighResPath = os.path.join(workingFolder, 'allTissueSectionsCoordinatesFullRes.txt')

finalMagSectionsPath = os.path.join(workingFolder, 'finalMagSectionsCoordinates.txt')
finalTissueSectionsPath = os.path.join(workingFolder, 'finalTissueSectionsCoordinates.txt')

finalMagSectionsPreImagingPath = os.path.join(preImagingFolder, 'source_sections_mag.txt')
finalTissueSectionsPreImagingPath = os.path.join(preImagingFolder, 'source_sections_tissue.txt')

flipFlag = os.path.join(workingFolder, 'flipflag')

### Matching files and folders ###
dapiCentersPath = os.path.join(workingFolder, 'dapiCenters')
sectionsSpecsPath = os.path.join(workingFolder, 'sectionsSpecs')

#######################
# 0.0 Converting all images to 8 bit and downsizing
#######################
# Skipped when the 8-bit folder already holds at least as many .tif files as the input.
if len(filter(lambda x: os.path.splitext(x)[1] == '.tif', inputFolder8bitContent)) < len(filter(lambda x: os.path.splitext(x)[1] == '.tif', inputFolderContent)):
    IJ.log('0. 8-bit conversion and downsizing')
    imageNames = filter(lambda x: os.path.splitext(x)[1] == '.tif', inputFolderContent)
    imagePaths = [os.path.join(inputFolder, imageName) for imageName in imageNames]
    newImagePaths = [os.path.join(inputFolder8bit, imageName) for imageName in imageNames]

    # atomic counter shared by the worker threads to pick the next image
    atomicI = AtomicInteger(0)
    fc.startThreads(convertTo8BitAndResize, fractionCores = 0.9, wait = 0, arguments = (imagePaths, newImagePaths, downsizingFactor, atomicI))

#######################
# 0.1 Converting all images to 8 bit, for full res
#######################
if len(filter(lambda x: os.path.splitext(x)[1] == '.tif', inputFolder8bitFullResContent)) < len(filter(lambda x: os.path.splitext(x)[1] == '.tif', inputFolderContent)):
    IJ.log('0. 8-bit conversion for full res')
    imageNames = filter(lambda x: os.path.splitext(x)[1] == '.tif', inputFolderContent)
    imagePaths = [os.path.join(inputFolder, imageName) for imageName in imageNames]
    newImagePaths = [os.path.join(inputFolder8bitFullRes, imageName) for imageName in imageNames]

    atomicI = AtomicInteger(0)
    # resize factor 1: conversion only, no downsizing
    fc.startThreads(convertTo8BitAndResize, fractionCores = 0.9, wait = 0, arguments = (imagePaths, newImagePaths, 1, atomicI))

#######################
# 1. Assembling the overview
#######################
# setting up trakem project
trakemFolder = workingFolder
projectPath = os.path.join(os.path.normpath(trakemFolder) , 'WaferLMProject.xml') # the project with 7 layers (low res I believe)
projectPathBis = os.path.join(os.path.normpath(trakemFolder) , 'WaferLMProjectBis.xml')
projectPathFullRes = os.path.join(os.path.normpath(trakemFolder) , 'WaferLMProjectFullRes.xml')
overlaysProjectPath = os.path.join(os.path.normpath(trakemFolder) , 'OverlaysWaferLMProject.xml')

# Image names from ZEN
# Wafer_SFN_2016_b0s0c0x1249-1388y3744-1040m45.tif

imageNames = []

if not os.path.isfile(projectPath):
    IJ.log('1. Assembling the overview')
    imageNames = []
    # One TrakEM layer per channel; layer 0 (BF) is stitched, the other
    # channels reuse the BF tile positions.
    for channelId, channel in enumerate(channels):
        # imageNames = fc.naturalSort(filter(lambda x: (os.path.splitext(x)[1] in ['.tif', '.TIF']) and ('c' + str(channelId) + 'x') in x , os.listdir(inputFolder8bit)))
        if channel == 'BF': # BF and fluo channels have different naming schemes at the Z1 microscope because all fluo chanels imaged together and the BF imaged separately (because high intensity needed for fluo but low intensity needed for BF)
            imageNames = fc.naturalSort(filter(lambda x: (os.path.splitext(x)[1] in ['.tif', '.TIF']) and (('c' + str(channelId) + 'x') in x) and ('BF' in x) , os.listdir(inputFolder8bit)))
        else:
            # fluo tiles were imaged in one session, hence channel index shifted by one
            imageNames = fc.naturalSort(filter(lambda x: (os.path.splitext(x)[1] in ['.tif', '.TIF']) and ('c' + str(channelId-1) + 'x') in x , os.listdir(inputFolder8bit)))
        # Wafer_210_BF_b0s0c0x3747-1388y0-1040m3
        # Wafer_210_DAPI_488_546_b0s0c0x4996-1388y11232-1040m112

        if channelId == 0:
            # # commented lines below probably to delete
            # im0 = IJ.openImage(os.path.join(inputFolder8bit, imageNames[0]))
            # width = im0.getWidth()
            # height = im0.getHeight()
            # im0.close()
            # widthEffective = int(round((1-overlap) * width))
            # heightEffective = int(round((1-overlap) * height))

            project, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(trakemFolder, nChannels + 3)) #channels + thresholded + edged + rawEdged
            loader.setMipMapsRegeneration(False)
            project.saveAs(projectPath, True)

            layer = layerset.getLayers().get(channelId)

            # inserting all patches
            patchPaths = []
            patchLocations = []
            # for x in range(xGrid):
            #     for y in range(yGrid):
            #         patchNumber = y * xGrid + x * (1 - (y%2)) + (xGrid - x - 1) * (y%2)
            #         patchNumber = x * yGrid + y * (1 - (x%2)) + (yGrid - y - 1) * (x%2)
            #         patchNumber = x * yGrid + y

            # renaming to e.g. DAPI_038.tif for stitching plugin instead of the ZEN names
            for imageId, imageName in enumerate(fc.naturalSort(imageNames)):
                sourcePatchPath = os.path.join(inputFolder8bit, imageName)
                targetPatchPath = os.path.join(inputFolder8bit, channel + '_' + str(imageId).zfill(3) + '.tif')
                os.rename(sourcePatchPath, targetPatchPath)

                # patchPaths.append()
                # xmldoc = minidom.parse(os.path.join(inputFolder, imageName + '_metadata.xml'))
                # patchLocations.append([int(float(xmldoc.getElementsByTagName('StageXPosition')[0].childNodes[0].nodeValue)/float(calibrationX)/float(downsizingFactor)) + 10000, int(float(xmldoc.getElementsByTagName('StageYPosition')[0].childNodes[0].nodeValue)/float(calibrationY)/float(downsizingFactor)) + 10000])

            if (xGrid * yGrid) != 1:
                patchLocationsPath = os.path.join(inputFolder8bit, 'TileConfiguration.registered.txt') # patch locations calculated by the plugin
                if not os.path.isfile(patchLocationsPath):
                    # Grid/Collection stitching plugin has issues with section-free areas
                    command = 'type=[Grid: column-by-column] order=[Down & Right ] grid_size_x=' + str(xGrid) + ' grid_size_y=' + str(yGrid) + ' tile_overlap=' + str(overlap * 100) + ' first_file_index_i=0 directory=' + inputFolder8bit + ' file_names=BF_{iii}.tif output_textfile_name=TileConfiguration.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap computation_parameters=[Save computation time (but use more RAM)] image_output=[Write to disk] output_directory=' + inputFolder8bit
                    IJ.log('Stitching command - ' + command)
                    IJ.run('Grid/Collection stitching', command)

                    # # The MIST plugin does not seem to have problems with the section-free areas, but more inaccurate
                    # IJ.run('MIST', 'gridwidth=' + str(xGrid) + ' gridheight=' + str(yGrid) + ' starttile=0 imagedir=' + inputFolder8bit + ' filenamepattern=' + channel + '_{ppp}.tif filenamepatterntype=SEQUENTIAL gridorigin=UL assemblefrommetadata=false globalpositionsfile=[] numberingpattern=VERTICALCOMBING startrow=0 startcol=0 extentwidth=' + str(xGrid) + ' extentheight=' + str(yGrid) + ' timeslices=0 istimeslicesenabled=false issuppresssubgridwarningenabled=false outputpath=' + inputFolder8bit + ' displaystitching=false outputfullimage=false outputmeta=true outputimgpyramid=false blendingmode=OVERLAY blendingalpha=NaN outfileprefix=img- programtype=AUTO numcputhreads=' + str(Runtime.getRuntime().availableProcessors()) + ' loadfftwplan=true savefftwplan=true fftwplantype=MEASURE fftwlibraryname=libfftw3 fftwlibraryfilename=libfftw3.dll planpath=' + os.path.join(IJ.getDirectory('imagej'), 'lib', 'fftw', 'fftPlans') + ' fftwlibrarypath=' + os.path.join(IJ.getDirectory('imagej'), 'lib', 'fftw') + ' stagerepeatability=0 horizontaloverlap=' + str(overlap * 100) + ' verticaloverlap=' + str(overlap * 100) + ' numfftpeaks=0 overlapuncertainty=NaN isusedoubleprecision=false isusebioformats=false isenablecudaexceptions=false translationrefinementmethod=SINGLE_HILL_CLIMB numtranslationrefinementstartpoints=16 headless=false loglevel=MANDATORY debuglevel=NONE')
                    # patchLocationsPath = os.path.join(inputFolder8bit, 'img-global-positions-0.txt')
                    # patchPaths, patchLocations = readMISTLocations(patchLocationsPath)

                patchPaths, patchLocations = readStitchedLocations(patchLocationsPath)
            else:
                # single-tile mosaic: no stitching needed
                patchLocations.append([0,0])
                patchPaths.append(os.path.join(inputFolder8bit, channel + '_' + str(0).zfill(3) + '.tif'))

            IJ.log('patchPaths ' + str(patchPaths))
            IJ.log('patchLocations ' + str(patchLocations))

            # import all patches in the trakEM project
            importFilePath = createImportFile(workingFolder, patchPaths, patchLocations)
            task = loader.importImages(layerset.getLayers().get(channelId), importFilePath, '\t', 1, 1, False, 1, 0)
            task.join()

            # # # # # # /!\ old version with the phase correlation TEM stitcher
            # # # #######################
            # # # # 2. Montaging the overview
            # # # IJ.log('2. Montaging the overview')
            # # # #######################
            # # # patchScale, hide_disconnected, remove_disconnected, mean_factor, min_R = 1, False, False, 2.5, 0.1
            # # # stitcher = StitchingTEM()
            # # # params = PhaseCorrelationParam(patchScale, overlap, hide_disconnected, remove_disconnected, mean_factor, min_R)
            # # # collectionPatches = layer.getDisplayables(Patch)
            # # # stitcher.montageWithPhaseCorrelation(collectionPatches, params)

        else: # stitching has already been done in the first channel, simply read the calculated stitching locations

            # renaming to e.g. DAPI_038.tif for stitching plugin instead of the ZEN names
            for imageId, imageName in enumerate(fc.naturalSort(imageNames)):
                sourcePatchPath = os.path.join(inputFolder8bit, imageName)
                targetPatchPath = os.path.join(inputFolder8bit, channel + '_' + str(imageId).zfill(3) + '.tif')
                os.rename(sourcePatchPath, targetPatchPath)

            # read the patch coordinates of stitched layer 0 of the trakem project
            patches0 = layerset.getLayers().get(0).getDisplayables(Patch)
            patchPaths = [os.path.join(os.path.dirname(patch.getFilePath()), os.path.basename(patch.getFilePath()).replace(channels[0], channel)) for patch in patches0]
            patchLocations = [ [patch.getX() , patch.getY()] for patch in patches0]

            # patchLocations = []
            # for patchPath in patchPaths:
            #     xmldoc = minidom.parse(os.path.join(inputFolder, os.path.basename(patchPath) + '_metadata.xml'))
            #     patchLocations.append([int(float(xmldoc.getElementsByTagName('StageXPosition')[0].childNodes[0].nodeValue)/float(calibrationX)/float(downsizingFactor)) + 10000 , int(float(xmldoc.getElementsByTagName('StageYPosition')[0].childNodes[0].nodeValue)/float(calibrationY)/float(downsizingFactor)) + 10000])

            # import patches to the trakem project
            importFilePath = createImportFile(workingFolder, patchPaths, patchLocations)
            task = loader.importImages(layerset.getLayers().get(channelId), importFilePath, '\t', 1, 1, False, 1, 0)
            task.join()

    project.save()
    IJ.log('Assembling the channels done and saved into ' + projectPath)
    fc.resizeDisplay(layerset)
    fc.closeProject(project)

#######################
# 2. Assembling the full res overview
#######################
# Rebuilds the same layout at full resolution by upscaling the low-res
# positions by downsizingFactor.
if not os.path.isfile(projectPathFullRes):
    IJ.log('Creating the full res project: ' + str(projectPathFullRes))
    project, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(trakemFolder, nChannels))
    layerset.setDimensions(0, 0, 100000, 100000)
    loader.setMipMapsRegeneration(False)
    project.saveAs(projectPathFullRes, True)

    layer = layerset.getLayers().get(0)

    # insterting from the calculated registration does not work because there are negative values that trigger an offset of the layerset (though I do not understand why the problem does not occur in the low res project)
    lowResproject, lowResloader, lowReslayerset, lowResnLayers = fc.openTrakemProject(projectPath)

    for l, layer in enumerate(layerset.getLayers()):

        # renaming to e.g. DAPI_038.tif for stitching plugin instead of the ZEN names
        if l == 0:
            imageNames = fc.naturalSort(filter(lambda x:
                (os.path.splitext(x)[1] in ['.tif', '.TIF']) and
                ('c0x' in x) and
                ('BF' in x),
                os.listdir(inputFolder8bitFullRes)))
        else:
            imageNames = fc.naturalSort(filter(lambda x:
                (os.path.splitext(x)[1] in ['.tif', '.TIF']) and
                ('c' + str(l-1) + 'x') in x,
                os.listdir(inputFolder8bitFullRes)))

        for imageId, imageName in enumerate(fc.naturalSort(imageNames)):
            sourcePatchPath = os.path.join(inputFolder8bitFullRes, imageName)
            targetPatchPath = os.path.join(inputFolder8bitFullRes, channels[l] + '_' + str(imageId).zfill(3) + '.tif')
            os.rename(sourcePatchPath, targetPatchPath)

        patchPaths = []
        patchLocations = []
        for patch in lowReslayerset.getLayers().get(l).getDisplayables(Patch):
            lowPatchPath = patch.getImageFilePath()
            # renaming the patchPaths to the full resolution ones (replacing the folder)
            highPatchPath = os.path.normpath(lowPatchPath).replace(os.path.normpath(inputFolder8bit), os.path.normpath(inputFolder8bitFullRes))
            print 'lowPatchPath', lowPatchPath
            print 'highPatchPath', highPatchPath
            # 8/0
            patchPaths.append(highPatchPath)
            # upscaling the locations for the full res
            patchLocations.append([int(round(patch.getX()*float(downsizingFactor))), int(round(patch.getY()*float(downsizingFactor)))])

        # trakEM import
        importFilePath = createImportFile(workingFolder, patchPaths, patchLocations)
        task = loader.importImages(layer, importFilePath, '\t', 1, 1, False, 1, 0)
        task.join()

    fc.resizeDisplay(layerset)

    project.save()
    time.sleep(2)
    fc.closeProject(project)
    fc.closeProject(lowResproject)

#######################
# 3. (section continues below: BF/fluo channel offset)
# (cont.) Asking the user to input an offset between the BF and fluo channels
# (they have been imaged in two different sessions, the stage might have moved)
#######################
if not os.path.isfile(fluoOffsetPath): #/!\ Warning: the fluo offset is on the lowRes project
    IJ.log('Asking user for offset between BF and fluo channels')
    project, loader, layerset, nLayers = fc.openTrakemProject(projectPath)
    offsetPoints = getLandmarks(project, fluoOffsetPath, 'Place pairs of landmarks to assess the fluorescent channel offset {BF, Fluo}')
    meanOffset = [0,0]

    # if there are points from the user, take the average
    if len(offsetPoints)>0:
        # points come in {BF, Fluo} pairs: offset k is point(2k+1) - point(2k)
        offsets = []
        for k in range(len(offsetPoints)/2):
            offsets.append([offsetPoints[2*k+1][0] - offsetPoints[2*k][0], offsetPoints[2*k+1][1] - offsetPoints[2*k][1]])

        for offset in offsets:
            meanOffset[0] = meanOffset[0] + offset[0]
            meanOffset[1] = meanOffset[1] + offset[1]
        meanOffset[0] = meanOffset[0]/float(len(offsets))
        meanOffset[1] = meanOffset[1]/float(len(offsets))

    writePoints(fluoOffsetPath, [meanOffset])

    # offsetting the patches
    for id, patch in enumerate(layerset.getLayers().get(channels.index('BF')).getDisplayables(Patch)):
        patch.setLocation(patch.getX() + meanOffset[0], patch.getY() + meanOffset[1])

    # save and close
    fc.resizeDisplay(layerset)
    project.save()
    fc.closeProject(project)

    # Offsetting the fullResProject (offset scaled up by the downsizing factor)
    project, loader, layerset, nLayers = fc.openTrakemProject(projectPathFullRes)
    for id, patch in enumerate(layerset.getLayers().get(0).getDisplayables(Patch)):
        patch.setLocation(patch.getX() + meanOffset[0] * downsizingFactor, patch.getY() + meanOffset[1] * downsizingFactor)
    # fc.resizeDisplay(layerset) /!\ WARNING: Never resize the display of the fullresProject, otherwise the offset gets lost
    project.save()
    fc.closeProject(project)

#######################
# 4. Asking user for the template
#######################
if not os.path.isfile(templateMagsPath):
    IJ.log('3. Asking user for the template')  # NOTE(review): log says '3.' but this is section 4 — numbering drifted
    project, loader, layerset, nLayers = fc.openTrakemProject(projectPathFullRes)
    getTemplates(project)
    project.save() # saving mipmaps
    fc.closeProject(project)

    # write the low res templates for python segmentation
    templateMagLowRes = readSectionCoordinates(templateMagsPath, downFactor = downsizingFactor)[0]
    writeSections(templateMagsLowResPath, [templateMagLowRes])

    templateTissueLowRes = readSectionCoordinates(templateSectionsPath, downFactor = downsizingFactor)[0]
    writeSections(templateSectionsLowResPath, [templateTissueLowRes])

#######################
# 5. Asking user for the wafer landmarks
#######################
if not os.path.isfile(landmarksPath):
    project, loader, layerset, nLayers = fc.openTrakemProject(projectPathFullRes)
    landmarks = getLandmarks(project, landmarksPath, 'Select at least 4 landmarks for CLEM imaging (4 is usually ok). You can postpone that to another time.')
    addLandmarkOverlays(project, landmarks)
    project.save() # saving mipmaps
    fc.closeProject(project)

#######################
# 6. (section continues below: ROI in the tissue part)
# (cont.) Asking user for the ROI in the tissue part
#######################
if not os.path.isfile(sourceROIDescriptionPath):
    project, loader, layerset, nLayers = fc.openTrakemProject(projectPathFullRes)
    getROIDescription(project)
    project.save() # saving mipmaps
    fc.closeProject(project)


####################
# Optionally start the manual segmentation mode
####################
# if not fc.getOK('Manual segmentation mode ?'):
if True:  # segmentation mode currently forced on (the getOK prompt above is disabled)
    #############################
    # WEKA process the DAPI sections
    #############################
    imageNames = fc.naturalSort(filter(lambda x: # the DAPI tiles
        (os.path.splitext(x)[1] in ['.tif', '.TIF']) and
        ('DAPI' in x)
        , fc.naturalSort(os.listdir(inputFolder8bit))))
    imagePaths = [os.path.join(inputFolder8bit, imageName) for imageName in imageNames]

    # CLAHE-enhanced copies of the DAPI tiles
    clahedFolder = fc.mkdir_p(os.path.join(workingFolder, 'clahedFolder'))
    clahedPaths = [os.path.join(clahedFolder, os.path.splitext(imageName)[0] + '_CLAHED' + os.path.splitext(imageName)[1])
        for imageName in fc.naturalSort(imageNames)]

    # WEKA-classified copies
    wekaedFolder = fc.mkdir_p(os.path.join(workingFolder, 'wekaedFolder'))
    wekaedPaths = [os.path.join(wekaedFolder, os.path.splitext(imageName)[0] + '_WEKAED' + os.path.splitext(imageName)[1])
        for imageName in fc.naturalSort(imageNames)]

    wekaedStackPath = os.path.join(workingFolder, 'wekaedStack.tif')
    wekaModelPath = os.path.join(workingFolder, 'wekaClassifier.model')

    edgedFolder = fc.mkdir_p(os.path.join(workingFolder, 'edgedFolder'))
    edgedPaths = [os.path.join(edgedFolder, os.path.splitext(imageName)[0] + '_EDGED' + os.path.splitext(imageName)[1])
        for imageName in fc.naturalSort(imageNames)]

    if sum([os.path.isfile(wekaedPath) for wekaedPath in wekaedPaths]) != len(imageNames): # check whether files already created
        if sum([os.path.isfile(clahedPath) for clahedPath in clahedPaths]) != len(imageNames):
            # creating the DAPI clahed images (local contrast applied twice)
            for k, imagePath in enumerate(imagePaths): # (the list is sorted)
                im = IJ.openImage(imagePath)
                im = fc.localContrast(im)
                im = fc.localContrast(im)
                IJ.log('CLAHING ' + str(k) + ' out of ' + str(len(imagePaths)))
                IJ.save(im, clahedPaths[k])

        # getting the weka model from user on the clahed DAPI images
        if not os.path.isfile(wekaModelPath):
            WaitForUserDialog('Create a WEKA model on the CLAHED images and save it as "wekaClassifier.model" in "workingFolder" then click OK.').show()
        segmentator = WekaSegmentation()
        segmentator.loadClassifier(wekaModelPath);

        # apply the weka classifier and save the stack
        imageStack = fc.stackFromPaths(clahedPaths)
        if not os.path.isfile(wekaedStackPath):
            result = segmentator.applyClassifier(imageStack, 0, 1) # 0 indicates number of threads is auto-detected
            result.show()
            time.sleep(1)
            IJ.run('Grays') # or IJ.run(im, 'Grays', '')
            IJ.setMinAndMax(0, 1) # warning: relies on the image being the current one
            time.sleep(1)
            IJ.run(result, '8-bit', '')
            IJ.save(result, wekaedStackPath)

            # # small trick to ask an input from the user. Rerun the script then
            # WaitForUserDialog('Open the WEKA stack and find parameters for subsequent thresholding. Then rerun').show()
            # 8/0
            result.close()

        result = IJ.openImage(wekaedStackPath)
        stack = result.getImageStack()

        # loop through the tiles
        for k, wekaedPath in enumerate(wekaedPaths):
            # save the thresholded/eroded WEKA
            IJ.log('Saving WEKA ...')
            tileIndex = result.getStackIndex(0, 0, k + 1) # to access the slice in the stack
            wekaTile = ImagePlus('wekaed_' + str(k), stack.getProcessor(tileIndex).convertToByteProcessor())
            IJ.save(wekaTile, wekaedPath)

            # # save the edged weka: probably not necessary any more ...
            # IJ.run(wekaTile, 'Find Edges', '')
            # # edged = fc.blur(wekaed, 2)
            # # edged = fc.minMax(edged, 60, 60)
            # edged = fc.minMax(wekaed, 60, 60)
            # IJ.save(edged, edgedPaths[k])
        result.changes = False
        result.close()

    ##############################################
    # Edge the raw images for section orientation flipping
    ##############################################
    imageNames = fc.naturalSort(filter(lambda x: (os.path.splitext(x)[1] in ['.tif', '.TIF']) and ('BF' in x), os.listdir(inputFolder8bit)))
    imagePaths = [os.path.join(inputFolder8bit, imageName) for imageName in imageNames]
    rawEdgedFolder = fc.mkdir_p(os.path.join(workingFolder, 'rawEdgedFolder'))
    rawEdgedPaths = [os.path.join(rawEdgedFolder, os.path.splitext(imageName)[0] + '_rawEdged' + os.path.splitext(imageName)[1]) for imageName in imageNames]

    if sum([os.path.isfile(rawEdgedPath) for rawEdgedPath in rawEdgedPaths]) != len(imageNames):
        IJ.log('RawEdging ...')
        # median blur + local contrast + edge detection on each BF tile
        for k, imagePath in enumerate(imagePaths):
            im = IJ.openImage(imagePath)
            IJ.run(im, 'Median...', 'radius=2')
            im = fc.localContrast(im)
            IJ.run(im, 'Find Edges', '')
            IJ.log('RawEdging ' + str(k) + ' out of ' + str(len(imagePaths)))
            IJ.save(im, rawEdgedPaths[k])

    dapiWekaedPath = os.path.join(workingFolder, 'dapiWekaed.tif')
    if not os.path.isfile(projectPathBis):
        #######################
        # Insert the wekaed tiles in the last layers of projectBis
        IJ.log('Insert the wekaed tiles in the last layers of projectBis')
        #######################
        shutil.copyfile(projectPath, projectPathBis) # the low res project with all channels
        project, loader, layerset, nLayers = fc.openTrakemProject(projectPathBis)

        meanOffset = readPoints(fluoOffsetPath) # actually not needed here

        wekaedPatchPaths = []
        edgedPatchPaths = []

        # get patch coordinates from the dapi layer and convert the names of the patches to insert
        patchLocations = []
        for id, patch in enumerate(layerset.getLayers().get(1).getDisplayables(Patch)): # read patch coordinates in the dapi layer (no offset)
            IJ.log('Processing patch ' + str(id))
            patchLocations.append([patch.getX() , patch.getY()])

            patchPath = patch.getImageFilePath()
            patchName = os.path.basename(patchPath)
            # patchName = os.path.basename(patchPath).replace('BF', 'Fluo')

            # get the wekaedPath from the dapi path
            wekaedPatchName = os.path.splitext(patchName)[0] + '_WEKAED' + os.path.splitext(patchName)[1]
            wekaedPatchPath = os.path.join(wekaedFolder, wekaedPatchName)
            wekaedPatchPaths.append(wekaedPatchPath)

            # get the edgedPath from the dapi path
            edgedPatchName = os.path.splitext(patchName)[0] + '_EDGED' + os.path.splitext(patchName)[1]
            edgedPatchPath = os.path.join(edgedFolder, edgedPatchName)
            edgedPatchPaths.append(edgedPatchPath)

        # insert the patches in layer number (nChannels)
        importFile = createImportFile(workingFolder, wekaedPatchPaths, patchLocations, layer = nChannels)
        task = loader.importImages(layerset.getLayers().get(0), importFile, '\t', 1, 1, False, 1, 0)
        task.join()

        # insert the patches in layer number (nChannels + 1)
        importFile = createImportFile(workingFolder, edgedPatchPaths, patchLocations, layer = nChannels + 1)
        task = loader.importImages(layerset.getLayers().get(0), importFile, '\t', 1, 1, False, 1, 0)
        task.join()

        # Insert the raw edges, useful for flips
        patchLocations = [] # need to use new patchLocations because of the BF-fluo offset
        rawEdgedPatchPaths = []
        for id, patch in enumerate(layerset.getLayers().get(0).getDisplayables(Patch)):
            IJ.log('Inserting rawedged patch ' + str(id))
            patchLocations.append([patch.getX() , patch.getY()]) # already contains the offset

            patchPath = patch.getImageFilePath()
            patchName = os.path.basename(patchPath)

            rawEdgedPatchName = os.path.splitext(patchName)[0] + '_rawEdged' + os.path.splitext(patchName)[1]
            rawEdgedPatchPath = os.path.join(rawEdgedFolder, rawEdgedPatchName)
            rawEdgedPatchPaths.append(rawEdgedPatchPath)

        importFile = createImportFile(workingFolder, rawEdgedPatchPaths, patchLocations, layer = nChannels + 2)
        task = loader.importImages(layerset.getLayers().get(0), importFile, '\t', 1, 1, False, 1, 0)
        task.join()

        # blending the raw and the 3 last channels
        Blending.blendLayerWise(layerset.getLayers(0, 0), True, None) # layerRaw
        Blending.blendLayerWise(layerset.getLayers(nChannels, nChannels), True, None) # layerWekaed
        Blending.blendLayerWise(layerset.getLayers(nChannels + 1, nChannels + 1), True, None) # layerWekaed
        Blending.blendLayerWise(layerset.getLayers(nChannels + 2, nChannels + 2), True, None) # layerWekaed

        # export the flattened wekaed-DAPI layer as one image
        layerDapiWekaed = layerset.getLayers().get(nChannels)
        dapiWekaed = loader.getFlatImage(layerDapiWekaed,
            # Rectangle(0, 0, layerDapiWekaed.getLayerWidth(), layerDapiWekaed.getLayerHeight()), # warning: should I take the bounding box of the layer, not of the layerset because of the fluo offset ?
            layerset.get2DBounds(),
            1, 0x7fffffff, ImagePlus.GRAY8, Patch,
            layerDapiWekaed.getAll(Patch), True, Color.black, None)

        IJ.save(dapiWekaed, dapiWekaedPath)
        dapiWekaed.show()

        project.save()
        fc.closeProject(project)

        # small trick to ask an input from the user. Rerun the script then
        WaitForUserDialog('Open the wekaed wafer and find parameters for subsequent thresholding. Then rerun').show()
        dapiWekaed.close()
        8/0  # deliberate ZeroDivisionError: halts the script so the user tunes thresholds and reruns

    ########################
    # Second round of templating: create the templates, calculate dapiCenter, calculate box in which there should not be any edge
    ########################
    templateTissue = readSectionCoordinates(templateSectionsLowResPath)[0] # work with the low res templates
    templateMag = readSectionCoordinates(templateMagsLowResPath)[0]

    userTemplateInputLowRes = readSectionCoordinates(userTemplateInputPath, downFactor = downsizingFactor)
    userTemplateTissueLowRes = userTemplateInputLowRes[0]
    userTemplateMagLowRes = userTemplateInputLowRes[1]

    # 4 corners each for the tissue part and the mag (magnetic/DAPI) part
    t0, t1, t2, t3 = userTemplateTissueLowRes
    m0, m1, m2, m3 = userTemplateMagLowRes

    print 'userTemplateInputLowRes', userTemplateInputLowRes
    print 'userTemplateTissueLowRes', userTemplateTissueLowRes
    print 'userTemplateMagLowRes', userTemplateMagLowRes

    completeSection = [m0, t1, t2, m3] # containing mag and tissue
    sectionExtent = longestDiagonal(completeSection)

    completeSectionCenter = barycenter(completeSection)

    # extraction box 3x the section extent, centered on the section
    extractingBoxSize = int(sectionExtent * 3)
    extractingBox = Rectangle(int(round(completeSectionCenter[0] - extractingBoxSize/2.)) , int(round(completeSectionCenter[1] - extractingBoxSize/2.)), extractingBoxSize, extractingBoxSize)
    templateOriginalAngle = getAngle([m0[0], m0[1], m1[0], m1[1]]) # radian


    # the important reference is the center of the completeSection: calculate positions relative to that center

    # rotate the corners around the center of the completeSection in trakem
    rotTransform = AffineTransform.getRotateInstance(-templateOriginalAngle, completeSectionCenter[0], completeSectionCenter[1])
    rotatedTissueInWaferCoordinates = applyTransform(userTemplateTissueLowRes, rotTransform)
    rotatedMagInWaferCoordinates = applyTransform(userTemplateMagLowRes, rotTransform)

    rtw0, rtw1, rtw2, rtw3 = rotatedTissueInWaferCoordinates # rtw stands for rotated tissue in wafer coordinates
    rmw0, rmw1, rmw2, rmw3 = rotatedMagInWaferCoordinates # rmw stands for rotated mag in wafer coordinates

    # get the locations of the rotated template corners relative to the center of the section
    rtc0 = [rtw0[0] - completeSectionCenter[0], rtw0[1] - completeSectionCenter[1]] # rtc stands for Rotated Tissue in Center coordinates (relative to the center of the completeSection)
    rtc1 = [rtw1[0] - completeSectionCenter[0], rtw1[1] - completeSectionCenter[1]]
    rtc2 = [rtw2[0] - completeSectionCenter[0], rtw2[1] - completeSectionCenter[1]]
    rtc3 = [rtw3[0] - completeSectionCenter[0], rtw3[1] - completeSectionCenter[1]]

    rmc0 = [rmw0[0] - completeSectionCenter[0], rmw0[1] - completeSectionCenter[1]] # rmc stands for Rotated mag (=dapi) in Center coordinates (relative to the center of the completeSection)
    rmc1 = [rmw1[0] - completeSectionCenter[0], rmw1[1] - completeSectionCenter[1]]
    rmc2 = [rmw2[0] - completeSectionCenter[0], rmw2[1] - completeSectionCenter[1]]
    rmc3 = [rmw3[0] - completeSectionCenter[0], rmw3[1] - completeSectionCenter[1]]

    # get the locations of the rotated template corners relative to the center of the *dapi center*
    dapiCenter = barycenter([rmc0, rmc1, rmc2, rmc3]) # in completeSection coordinates

    rtd0 = [rtc0[0] - dapiCenter[0], rtc0[1] - dapiCenter[1]] # rtd stands for Rotated Tissue in dapiCenter coordinates (relative to the center of the dapi center)
    rtd1 = [rtc1[0] - dapiCenter[0], rtc1[1] - dapiCenter[1]]
    rtd2 = [rtc2[0] - dapiCenter[0], rtc2[1] - dapiCenter[1]]
    rtd3 = [rtc3[0] - dapiCenter[0], rtc3[1] - dapiCenter[1]]

    rmd0 = [rmc0[0] - dapiCenter[0], rmc0[1] - dapiCenter[1]] # rmd: rotated mag corners in dapiCenter coordinates
    rmd1 = [rmc1[0] - dapiCenter[0], rmc1[1] - dapiCenter[1]]
    rmd2 = [rmc2[0] - dapiCenter[0], rmc2[1] - dapiCenter[1]]
    rmd3 = [rmc3[0] - dapiCenter[0], rmc3[1] - dapiCenter[1]]

    completeSectionInCenterCoordinates = [rmc0, rtc1, rtc2, rmc3] # centerCoordinates: the center is the center of the complete section
    expandedSection = shrink(completeSectionInCenterCoordinates, -0.2) # 20% bigger
    templateBoxInCenterCoordinates = sectionToPoly(expandedSection).getBounds()

    # final crop with the templateBox in the coordinates of the extracted image
    templateBox = Rectangle(int(round(templateBoxInCenterCoordinates.x + extractingBox.width/2.)),
        int(round(templateBoxInCenterCoordinates.y + extractingBox.height/2.)),
        int(round(templateBoxInCenterCoordinates.width)),
        int(round(templateBoxInCenterCoordinates.height)))

    # Get the corner coordinates in the new coordinate system of the extracted template
    tt0 = [int(round(rtc0[0] + templateBox.width/2.)), int(round(rtc0[1] + templateBox.height/2.))] # tt stands for tissue in template coordinates
    tt1 = [int(round(rtc1[0] + templateBox.width/2.)), int(round(rtc1[1] + templateBox.height/2.))]
    tt2 = [int(round(rtc2[0] + templateBox.width/2.)), int(round(rtc2[1] + templateBox.height/2.))]
    tt3 = [int(round(rtc3[0] + templateBox.width/2.)), int(round(rtc3[1] + templateBox.height/2.))]

    mt0 = [int(round(rmc0[0] + templateBox.width/2.)), int(round(rmc0[1] + templateBox.height/2.))] # mt stands for mag in template coordinates
    mt1 = [int(round(rmc1[0] + templateBox.width/2.)), int(round(rmc1[1] + templateBox.height/2.))]
    mt2 = [int(round(rmc2[0] + templateBox.width/2.)), int(round(rmc2[1] + templateBox.height/2.))]
    mt3 = [int(round(rmc3[0] + templateBox.width/2.)), int(round(rmc3[1] + templateBox.height/2.))]

    templateDapiCenter = barycenter([mt0, mt1, mt2, mt3]) # the center of dapi is the barycenter of the template mag section

    edgeFreeSectionTemplateCoordinates = shrink([tt0, tt1, tt2, tt3], 0.2) # 80% of the tissue box should be free of edges (a flipped section would have edges in this region)
    edgeFreeSectionCenterCoordinates = shrink([rtc0, rtc1, rtc2, rtc3], 0.2)

    magBox = sectionToPoly([mt0, mt1, mt2, mt3]).getBounds()

    # Extract and save one template image per layer of projectBis.
    project, loader, layerset, nLayers = fc.openTrakemProject(projectPathBis)
    layerNames = ['BF', 'DAPI', '488', '546', 'dapiWekaed', 'nothing', 'rawEdged']

    for l, layer in enumerate(layerset.getLayers()):
        finalTemplatePath = os.path.join(workingFolder, 'finalTemplate_' + layerNames[l] + '.tif')

        extractedTemplate = loader.getFlatImage(layer, extractingBox , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None)
        # un-rotate so the template is axis-aligned
        rotate(extractedTemplate, -templateOriginalAngle * 180 /float(PI))

        if layerNames[l] == 'dapiWekaed': # the template is simply the white rectangle ... I could also make an artificial one
            extractedTemplate.setRoi(templateBox)
            extractedTemplate = extractedTemplate.crop()

            extractedTemplate.setRoi(magBox)
            finalTemplate = extractedTemplate.crop()
            IJ.save(finalTemplate, finalTemplatePath)
        else:
            extractedTemplate.setRoi(templateBox)
            finalTemplate = extractedTemplate.crop()

            IJ.save(finalTemplate, finalTemplatePath)

        # overlay all reference points for visual checking
        allPoints = [tt0, tt1, tt2, tt3, mt0, mt1, mt2, mt3, templateDapiCenter] + edgeFreeSectionTemplateCoordinates
        print 'allPoints', allPoints
        poly = Polygon([point[0] for point in allPoints], [point[1] for point in allPoints], len(allPoints))

        finalTemplate.setRoi(PointRoi(poly.xpoints, poly.ypoints, poly.npoints))
        flattenedTemplate = finalTemplate.flatten()
        flattenedTemplatePath = os.path.join(workingFolder, 'finalTemplate_WithPoints_' + layerNames[l] + '.tif')

        IJ.save(flattenedTemplate, flattenedTemplatePath)
        # finalTemplate.show()

    fc.closeProject(project)
    # 8/0

    ##############################################
    # Find the DAPI centers
    ##############################################
    if not os.path.isfile(dapiCentersPath):
        dapiWekaed = IJ.openImage(dapiWekaedPath)

        # preprocess the dapiWekaed to create good separated components
        dapiWekaed = fc.minMax(dapiWekaed, 110, 255)
        dapiWekaed = fc.blur(dapiWekaed, 5)
        dapiWekaed = fc.minMax(dapiWekaed, 50, 160)
dapiWekaed = fc.blur(dapiWekaed, 5) + dapiWekaed = fc.minMax(dapiWekaed, 80, 120) + IJ.run(dapiWekaed, 'Gray Morphology', 'radius=10 type=circle operator=erode') + dapiWekaed = fc.minMax(dapiWekaed, 245, 245) + + templateMag = readSectionCoordinates(templateMagsLowResPath)[0] + dapiCenters = getConnectedComponents(dapiWekaed, minSize = getArea(templateMag)/10 ) # to adjust probably + IJ.log('There are ' + str(len(dapiCenters)) + ' dapiCenters') + + with open(dapiCentersPath, 'w') as f: + pickle.dump(dapiCenters, f) + + candidateEdgedFolder = fc.mkdir_p(os.path.join(workingFolder, 'candidateEdgedFolder')) + candidateErodedFolder = fc.mkdir_p(os.path.join(workingFolder, 'candidateErodedFolder')) + candidateRawFolder = fc.mkdir_p(os.path.join(workingFolder, 'candidateRawFolder')) + candidateRawEdgedFolder = fc.mkdir_p(os.path.join(workingFolder, 'candidateRawEdgedFolder')) + + candidateHighResRawFolder = fc.mkdir_p(os.path.join(workingFolder, 'candidateHighResRawFolder')) + + ####################### + # Exporting all candidates centered on the dapiCenters + ####################### + if len(os.listdir(candidateRawFolder)) == 0: + IJ.log('Exporting all candidates centered on the dapiCenters') + project, loader, layerset, nLayers = fc.openTrakemProject(projectPathBis) + dapiCenters = loader.deserialize(dapiCentersPath) + with open(dapiCentersPath, 'r') as f: + dapiCenters = pickle.load(f) + + # 1. Determining the size of the candidates based on the template size + templateSections = readSectionCoordinates(templateSectionsPath, downFactor = downsizingFactor)[0] # why plural ? 
+ sectionExtent = int(round(longestDiagonal(templateSections))) + IJ.log('Section extent is ' + str(sectionExtent) + ' pixels') + + candidateWidth = sectionExtent * 3 # *2 because the candidate will be rotated and cropped for matching + candidateHeight = sectionExtent * 3 + + bounds = layerset.get2DBounds() + layerRaw = layerset.getLayers().get(0) + layerWekaed = layerset.getLayers().get(nChannels) + layerEdged = layerset.getLayers().get(nChannels + 1) + layerRawEdged = layerset.getLayers().get(nChannels + 2) + + for idSection, dapiCenter in enumerate(dapiCenters): + x, y = dapiCenter + + rawPath = os.path.join(candidateRawFolder, 'candidate_' + str(idSection).zfill(4) + '_Raw.tif') + thresPath = os.path.join(candidateErodedFolder, 'candidate_' + str(idSection).zfill(4) + '_Wekaed.tif') + edgesPath = os.path.join(candidateEdgedFolder, 'candidate_' + str(idSection).zfill(4) + '_Edges.tif') + rawEdgesPath = os.path.join(candidateRawEdgedFolder, 'candidate_' + str(idSection).zfill(4) + '_RawEdges.tif') + + roiExport = Rectangle(int(round(x - candidateWidth/2)), int(round(y - candidateHeight/2)), candidateWidth, candidateHeight) + IJ.log('x ' + str(x) + '; y ' + str(y) + ' roiexport' + str(roiExport)) + + # save the raw image + rawPatch = loader.getFlatImage(layerRaw, roiExport , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerRaw.getAll(Patch), True, Color.black, None) + IJ.save(rawPatch, rawPath) + + # save the thresholded image + thresholdedPatch = loader.getFlatImage(layerWekaed, roiExport , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerWekaed.getAll(Patch), True, Color.black, None) + IJ.save(thresholdedPatch, thresPath) + + # save the edges image + patchEdges = loader.getFlatImage(layerEdged, roiExport , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerEdged.getAll(Patch), True, Color.black, None) + IJ.save(patchEdges, edgesPath) + + # save the edges image + patchRawEdges = loader.getFlatImage(layerRawEdged, roiExport , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, 
layerRawEdged.getAll(Patch), True, Color.black, None) + IJ.save(patchRawEdges, rawEdgesPath) + + project.save() + fc.closeProject(project) + + ####################### + # Exporting all candidates centered on the dapiCenters of the high res raw wafer + ####################### + if len(os.listdir(candidateHighResRawFolder)) == 0: + IJ.log('Exporting all candidates centered on the dapiCenters') + project, loader, layerset, nLayers = fc.openTrakemProject(projectPathFullRes) + dapiCenters = loader.deserialize(dapiCentersPath) + with open(dapiCentersPath, 'r') as f: + dapiCenters = pickle.load(f) + + dapiCentersHighRes = [ [dapiCenter[0]*float(downsizingFactor), dapiCenter[1]*float(downsizingFactor)] for dapiCenter in dapiCenters] + + + # 1. Determining the size of the candidates based on the saved low res candidates + candidate0 = IJ.openImage(os.path.join(candidateRawFolder, os.listdir(candidateRawFolder)[0])) + candidateWidth = int(round(candidate0.getWidth() * downsizingFactor)) + candidateHeight = int(round(candidate0.getHeight() * downsizingFactor)) + candidate0.close() + + bounds = layerset.get2DBounds() + layerRaw = layerset.getLayers().get(0) + + for idSection, dapiCenter in enumerate(dapiCentersHighRes): + x, y = dapiCenter + + rawPath = os.path.join(candidateHighResRawFolder, 'candidate_' + str(idSection).zfill(4) + '_Raw.tif') + + roiExport = Rectangle(int(round(x - candidateWidth/2)), int(round(y - candidateHeight/2)), candidateWidth, candidateHeight) + IJ.log('x ' + str(x) + '; y ' + str(y) + ' roiexport' + str(roiExport)) + + # save the raw image + rawPatch = loader.getFlatImage(layerRaw, roiExport , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerRaw.getAll(Patch), True, Color.black, None) + IJ.save(rawPatch, rawPath) + + project.save() + fc.closeProject(project) + + + ##################### + # Template matching + ##################### + + if not os.path.isfile(sectionsSpecsPath): + + # ############################# + # # run matching for raw images + # 
############################# + # rawTemplateMatchingPath = os.path.join(workingFolder, 'finalTemplate_' + layerNames[0] + '.tif') + # rawTemplate = IJ.openImage(rawTemplateMatchingPath) + # wTemplate = rawTemplate.getWidth() + # hTemplate = rawTemplate.getHeight() + # rawTemplate.close() + + # atom = AtomicInteger(0) + # rawSectionResults = [] + # candidatePaths = [os.path.join(candidateRawFolder, candidateName) for candidateName in fc.naturalSort(os.listdir(candidateRawFolder))] + # fc.startThreads(templateMatchCandidate, fractionCores = 1, wait = 0, arguments = (atom, candidatePaths, rawTemplateMatchingPath, rawSectionResults)) + # IJ.log(str(rawSectionResults)) + + ############################# + # run matching for dapiWekaed images + ############################# + + # the dapi template has a different size, the w/hTemplate must be updated + dapiTemplateMatchingPath = os.path.join(workingFolder, 'finalTemplate_' + layerNames[4] + '.tif') + dapiTemplate = IJ.openImage(dapiTemplateMatchingPath) + wTemplate = dapiTemplate.getWidth() + hTemplate = dapiTemplate.getHeight() + dapiTemplate.close() + + atom = AtomicInteger(0) + dapiSectionResults = [] + candidatePaths = [os.path.join(candidateErodedFolder, candidateName) for candidateName in fc.naturalSort(os.listdir(candidateErodedFolder))] + candidateEdgedPaths = [os.path.join(candidateRawEdgedFolder, candidateName) for candidateName in fc.naturalSort(os.listdir(candidateRawEdgedFolder))] + fc.startThreads(templateMatchCandidate, fractionCores = 0.8, wait = 0, arguments = (atom, candidatePaths, dapiTemplateMatchingPath, dapiSectionResults)) + IJ.log(str(dapiSectionResults)) + # 8/0 + + sectionsSpecs = [] # list with nDapiCenter elements containing [sectionId, r, x, y] + for id, dapiSectionResult in enumerate(dapiSectionResults): + sectionId = dapiSectionResult[0] + # candidate = IJ.openImage(candidatePaths[sectionId]) + cand = IJ.openImage(candidateEdgedPaths[sectionId]) + sectionResults = dapiSectionResult[1:] + + 
wCandidate = cand.getWidth() + hCandidate = cand.getHeight() + + solutionRank = 0 # counter in the while loop to go through the solutions until a non-flipped section is found + foundNonFlippedSection = False + while (not foundNonFlippedSection) and (solutionRank<50) : + candidate = cand.duplicate() + bestResult = sectionResults[solutionRank] # because it has been sorted + ccScore, [r, x, y] = bestResult + + + rotate(candidate, r) + + # the center of the template sliding window + xWindowCenter = wCandidate/2. - neighborhood/2. + xStep * x + yWindowCenter = hCandidate/2. - neighborhood/2. + yStep * y + + # # show center of template window + # poly = Polygon([int(round(xWindowCenter))], [int(round(yWindowCenter))], 1) + # candidate.setRoi(PointRoi(poly.xpoints, poly.ypoints, poly.npoints)) + # candidate.show() + + tissueDapiCoordinates = [rtd0, rtd1, rtd2, rtd3] # tissue corners in dapiCenter coordinates + edgeFreeDapiCoordinates = shrink(tissueDapiCoordinates, tissueShrinkingForEdgeFreeRegion) + + # edgeFreeDapiCoordinates is in relative dapiCenter coordinates, need to transform in coordinates of the current image + translationToCandidateDapiCenter = AffineTransform.getTranslateInstance(int(round(wCandidate/2.)), int(round(hCandidate/2.))) + edgeFreeSection = applyTransform(edgeFreeDapiCoordinates, translationToCandidateDapiCenter) + + # # show edgeFree section : the region that should be edge free when a section is not flipped + edgeFreePoly = Polygon([int(round(point[0])) for point in edgeFreeSection], [ int(round(point[1])) for point in edgeFreeSection], len(edgeFreeSection)) + + candidate = fc.minMax(candidate, 60, 200) + + edgeFreePoly = PolygonRoi(edgeFreePoly, Roi.POLYGON) + candidate.setRoi(edgeFreePoly) + edgeMeasure = candidate.getStatistics(Measurements.MEAN).mean + print 'edgeMeasure', edgeMeasure + candidate.setTitle( str(sectionId) + ' - ' + str(int(edgeMeasure)) ) + + if edgeMeasure < 1: + foundNonFlippedSection = True + 
sectionsSpecs.append([sectionId, r, x, y, ccScore]) + # candidate.show() + + solutionRank = solutionRank + 1 + + with open(sectionsSpecsPath, 'w') as f: + pickle.dump(sectionsSpecs, f) + + #################### + # Write the sections in wafer coordinates + #################### + if not os.path.isfile(tissueSectionsHighResPath): + with open(dapiCentersPath, 'r') as f: + waferDapiCenters = pickle.load(f) + + with open(sectionsSpecsPath, 'r') as f: + sectionsSpecs = pickle.load(f) + + candidate0 = IJ.openImage(os.path.join(candidateRawFolder, os.listdir(candidateRawFolder)[0])) + wCandidate = candidate0.getWidth() # *2 because the candidate will be rotated and cropped for matching + hCandidate = candidate0.getHeight() + candidate0.close() + + magSectionsLowRes = [] + tissueSectionsLowRes = [] + + magSectionsHighRes = [] + tissueSectionsHighRes = [] + + completeSectionDapiCenter = [rtd0, rtd1, rtd2, rtd3, rmd0, rmd1, rmd2, rmd3] # relative to the dapi center + + for sectionSpecs in sectionsSpecs: + sectionId, r, x, y, ccScore = sectionSpecs + waferDapiCenterLowRes = waferDapiCenters[sectionId] + # waferDapiCenterHighRes = [waferDapiCenterLowRes[0]*float(downsizingFactor), waferDapiCenterLowRes[1]*float(downsizingFactor)] + + + # found dapiCenter during the matching search relative to the dapiCenter + xWindowCenter = - neighborhood/2. + xStep * x + yWindowCenter = - neighborhood/2. + yStep * y + + transform = AffineTransform() + translate1 = AffineTransform.getTranslateInstance(xWindowCenter, yWindowCenter) # /!\ I believe correct, but hard to confirm ... 
+ translate2 = AffineTransform.getTranslateInstance(waferDapiCenterLowRes[0], waferDapiCenterLowRes[1]) + rotateTransform = AffineTransform.getRotateInstance(-r * PI/float(180)) + + transform.concatenate(translate1) + transform.concatenate(translate2) + transform.concatenate(rotateTransform) + completeSectionWafer = applyTransform(completeSectionDapiCenter, transform) + + # completeSectionWafer = map(lambda x: [int(round(x[0])), int(round(x[1]))], completeSectionWafer) # necessary ? + + magSectionLowRes = completeSectionWafer[4:] + tissueSectionLowRes = completeSectionWafer[:4] + + magSectionsLowRes.append(magSectionLowRes) + tissueSectionsLowRes.append(tissueSectionLowRes) + + magSectionHighRes = [ [point[0]*float(downsizingFactor), point[1]*float(downsizingFactor)] for point in magSectionLowRes] + tissueSectionHighRes = [ [point[0]*float(downsizingFactor), point[1]*float(downsizingFactor)] for point in tissueSectionLowRes] + + magSectionsHighRes.append(magSectionHighRes) + tissueSectionsHighRes.append(tissueSectionHighRes) + + writeSections(magSectionsLowResPath, magSectionsLowRes) + writeSections(tissueSectionsLowResPath, tissueSectionsLowRes) + + writeSections(magSectionsHighResPath, magSectionsHighRes) + writeSections(tissueSectionsHighResPath, tissueSectionsHighRes) + + # 8/0 + + ################################ + # GUI to adjust existing sections + ################################ + if fc.getOK('Do you want to manually adjust the sections ?'): + with open(dapiCentersPath, 'r') as f: + waferDapiCenters = pickle.load(f) + with open(sectionsSpecsPath, 'r') as f: + sectionsSpecs = pickle.load(f) + + # the following ensures that all manual adjustments are systematically saved after each adjustment + if not os.path.isfile(finalTissueSectionsPath): + shutil.copyfile(magSectionsHighResPath, finalMagSectionsPath) + shutil.copyfile(tissueSectionsHighResPath, finalTissueSectionsPath) + + tissueSectionsHighRes = readSectionCoordinates(finalTissueSectionsPath) + 
magSectionsHighRes = readSectionCoordinates(finalMagSectionsPath) + + candidate0 = IJ.openImage(os.path.join(candidateRawFolder, os.listdir(candidateRawFolder)[0])) + wCandidate = candidate0.getWidth() + hCandidate = candidate0.getHeight() + candidate0.close() + + candidateHighResRawPaths = [os.path.join(candidateHighResRawFolder, name) for name in os.listdir(candidateHighResRawFolder)] + sectionStart = fc.getNumber('At which section do you want to start ?', default = 0, decimals = 0) + + magSectionsLowRes = [] + tissueSectionsLowRes = [] + + for id, sectionSpecs in enumerate(sectionsSpecs): + if id > sectionStart-1: + sectionId, r, x, y, ccScore = sectionSpecs + IJ.log('Manually checking section ' + str(id) + ' out of ' + str(len(sectionSpecs)) + '(the real id of the section is ' + str(sectionId) + ')') + waferDapiCenterLowRes = waferDapiCenters[sectionId] + waferDapiCenterHighRes = [waferDapiCenterLowRes[0]*float(downsizingFactor), waferDapiCenterLowRes[1]*float(downsizingFactor)] + + completeSectionDapiCenter = [rtd0, rtd1, rtd2, rtd3, rmd0, rmd1, rmd2, rmd3] # relative to the dapi center + + # the center of the template sliding window + xWindowCenter = wCandidate/2. - neighborhood/2. + xStep * x + yWindowCenter = hCandidate/2. - neighborhood/2. 
+ yStep * y + + # completeSectionDapiCenter is in relative dapiCenter coordinates, need to transform in coordinates of the candidate + translationToCandidateCoordinates = AffineTransform.getTranslateInstance(int(round(wCandidate/2.)), int(round(hCandidate/2.))) + completeSectionInLowResCandidate = applyTransform(completeSectionDapiCenter, translationToCandidateCoordinates) + + # in coordinates of the high res candidate + completeSectionInHighResCandidate = [[point[0]*float(downsizingFactor), point[1]*float(downsizingFactor)] for point in completeSectionInLowResCandidate] + + # display the section points for user to adjust + # create roi + poly = Polygon([int(round(point[0])) for point in completeSectionInHighResCandidate], [int(round(point[1])) for point in completeSectionInHighResCandidate], len(completeSectionInHighResCandidate)) + + # show roi and user dialog + candidate = IJ.openImage(candidateHighResRawPaths[sectionId]) + rotate(candidate, r) + candidate.setRoi(PointRoi(poly.xpoints, poly.ypoints, poly.npoints)) + candidate.show() + zoomFactor = 0 + for repeat in range(zoomFactor): + time.sleep(0.2) + IJ.run('In [+]') + w = candidate.getWindow() + w.setLocation(0,0) + rrt = RoiRotationTool() + rrt.run('') + WindowManager.setCurrentWindow(w) + WaitForUserDialog('Adjust the section points then click Ok.').show() + + # get the user input + adjustedSectionLocal = [] + for id, roi in enumerate(candidate.getRoi()): + adjustedSectionLocal.append([roi.x, roi.y]) + candidate.close() + # transform to dapiCenter coordinates + translationToDapiCenter = AffineTransform.getTranslateInstance(-int(round(wCandidate*downsizingFactor/2.)), -int(round(hCandidate*downsizingFactor/2.))) + userInputCompleteSectionDapiCenter = applyTransform(adjustedSectionLocal, translationToDapiCenter) + + # calculate transform to transform manual input to wafer coordinates (see details of the calculation earlier) + # xWindowCenter = (- neighborhood/2. 
+ xStep * x)*downsizingFactor + # yWindowCenter = (- neighborhood/2. + yStep * y)*downsizingFactor + transform = AffineTransform() + # translate1 = AffineTransform.getTranslateInstance(xWindowCenter, yWindowCenter) # /!\ I believe correct, but hard to confirm ... + translate2 = AffineTransform.getTranslateInstance(waferDapiCenterHighRes[0], waferDapiCenterHighRes[1]) + rotateTransform = AffineTransform.getRotateInstance(-r * PI/float(180)) + # transform.concatenate(translate1) + transform.concatenate(translate2) + transform.concatenate(rotateTransform) + + completeSectionWafer = applyTransform(userInputCompleteSectionDapiCenter, transform) + + magSectionHighRes = completeSectionWafer[4:] + tissueSectionHighRes = completeSectionWafer[:4] + + magSectionsHighRes[sectionId] = magSectionHighRes + tissueSectionsHighRes[sectionId] = tissueSectionHighRes + + # magSectionLowRes = [ [point[0]/float(downsizingFactor), point[1]/float(downsizingFactor)] for point in magSectionHighRes] + # tissueSectionLowRes = [ [point[0]/float(downsizingFactor), point[1]/float(downsizingFactor)] for point in tissueSectionHighRes] + + # magSectionsLowRes.append(magSectionLowRes) + # tissueSectionsLowRes.append(tissueSectionLowRes) + + # write after each section (an issue is that if you start this manual proofreading, you need to go through the whole process, could be avoided ...) 
+ # writeSections(magSectionsLowResPath, magSectionsLowRes) + # writeSections(tissueSectionsLowResPath, tissueSectionsLowRes) + + writeSections(finalMagSectionsPath, magSectionsHighRes) + writeSections(finalTissueSectionsPath, tissueSectionsHighRes) + + + +# # # # # # # # # # # ### Manually adding an offset, +# # # # # # # # # # # tissueSections = readSectionCoordinates(finalTissueSectionsPath) +# # # # # # # # # # # magSections = readSectionCoordinates(finalMagSectionsPath) + +# # # # # # # # # # # translate = AffineTransform.getTranslateInstance(103, 128) + +# # # # # # # # # # # tissueSections = [applyTransform(tissueSection, translate) for tissueSection in tissueSections] + +# # # # # # # # # # # magSections = [applyTransform(magSection, translate) for magSection in magSections] + +# # # # # # # # # # # writeSections(finalTissueSectionsPath, tissueSections) +# # # # # # # # # # # writeSections(finalMagSectionsPath, magSections) + + +############################# +# GUI to catch missed sections +############################# +if fc.getOK('Do you want to catch missed sections ?'): + + shutil.copyfile(projectPathFullRes, overlaysProjectPath) + p, loader, layerset, nLayers = fc.openTrakemProject(overlaysProjectPath) + p.saveAs(overlaysProjectPath, True) + + if os.path.isfile(finalTissueSectionsPath): + tissueSections = readSectionCoordinates(finalTissueSectionsPath) + magSections = readSectionCoordinates(finalMagSectionsPath) + else: + tissueSections = readSectionCoordinates(tissueSectionsHighResPath) + magSections = readSectionCoordinates(magSectionsHighResPath) + + counter = 0 + addSectionOverlays(p, [0], magSections, [Color.yellow], [0.5], 'allMagSectionsWith_' + str(counter) + '_manualSections') + addSectionOverlays(p, [0], tissueSections, [Color.blue], [0.5], 'allTissueSectionsWith_' + str(counter) + '_manualSections') + + noSectionAdded = False + while not noSectionAdded: + counter = counter + 1 + newSections = getPointsFromUser(p, 0, text = 'Select 4 corners 
in the right order of 1. the tissue 2. the mag.') + print 'newSections', newSections + if newSections != None: + newSectionsTissue = [newSections[8*k: 8*k + 4] for k in range(len(newSections)/8)] + newSectionsMag = [newSections[8*k + 4: 8*k + 8] for k in range(len(newSections)/8)] + + magSections = magSections + newSectionsMag + tissueSections = tissueSections + newSectionsTissue + + addSectionOverlays(p, [0], newSectionsMag, [Color.yellow], [0.5], 'manualMagSections_' + str(counter)) + addSectionOverlays(p, [0], newSectionsTissue, [Color.blue], [0.5], 'manualTissueSections_' + str(counter)) + # # # # # forceAlphas(layerset) + # update the starting point + p.save() + else: + noSectionAdded = True + + writeSections(finalTissueSectionsPath, tissueSections) + writeSections(finalMagSectionsPath, magSections) + + IJ.log('Writing the final image coordinates in the preImaging folder') + writeSections(finalTissueSectionsPreImagingPath, tissueSections) + writeSections(finalMagSectionsPreImagingPath, magSections) + + p.save() + fc.closeProject(p) + # disp = Display(p, layerset.getLayers().get(0)) + # disp.showFront(layerset.getLayers().get(0)) + # fc.closeProject(p) + + +####################### +# Extract images for all sections +####################### +finalExtractedSectionsFolder = fc.mkdir_p(os.path.join(workingFolder, 'finalExtractedSections')) +blendFlagPath = os.path.join(workingFolder, 'finalHighResBlendFlag') +if len(os.listdir(finalExtractedSectionsFolder)) == 0: + magSections = readSectionCoordinates(finalMagSectionsPath) + tissueSections = readSectionCoordinates(finalTissueSectionsPath) + + boxSize = longestDiagonal(magSections[0]) * 3 + IJ.log('Exporting all candidates centered on the dapiCenters') + project, loader, layerset, nLayers = fc.openTrakemProject(projectPathFullRes) + + # for idSection, dapiCenter in enumerate(dapiCentersHighRes): + for l, layer in enumerate(layerset.getLayers()): + if not os.path.isfile(blendFlagPath): + # blending: /!\ takes some 
time + IJ.log('Blending the high res wafer: takes some time ... channel ' + str(channels[l])) + Blending.blendLayerWise(layerset.getLayers(l, l), True, None) + + for sectionId, magSection in enumerate(magSections): + x, y = barycenter(magSection + tissueSections[sectionId]) + angle = getAngle([magSection[0][0], magSection[0][1], magSection[1][0], magSection[1][1]]) + + extractedSectionPath = os.path.join(finalExtractedSectionsFolder, 'finalSection_' + str(sectionId).zfill(4) + '_' + channels[l] + '.tif') + + roiExport = Rectangle(int(round(x - boxSize/2)), int(round(y - boxSize/2)), boxSize, boxSize) + IJ.log('x ' + str(x) + '; y ' + str(y) + ' roiexport' + str(roiExport)) + + # save the raw image + extractedSection = loader.getFlatImage(layer, roiExport , 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layer.getAll(Patch), True, Color.black, None) + rotate(extractedSection, -angle * 180/float(PI)) + + IJ.save(extractedSection, extractedSectionPath) + + with open(blendFlagPath, 'w') as f: + f.write('blended done') + project.save() + fc.closeProject(project) + +####################### +# Sanity check +####################### +tissueSections = readSectionCoordinates(finalTissueSectionsPath) +magSections = readSectionCoordinates(finalMagSectionsPath) + +# for id, tissueSection in enumerate(tissueSections): + # IJ.log(str(id) + ' - ' + str(int(getArea(tissueSection)/100))) + +for id, magSection in enumerate(magSections): + IJ.log(str(id) + ' - ' + str(int(getArea(magSection)/100))) + +# for id, [tissueSection, magSection] in enumerate(zip(tissueSections, magSections)): # distance between the barycenters of mag and tissue + # b1 = barycenter(tissueSection) + # b2 = barycenter(magSection) + # IJ.log(str(id) + ' - ' + str(int(round(Math.sqrt((b1[0] - b2[0]) * (b1[0] - b2[0]) + (b1[1] - b2[1]) * (b1[1] - b2[1])))))) \ No newline at end of file diff --git a/syringePump.py b/syringePump.py new file mode 100644 index 0000000..325ff77 --- /dev/null +++ b/syringePump.py @@ -0,0 +1,62 @@ 
def cSum(l):
    """Return the XOR (longitudinal-redundancy) checksum of the byte values in l.

    l is a sequence of ints (e.g. bytes of a pump command frame); returns the
    XOR-fold of all elements, 0 for an empty sequence.

    NOTE(review): two defects fixed here —
    - the accumulator was initialised from the undefined name ``o``
      (always raised NameError); the XOR identity is 0.
    - the hex debug print called the undefined ``formate``; the
      builtin is ``format``.
    """
    cS = 0
    for val in l:  # XOR-fold every value into the accumulator
        cS ^= val
    # debug output: decimal value, then zero-padded two-digit hex
    # (single-argument print() form is valid in both Python 2 and 3)
    print('csum( ' + str(l) + ' ) is ' + str(cS))
    print(format(cS, '02x'))
    return cS
def renderImportJson(path):
    """Import the tile-spec JSON file at *path* into the render web service.

    Shells out to render's ``import_json.sh`` client script and blocks until
    the subprocess finishes. Relies on module-level configuration names
    (``renderScriptsFolder``, ``url``, ``owner``, ``projectName``,
    ``stackName``, ``renderFolder``) being set before this is called.
    No return value; failure of the script is not checked here —
    NOTE(review): consider checking ``p.returncode`` after ``wait()``.
    """
    # run from the render repo root so the script resolves its own resources
    p = subprocess.Popen([os.path.join(renderScriptsFolder, 'import_json.sh'), '--baseDataUrl', url, '--owner', owner, '--project', projectName, '--stack', stackName, path], cwd = renderFolder)
    p.wait()
# Number of layers (sections) is taken from the EM data folder: one immediate
# subfolder per section. EM is authoritative because LM stacks can contain
# empty layers.
try:
    nSections = len(list(os.walk(os.path.join(inputFolder, 'EMData')))[0][1]) # EM gives the number of layers, not the LM where some layers can be empty
except Exception:
    # Fallback: ask the user interactively. The original called raw_input,
    # which does not exist in Python 3 — and this file is Python-3-only
    # (it uses [*map(...)] unpacking and os.makedirs(exist_ok=True)) —
    # so the fallback raised NameError instead of prompting. input() is
    # the Python 3 builtin.
    nSections = int(input('How many sections are there ?'))
 project - exit')  # continuation of the print('Error in reading an LM project - exit') started above
            sys.exit()
        # find nonEmptyLayers based on the .xml
        nonEmptyLayers = []
        with open(trakemProjectPath, 'r') as f:
            for line in f:
                if 'file_path="' in line:
                    # Layer index is the trailing number of the tile file name.
                    nonEmptyLayers.append(int(float(line.split('_')[-1].replace('.tif"\n','')))) # file_path="finalLM_546/finalLM__546_0001.tif"
        nonEmptyLayers = sorted(list(set(nonEmptyLayers))) # remove duplicates

    elif ('EM' in trakemProjectFileName) and doEM:
        # EM branch: one stack, all sections assumed non-empty.
        stackName = datasetName + '_EM'
        dataFolder = os.path.join(inputFolder, 'EMData')
        nResolutionLevels = EMResolutionLevels
        pixelSize = EMPixelSize
        nonEmptyLayers = range(nSections)
        channelName = ''
    else:
        print('Either nothing to do (check doLM and doEM), or error because the trakem xml file should contain "LM" or "EM"')
        exit()  # NOTE(review): inconsistent with the sys.exit() used above -- prefer sys.exit() in scripts

    print('\n *** \nProcessing stack', stackName, '\n', 'with pixelSize', pixelSize, '\n', 'channelName', channelName, '\n', 'nNonEmptyLayers', len(nonEmptyLayers), '\n***\n')

    # Per-stack output locations.
    precomputedFolderProject = os.path.join(outputFolder, 'precomputed', stackName)
    os.makedirs(precomputedFolderProject, exist_ok=True)

    trakemDimensionsPath = os.path.join(outputFolder, stackName + '_Dimensions')

    jsonPath = os.path.join(outputFolder, stackName + '.json')

    renderProjectFolder = os.path.join(outputFolder, 'renderProject_' + stackName, '') # new folder that will contain the whole render project
    os.makedirs(renderProjectFolder, exist_ok=True) # xxx should be created ?
    mipmapFolder = os.path.join(renderProjectFolder, 'mipmaps', '')


    # ###################
    # ### XML to JSON ###
    # ###################
    if XML_to_JSON:
        # If LMProject: (the LMSegmented project is not faulty, correct only the projects with the MLSTransforms)
        # add a around the MLST
        # NOTE(review): the XML tag text this section is supposed to write appears to have been
        # lost (angle-bracket markup stripped in transit): `g.write('\n\n')` writes only blank
        # lines and `'' in line` is always True -- recover the original wrapper tags from VCS.
        trakemProjectCorrectedPath = os.path.join(outputFolder, stackName + '_Corrected.xml')
        with open(trakemProjectPath, 'r') as f, open(trakemProjectCorrectedPath, 'w') as g:
            for line in f:
                if 'LMProject' in trakemProjectFileName:
                    # adding the missing around the MLST
                    if 'ict_transform class=' in line:
                        g.write('\n\n')
                    elif '' in line:
                        g.write('\n\n')
                # writing the corrected xml file
                g.write(line)

        p = subprocess.Popen(['chmod +x ' + trakemToJsonPath], shell = True)
        p.wait()

        # Run trakem2 -> render-json conversion.
        p = subprocess.Popen([trakemToJsonPath, trakemProjectCorrectedPath, dataFolder, jsonPath], cwd = renderScriptsFolder)
        p.wait()

        # Correct the json:
        # - correct the relative image paths with the new data location
        # - remove initial comma when first layer empty
        # - if LM: reset the patch transform to identity (weirdly added by the converter)
        jsonCorrectedPath = os.path.join(outputFolder, stackName + '_Corrected.json')

        with open(jsonPath, 'r') as f, open(jsonCorrectedPath, 'w') as g:
            for idLine, line in enumerate(f):
                # trakem2.converter adds an unnecessary comma when the first trakem layer is empty
                if (idLine == 1) and (',' in line):
                    line = ''
                # correct data location
                elif 'imageUrl' in line:
                    if 'EM' in trakemProjectFileName:
                        # NOTE(review): splitted[2] assumes 'EMData' occurs at least twice in the
                        # line (duplicated path segment); raises IndexError otherwise -- confirm.
                        splitted = line.split('EMData')
                        line = splitted[0] + 'EMData' + splitted[2]
                    if 'LM' in trakemProjectFileName:
                        # Drop the duplicated folder component from the file: URL.
                        splitted = line.split('file:')
                        pathParts = list(Path(splitted[1].replace('"\n', '')).parts)
                        del pathParts[-2]
                        newPath = os.path.join(*pathParts)
                        line = splitted[0] + 'file:' + newPath + '"\n'
                        # "imageUrl" :
"file:/home/thomas/research/trakemToNeuroglancerProjects/firstMinimalTest/input/finalLM_brightfield/finalLM_brightfield/finalLM__brightfield_0001.tif" + # correct the wrongly added transform by the converter + elif ('LM' in trakemProjectFileName) and ('"dataString" : "1.0' in line): # revert to identity the affine transform that has been wrongly added by the trakem2.Converter + splitted = line.split('"dataString" : "1.0') + line = splitted[0] + '"dataString" : "1.0 0.0 0.0 1.0 0.0 0.0"\n' + # writing the corrected json + g.write(line) + + os.remove(jsonPath) + os.rename(jsonCorrectedPath, jsonPath) + + # # ###################### + # # ### JSON to Render ### + # # ###################### + if JSON_to_Render: + p = subprocess.Popen(['sudo service mongod start'], shell = True) + p.wait() + + p = subprocess.Popen([os.path.join(renderFolder, 'deploy', 'jetty_base', 'jetty_wrapper.sh') + ' start'], shell = True) + p.wait() + + # split the json into smaller ones (the 2012 tiles of A7 trigger a failure) + tilesPerJson = 100 + splitJsonPaths = [] + + with open(jsonPath, 'r') as f: + mainJson = json.load(f) + nTiles = len(mainJson) + splitJsons = [mainJson[i:min(i + tilesPerJson, nTiles)] for i in range(0, nTiles, tilesPerJson)] + for id, splitJson in enumerate(splitJsons): + splitJsonPath = os.path.join(outputFolder, stackName + '_' + str(id).zfill(4) + '_splitJson.json') + splitJsonPaths.append(splitJsonPath) + if not os.path.isfile(splitJsonPath): + with open(splitJsonPath, 'w') as g: + json.dump(splitJson, g, indent=4) + + p = subprocess.Popen([os.path.join(renderScriptsFolder, 'manage_stacks.sh'), '--baseDataUrl', url, + '--owner', owner, '--project', projectName, '--stack', stackName, + '--action', 'CREATE', '--cycleNumber', str(1), '--cycleStepNumber', str(1)], cwd = renderFolder) + p.wait() + + with ThreadPoolExecutor(max_workers=6) as executor: # import the jsons into the project + executor.map(renderImportJson, splitJsonPaths) + + p = 
        subprocess.Popen([os.path.join(renderScriptsFolder, 'manage_stacks.sh'), '--baseDataUrl', url,
                          '--owner', owner, '--project', projectName, '--stack', stackName, '--action', 'SET_STATE', '--stackState', 'COMPLETE'], cwd = renderFolder)  # mark the stack COMPLETE so render serves it
        p.wait()



    # #########################
    # ### Render to MipMaps ###
    # #########################
    # echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
    # sudo sysctl -p
    # inotifywait -m -r -e create /home/tt/research/data/trakemToNeuroglancerProjects/B6/outputFolder

    if Render_to_Mipmaps:
        # One renderCatmaidBoxesCall per non-empty layer, nMipmapThreads at a time.
        with ThreadPoolExecutor(max_workers=nMipmapThreads) as executor:
            executor.map(renderCatmaidBoxesCall, nonEmptyLayers)
            # executor.map(renderCatmaidBoxesCall, range(163,175))

    # sudo swapoff -a && sudo swapon -a

    ##############################
    ### MipMaps to Precomputed ###
    ##############################
    if Mipmaps_to_Precomputed:
        mipmapFolderDirect = os.path.join(mipmapFolder, projectName, stackName, str(mipmapSize) + 'x' + str(mipmapSize), '')

        # create the info file (neuroglancer precomputed metadata) from a template
        infoPath = os.path.join(precomputedFolderProject, 'info')
        shutil.copyfile(os.path.join(myScriptsFolder, 'infoTemplate'), infoPath)
        with open(infoPath, 'r') as f:
            info = json.load(f)

        # Keep only the scales actually generated.
        del info['scales'][nResolutionLevels:] # ok with n=4, to be checked for different resolution level numbers

        # get the dimensions of the render universe (tile grid extent * tile size, in pixels)
        level0MipmapFolder = os.path.join(mipmapFolderDirect, '0')
        maxRow, maxCol = np.array(getMaxRowMaxCol(level0MipmapFolder)) * mipmapSize
        print(maxRow, maxCol)

        for idScale, scale in enumerate(info['scales']):
            resolution = pixelSize * 2**idScale # pixel size increases with powers of 2
            scale['resolution'] = [resolution, resolution, 50]  # z step presumably 50 nm section thickness -- TODO confirm
            scale['chunk_sizes'] = [[chunkSize[0], chunkSize[1], min(nSections, chunkSize[2])]] # nSections-1 because issue with section number 0 ?
            scale['key'] = str(resolution) + 'nm'
            scale['encoding'] = 'raw'
            # adding a *1.5 because I do not understand why otherwise the volume gets truncated at low resolution ...
            scale['size'] = [ int((maxCol // 2**idScale)*1.5) , int((maxRow // 2**idScale)*1.5), nSections] # integers specifying the x, y, and z dimensions of the volume in voxels
            info['scales'][idScale] = scale

        with open(infoPath, 'w') as f:
            json.dump(info, f)

        # Fan out mipmapToPrecomputed.py across nThreadsMipmapToPrecomputed worker
        # processes; each worker picks the tasks with taskIndex % nThreads == threadId.
        start = time.perf_counter()
        processes = []
        for threadId in range(nThreadsMipmapToPrecomputed):
            p = subprocess.Popen(['python3', os.path.join(myScriptsFolder, 'mipmapToPrecomputed.py'), mipmapFolderDirect, precomputedFolderProject, str(mipmapSize), infoPath, str(nThreadsMipmapToPrecomputed), str(threadId), visualizationMode]) # nThreads, threadId tells the thread what to process (only tasks with threadId%nThreads = i)
            processes.append(p)
        [p.wait() for p in processes]
        print('Mipmap to precomputed took: ', time.perf_counter() - start, ' seconds')

#####################
### Visualization ###
#####################
'''
Upload to GCS

# install gsutil
curl https://sdk.cloud.google.com | bash
Restart your shell:
exec -l $SHELL
Run gcloud init to initialize the gcloud environment:
gcloud init

gcloud auth login
gcloud config set project affable-ring-187517 # (the project id can be found in the online gs browser)

add 'allUsers' as a member in the IAM setting of GS

cd outputFolder
gsutil -m cp -r -Z precomputed/ gs://xxx
'''