diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000..a40a4c923 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,34 @@ +name: Install Environment and Run Tests + +on: + push: + branches: master + pull_request: +jobs: + install-and-test: + permissions: + contents: read + runs-on: ubuntu-22.04 + steps: + - name: Checkout the Squid repo + uses: actions/checkout@v4 + - name: Run the setup script for Ubuntu 22.04 + run: ./setup_22.04.sh + working-directory: ./software + env: + SRC_ROOT: "${{ runner.temp }}" + - name: Run the cuda setup script + run: ./setup_cuda_22.04.sh + working-directory: ./software + env: + SRC_ROOT: "${{ runner.temp }}" + # NOTE(imo): Our setup script checks out the repository, so we actually check it out twice. + # Once to get the script (using actions/checkout@v4), then again via the script. We want to do + # all of our actually testing in the one the script checks out, though, so make sure we set SRC_ROOT + # properly and then use the same working directory for running within the setup script's checkout + - name: "TEMPORARY: copy a valid config into the repo software root" + run: cp configurations/configuration_Squid+.ini . 
+ working-directory: "${{ runner.temp }}/Squid/software" + - name: Run the tests + run: python3 -m pytest + working-directory: "${{ runner.temp }}/Squid/software" \ No newline at end of file diff --git a/.gitignore b/.gitignore index e376b7232..0494bf710 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .DS_Store *.pyc *.xml +**/.idea/ diff --git a/README.md b/README.md index 0d9e09cdd..590eb45ef 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ Applications include - BOM for the multicolor laser engine: [link](https://docs.google.com/spreadsheets/d/1hEM6PsxZPTp1LY3cpxUJOS3Q1YLQN-xniF33ZddFj9U/edit#gid=1175873468) - BOM for the control panel: [link](https://docs.google.com/spreadsheets/d/1z2HjibIG9PHffiDsbuzQXmvf2gSFMduHrXkPwDbcXRY/edit?usp=sharing) -## Eearly Results, Related Work and Possible Applications +## Early Results, Related Work and Possible Applications Refer to our website: www.squid-imaging.org ## References diff --git a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.cpp b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.cpp index 80f51950b..2e8b16272 100644 --- a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.cpp +++ b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.cpp @@ -151,6 +151,7 @@ void tmc4361A_init(TMC4361ATypeDef *tmc4361A, uint8_t channel, ConfigurationType tmc4361A->dac_idx = NO_DAC; tmc4361A->dac_fullscale_msteps = 0; tmc4361A->config = config; + tmc4361A->velocity_mode = false; tmc4361A->config->callback = NULL; tmc4361A->config->channel = channel; diff --git a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.h b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.h index 71b3ed320..e4fa92c56 100644 --- a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.h +++ b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A.h @@ -59,6 +59,7 @@ typedef struct float threadPitch; uint16_t stepsPerRev; uint16_t microsteps; + bool 
velocity_mode; uint8_t dac_idx; uint32_t dac_fullscale_msteps; @@ -66,6 +67,10 @@ typedef struct //TClosedLoopConfig closedLoopConfig; uint8_t status; ConfigurationTypeDef *cover; + + int target_tolerance; + int pid_tolerance; + } TMC4361ATypeDef; typedef void (*tmc4361A_callback)(TMC4361ATypeDef*, ConfigState); diff --git a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.cpp b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.cpp index 2c5c49ffa..ef0adbc87 100644 --- a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.cpp +++ b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.cpp @@ -34,7 +34,7 @@ tmc4361A_stop: Halt operation by setting the target position to the current position Arguments: TMC4361ATypeDef *tmc4361A tmc4361A_isRunning: Returns true if the motor is moving - Arguments: TMC4361ATypeDef *tmc4361A + Arguments: TMC4361ATypeDef *tmc4361A, bool pid_enable tmc4361A_xmmToMicrosteps: Convert from millimeters to units microsteps for position and jerk values Arguments: TMC4361ATypeDef *tmc4361A, float mm tmc4361A_xmicrostepsTomm: Convert from microsteps to units millimeters for position and jerk values @@ -434,7 +434,7 @@ void tmc4361A_writeSPR(TMC4361ATypeDef *tmc4361A) { OPERATION: We write several bytes to the two ICs to configure their behaviors. 
ARGUMENTS: - TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info + TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info uint32_t clk_Hz_TMC4361: Clock frequency we are driving the ICs at RETURNS: None @@ -459,10 +459,10 @@ void tmc4361A_tmc2660_init(TMC4361ATypeDef *tmc4361A, uint32_t clk_Hz_TMC4361) { // SPI configuration tmc4361A_writeInt(tmc4361A, TMC4361A_SPIOUT_CONF, 0x4440108A); // cover datagram for TMC2660 - tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000900C3); - tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000A0000); - tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000C000A); - tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000E00A0); // SDOFF = 1 -> SPI mode + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000900C3); // CHOPCONF + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000A0000); // SMARTEN + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000C000A); // SGCSCON + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000E00A1); // SDOFF = 1 -> SPI mode // current scaling tmc4361A_cScaleInit(tmc4361A); // microstepping setting @@ -471,6 +471,56 @@ void tmc4361A_tmc2660_init(TMC4361ATypeDef *tmc4361A, uint32_t clk_Hz_TMC4361) { return; } +/* + ----------------------------------------------------------------------------- + DESCRIPTION: tmc4361A_tmc2660_disable_driver() shut off driver MOSFETs + + OPERATION: shut off the driver MOSFETs to make axis to disable status + + ARGUMENTS: + TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info + + RETURNS: None + + LOCAL VARIABLES: None + + SHARED VARIABLES: + TMC4361ATypeDef *tmc4361A: Values are read from the struct + + GLOBAL VARIABLES: None + + DEPENDENCIES: tmc4316A.h + ----------------------------------------------------------------------------- +*/ +void tmc4361A_tmc2660_disable_driver(TMC4361ATypeDef *tmc4361A) { + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000900C0); // CHOPCONF 
+} + +/* + ----------------------------------------------------------------------------- + DESCRIPTION: tmc4361A_tmc2660_enable_driver() enable driver MOSFETs to init argument value + + OPERATION: enable the driver MOSFETs to make axis to enable status + + ARGUMENTS: + TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info + + RETURNS: None + + LOCAL VARIABLES: None + + SHARED VARIABLES: + TMC4361ATypeDef *tmc4361A: Values are read from the struct + + GLOBAL VARIABLES: None + + DEPENDENCIES: tmc4316A.h + ----------------------------------------------------------------------------- +*/ +void tmc4361A_tmc2660_enable_driver(TMC4361ATypeDef *tmc4361A) { + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, 0x000900C3); // CHOPCONF +} + /* ----------------------------------------------------------------------------- DESCRIPTION: tmc4361A_tmc2660_update() update the tmc4361A and tmc2660 settings (current scaling and microstepping settings) @@ -670,6 +720,7 @@ int8_t tmc4361A_setVirtualLimit(TMC4361ATypeDef *tmc4361A, int dir, int32_t limi TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info uint8_t polarity: Polarity of the switch - 0 if active low, 1 if active high uint8_t which: Which switch to use as home + uint16_t safety_margin: safty margin of home pointer around RETURNS: None @@ -685,7 +736,7 @@ int8_t tmc4361A_setVirtualLimit(TMC4361ATypeDef *tmc4361A, int dir, int32_t limi DEPENDENCIES: tmc4316A.h ----------------------------------------------------------------------------- */ -void tmc4361A_enableHomingLimit(TMC4361ATypeDef *tmc4361A, uint8_t polarity, uint8_t which) { +void tmc4361A_enableHomingLimit(TMC4361ATypeDef *tmc4361A, uint8_t polarity, uint8_t which, uint16_t safety_margin) { if (which == LEFT_SW) { if (polarity != 0) { // If the left switch is active high, HOME_REF = 0 indicates positive direction in reference to X_HOME @@ -711,7 +762,7 @@ void tmc4361A_enableHomingLimit(TMC4361ATypeDef *tmc4361A, uint8_t 
polarity, uin tmc4361A_setBits(tmc4361A, TMC4361A_REFERENCE_CONF, TMC4361A_STOP_RIGHT_IS_HOME_MASK); } // have a safety margin around home - tmc4361A_setBits(tmc4361A, TMC4361A_HOME_SAFETY_MARGIN, 1 << 2); + tmc4361A_setBits(tmc4361A, TMC4361A_HOME_SAFETY_MARGIN, safety_margin); return; } @@ -932,7 +983,7 @@ void tmc4361A_moveToExtreme(TMC4361ATypeDef *tmc4361A, int32_t vel, int8_t dir) ----------------------------------------------------------------------------- */ void tmc4361A_sRampInit(TMC4361ATypeDef *tmc4361A) { - tmc4361A_setBits(tmc4361A, TMC4361A_RAMPMODE, 0b110); // positioning mode, s-shaped ramp + tmc4361A_setBits(tmc4361A, TMC4361A_RAMPMODE, TMC4361A_RAMP_POSITION | TMC4361A_RAMP_SSHAPE); // positioning mode, s-shaped ramp tmc4361A_rstBits(tmc4361A, TMC4361A_GENERAL_CONF, TMC4361A_USE_ASTART_AND_VSTART_MASK); // keep astart, vstart = 0 tmc4361A_writeInt(tmc4361A, TMC4361A_BOW1, tmc4361A->rampParam[BOW1_IDX]); // determines the value which increases the absolute acceleration value. tmc4361A_writeInt(tmc4361A, TMC4361A_BOW2, tmc4361A->rampParam[BOW2_IDX]); // determines the value which decreases the absolute acceleration value. 
@@ -1082,8 +1133,9 @@ void tmc4361A_setMaxSpeed(TMC4361ATypeDef *tmc4361A, int32_t velocity) { ----------------------------------------------------------------------------- */ void tmc4361A_setSpeed(TMC4361ATypeDef *tmc4361A, int32_t velocity) { + tmc4361A->velocity_mode = true; tmc4361A_readInt(tmc4361A, TMC4361A_EVENTS); // clear register - tmc4361A_rstBits(tmc4361A, TMC4361A_RAMPMODE, 0b100); // no velocity ramp + tmc4361A_rstBits(tmc4361A, TMC4361A_RAMPMODE, TMC4361A_RAMP_POSITION | TMC4361A_RAMP_HOLD); // no velocity ramp tmc4361A_writeInt(tmc4361A, TMC4361A_VMAX, velocity); return; } @@ -1210,12 +1262,16 @@ int8_t tmc4361A_setMaxAcceleration(TMC4361ATypeDef *tmc4361A, uint32_t accelerat ----------------------------------------------------------------------------- */ int8_t tmc4361A_moveTo(TMC4361ATypeDef *tmc4361A, int32_t x_pos) { - // ensure we are in positioning mode with S-shaped ramp - tmc4361A_sRampInit(tmc4361A); - if (x_pos < tmc4361A->xmin || x_pos > tmc4361A->xmax) { return ERR_OUT_OF_RANGE; } + + if(tmc4361A->velocity_mode) { + // ensure we are in positioning mode with S-shaped ramp + tmc4361A_sRampInit(tmc4361A); + tmc4361A->velocity_mode = false; + } + // Read events before and after to clear the register tmc4361A_readInt(tmc4361A, TMC4361A_EVENTS); tmc4361A_writeInt(tmc4361A, TMC4361A_X_TARGET, x_pos); @@ -1408,6 +1464,7 @@ int8_t tmc4361A_setCurrentPosition(TMC4361ATypeDef *tmc4361A, int32_t position) tmc4361A_writeInt(tmc4361A, TMC4361A_XACTUAL, position); tmc4361A_moveTo(tmc4361A, position); + tmc4361A->velocity_mode = true; return err; } @@ -1447,6 +1504,7 @@ void tmc4361A_stop(TMC4361ATypeDef *tmc4361A) { ARGUMENTS: TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info + bool pid_enable: true: if this aix enable pid control, else: false RETURNS: bool moving: true if moving, false otherwise @@ -1464,20 +1522,25 @@ void tmc4361A_stop(TMC4361ATypeDef *tmc4361A) { DEPENDENCIES: tmc4316A.h 
----------------------------------------------------------------------------- */ -bool tmc4361A_isRunning(TMC4361ATypeDef *tmc4361A) { +bool tmc4361A_isRunning(TMC4361ATypeDef *tmc4361A, bool pid_enable) { int32_t stat_reg = tmc4361A_readInt(tmc4361A, TMC4361A_STATUS); - // We aren't running if target is reached OR (velocity = 0 and acceleration == 0) - if ((stat_reg & TMC4361A_TARGET_REACHED_MASK) != 0) { - return true; + if (pid_enable) { + int32_t pid_err = abs(tmc4361A_readInt(tmc4361A, TMC4361A_PID_E_RD)); + // We aren't running if target is reached OR (velocity = 0 and acceleration == 0) + if (((stat_reg & TMC4361A_TARGET_REACHED_MASK) == 1) && ((stat_reg & (TMC4361A_VEL_STATE_F_MASK | TMC4361A_RAMP_STATE_F_MASK))==0) && (pid_err < tmc4361A->target_tolerance)) { + return false; + } } - stat_reg &= (TMC4361A_VEL_STATE_F_MASK | TMC4361A_RAMP_STATE_F_MASK); - if (stat_reg == 0) { - return true; + else { + // We aren't running if target is reached OR (velocity = 0 and acceleration == 0) + if (((stat_reg & TMC4361A_TARGET_REACHED_MASK) == 1) && ((stat_reg & (TMC4361A_VEL_STATE_F_MASK | TMC4361A_RAMP_STATE_F_MASK))==0)) { + return false; + } } - // Otherwise, return false - return false; + // Otherwise, return true + return true; } /* @@ -1776,8 +1839,8 @@ void tmc4361A_init_PID(TMC4361ATypeDef *tmc4361A, uint32_t target_tolerance, uin // Write the PID parameters tmc4361A_writeInt(tmc4361A, TMC4361A_PID_P_WR, pid_p & TMC4361A_PID_P_MASK); - tmc4361A_writeInt(tmc4361A, TMC4361A_PID_I_WR, pid_d & TMC4361A_PID_I_MASK); - tmc4361A_writeInt(tmc4361A, TMC4361A_PID_D_WR, pid_p & TMC4361A_PID_D_MASK); + tmc4361A_writeInt(tmc4361A, TMC4361A_PID_I_WR, pid_i & TMC4361A_PID_I_MASK); + tmc4361A_writeInt(tmc4361A, TMC4361A_PID_D_WR, pid_d & TMC4361A_PID_D_MASK); tmc4361A_writeInt(tmc4361A, TMC4361A_PID_DV_CLIP_WR, pid_dclip & TMC4361A_PID_DV_CLIP_MASK); @@ -1785,6 +1848,9 @@ void tmc4361A_init_PID(TMC4361ATypeDef *tmc4361A, uint32_t target_tolerance, uin datagram = ((pid_iclip 
<< TMC4361A_PID_I_CLIP_SHIFT) & TMC4361A_PID_I_CLIP_MASK) + ((pid_d_clkdiv << TMC4361A_PID_D_CLKDIV_SHIFT) & TMC4361A_PID_D_CLKDIV_MASK); tmc4361A_writeInt(tmc4361A, TMC4361A_PID_I_CLIP_WR, datagram); + // save paramters + tmc4361A->target_tolerance = target_tolerance; + tmc4361A->pid_tolerance = pid_tolerance; return; } @@ -2002,3 +2068,61 @@ int8_t tmc4361A_move_no_stick(TMC4361ATypeDef *tmc4361A, int32_t x_pos, int32_t return tmc4361A_moveTo_no_stick(tmc4361A, target, backup_amount, err_thresh, timeout_ms); } + +/* + ----------------------------------------------------------------------------- + DESCRIPTION: tmc4361A_config_init_stallGuard() initializes stall prevention on the TMC4316A and TMC2660 + + OPERATION: First, check if arugments are within bounds. If the argument exceed the bounds, constrain them before writing the values, and note that this function failed. + We then write the sensitivitity to the TMC2660. + + + ARGUMENTS: + TMC4361ATypeDef *tmc4361A: Pointer to a struct containing motor driver info + int8_t sensitivity: Value from -64 to +63 indicating sensitivity to stall condition. Larger values are less sensitive. + bool filter_en: Set true to use filter (more accurate, slower). + uint32_t vstall_lim: 24-bit value. The internal ramp velocity is set immediately to 0 whenever a stall is detected and |VACTUAL| >VSTALL_LIMIT. 
+ + RETURNS: + bool success: return true if there were no errors + + INPUTS / OUTPUTS: Sends signals over SPI + + LOCAL VARIABLES: None + + SHARED VARIABLES: + TMC4361ATypeDef *tmc4361A: Values are read from the struct + + GLOBAL VARIABLES: None + + DEPENDENCIES: tmc4316A.h + ----------------------------------------------------------------------------- +*/ +int16_t tmc4361A_config_init_stallGuard(TMC4361ATypeDef *tmc4361A, int8_t sensitivity, bool filter_en, uint32_t vstall_lim){ + // First, ensure values are within limits + bool success = true; + if((sensitivity > 63) || (sensitivity < -64) || (vstall_lim >= (1<<24))){ + success = false; + } + sensitivity = constrain(sensitivity, -64, 63); + vstall_lim = constrain(vstall_lim, 0, ((1<<24)-1)); + // Mask the high bit + sensitivity = sensitivity & 0x7F; + // Build the datagram + uint32_t datagram = 0; + datagram = filter_en ? SFILT : 0; + datagram |= SGCSCONF; + datagram |= (sensitivity << 8); + datagram |= tmc4361A->cscaleParam[CSCALE_IDX]; + // Next, write to the TMC2660 - write to the "cover_0.3 *10^6 /(200*256)low" register + tmc4361A_writeInt(tmc4361A, TMC4361A_COVER_LOW_WR, datagram); + // Enable stall detection on the TMC4316A + // set vstall limit + tmc4361A_writeInt(tmc4361A, TMC4361A_VSTALL_LIMIT_WR, vstall_lim); + // enable stop on stall + tmc4361A_setBits(tmc4361A, TMC4361A_REFERENCE_CONF, TMC4361A_STOP_ON_STALL_MASK); + // disable drive after stall + tmc4361A_rstBits(tmc4361A, TMC4361A_REFERENCE_CONF, TMC4361A_DRV_AFTER_STALL_MASK); + + return success; +} diff --git a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.h b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.h index 68d8ecec2..c435a363f 100644 --- a/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.h +++ b/firmware/octopi_firmware_v2/main_controller_teensy41/TMC4361A_TMC2660_Utils.h @@ -18,6 +18,8 @@ // Functions for user-facing API void 
tmc4361A_tmc2660_config(TMC4361ATypeDef *tmc4361A, float tmc2660_cscale, float tmc4361a_hold_scale_val, float tmc4361a_drv2_scale_val, float tmc4361a_drv1_scale_val, float tmc4361a_boost_scale_val, float pitch_mm, uint16_t steps_per_rev, uint16_t microsteps, uint8_t dac_idx = NO_DAC, uint32_t dac_fullscale_msteps = 0); void tmc4361A_tmc2660_init(TMC4361ATypeDef *tmc4361A, uint32_t clk_Hz_TMC4361); +void tmc4361A_tmc2660_enable_driver(TMC4361ATypeDef *tmc4361A); +void tmc4361A_tmc2660_disable_driver(TMC4361ATypeDef *tmc4361A); void tmc4361A_tmc2660_update(TMC4361ATypeDef *tmc4361A); void tmc4361A_setMaxSpeed(TMC4361ATypeDef *tmc4361A, int32_t velocity); void tmc4361A_setSpeed(TMC4361ATypeDef *tmc4361A, int32_t velocity); @@ -41,7 +43,7 @@ int32_t tmc4361A_currentPosition(TMC4361ATypeDef *tmc4361A); int32_t tmc4361A_targetPosition(TMC4361ATypeDef *tmc4361A); int8_t tmc4361A_setCurrentPosition(TMC4361ATypeDef *tmc4361A, int32_t position); void tmc4361A_stop(TMC4361ATypeDef *tmc4361A); -bool tmc4361A_isRunning(TMC4361ATypeDef *tmc4361A); +bool tmc4361A_isRunning(TMC4361ATypeDef *tmc4361A, bool pid_enable); int32_t tmc4361A_xmmToMicrosteps(TMC4361ATypeDef *tmc4361A, float mm); float tmc4361A_xmicrostepsTomm(TMC4361ATypeDef *tmc4361A, int32_t microsteps); int32_t tmc4361A_vmmToMicrosteps(TMC4361ATypeDef *tmc4361A, float mm); @@ -49,7 +51,7 @@ float tmc4361A_vmicrostepsTomm(TMC4361ATypeDef *tmc4361A, int32_t microsteps); int32_t tmc4361A_ammToMicrosteps(TMC4361ATypeDef *tmc4361A, float mm); float tmc4361A_amicrostepsTomm(TMC4361ATypeDef *tmc4361A, int32_t microsteps); void tmc4361A_enableLimitSwitch(TMC4361ATypeDef *tmc4361A, uint8_t polarity, uint8_t which, uint8_t flipped); -void tmc4361A_enableHomingLimit(TMC4361ATypeDef *tmc4361A, uint8_t polarity, uint8_t which); +void tmc4361A_enableHomingLimit(TMC4361ATypeDef *tmc4361A, uint8_t polarity, uint8_t which, uint16_t safety_margin); uint8_t tmc4361A_readLimitSwitches(TMC4361ATypeDef *tmc4361A); void 
tmc4361A_setHome(TMC4361ATypeDef *tmc4361A); void tmc4361A_moveToExtreme(TMC4361ATypeDef *tmc4361A, int32_t vel, int8_t dir); @@ -63,6 +65,7 @@ void tmc4361A_setVirtualStop(TMC4361ATypeDef *tmc4361A, uint8_t which, int32_t t int8_t tmc4361A_setVirtualLimit(TMC4361ATypeDef *tmc4361A, int dir, int32_t limit); void tmc4361A_disableVirtualLimitSwitch(TMC4361ATypeDef *tmc4361A, int dir); void tmc4361A_enableVirtualLimitSwitch(TMC4361ATypeDef *tmc4361A, int dir); +int16_t tmc4361A_config_init_stallGuard(TMC4361ATypeDef *tmc4361A, int8_t sensitivity, bool filter_en, uint32_t vstall_lim); // The following does not need to be accessed by the end user // Default motor settings - can override using tmc4361A_setPitch(), tmc4361A_setMicrosteps(), tmc4361A_setSPR() diff --git a/firmware/octopi_firmware_v2/main_controller_teensy41/main_controller_teensy41.ino b/firmware/octopi_firmware_v2/main_controller_teensy41/main_controller_teensy41.ino index 21a05ca00..31d698d51 100644 --- a/firmware/octopi_firmware_v2/main_controller_teensy41/main_controller_teensy41.ino +++ b/firmware/octopi_firmware_v2/main_controller_teensy41/main_controller_teensy41.ino @@ -50,6 +50,8 @@ static const int SET_ILLUMINATION = 12; static const int SET_ILLUMINATION_LED_MATRIX = 13; static const int ACK_JOYSTICK_BUTTON_PRESSED = 14; static const int ANALOG_WRITE_ONBOARD_DAC = 15; +static const int SET_DAC80508_REFDIV_GAIN = 16; +static const int SET_ILLUMINATION_INTENSITY_FACTOR = 17; static const int SET_LIM_SWITCH_POLARITY = 20; static const int CONFIGURE_STEPPER_DRIVER = 21; static const int SET_MAX_VELOCITY_ACCELERATION = 22; @@ -58,8 +60,11 @@ static const int SET_OFFSET_VELOCITY = 24; static const int CONFIGURE_STAGE_PID = 25; static const int ENABLE_STAGE_PID = 26; static const int DISABLE_STAGE_PID = 27; +static const int SET_HOME_SAFETY_MERGIN = 28; +static const int SET_PID_ARGUMENTS = 29; static const int SEND_HARDWARE_TRIGGER = 30; static const int SET_STROBE_DELAY = 31; +static const int 
SET_AXIS_DISABLE_ENABLE = 32; static const int SET_PIN_LEVEL = 41; static const int INITIALIZE = 254; static const int RESET = 255; @@ -104,6 +109,7 @@ static const int LASER_488nm = 4; // to rename static const int LASER_561nm = 22; // to rename static const int LASER_638nm = 3; // to rename static const int LASER_730nm = 23; // to rename +static const int LASER_INTERLOCK = 1; // PWM6 2 // PWM7 1 // PWM8 0 @@ -113,13 +119,13 @@ static const int LASER_730nm = 23; // to rename //static const int num_digital_pins = 10; // pin 7,8 (PWM 10, 11) may be used for UART, pin 24,25 (PWM 15, 16) may be used for UART static const int num_digital_pins = 6; -static const int digitial_output_pins[num_digital_pins] = {2,1,6,9,10,15}; // PWM 6-7, 9, 12-14 +static const int digitial_output_pins[num_digital_pins] = {2, 1, 6, 9, 10, 15}; // PWM 6-7, 9, 12-14 -// camera trigger -static const int camera_trigger_pins[] = {29,30,31,32,16,28}; // trigger 1-6 +// camera trigger +static const int camera_trigger_pins[] = {29, 30, 31, 32, 16, 28}; // trigger 1-6 // motors -const uint8_t pin_TMC4361_CS[4] = {41,36,35,34}; +const uint8_t pin_TMC4361_CS[4] = {41, 36, 35, 34}; const uint8_t pin_TMC4361_CLK = 37; // DAC @@ -135,13 +141,13 @@ const int pin_PG = 0; /************************************ camera trigger and strobe ************************************/ /***************************************************************************************************/ static const int TRIGGER_PULSE_LENGTH_us = 50; -bool trigger_output_level[6] = {HIGH,HIGH,HIGH,HIGH,HIGH,HIGH}; -bool control_strobe[6] = {false,false,false,false,false,false}; -bool strobe_output_level[6] = {LOW,LOW,LOW,LOW,LOW,LOW}; -bool strobe_on[6] = {false,false,false,false,false,false}; -int strobe_delay[6] = {0,0,0,0,0,0}; -long illumination_on_time[6] = {0,0,0,0,0,0}; -long timestamp_trigger_rising_edge[6] = {0,0,0,0,0,0}; +bool trigger_output_level[6] = {HIGH, HIGH, HIGH, HIGH, HIGH, HIGH}; +bool control_strobe[6] = {false, 
false, false, false, false, false}; +bool strobe_output_level[6] = {LOW, LOW, LOW, LOW, LOW, LOW}; +bool strobe_on[6] = {false, false, false, false, false, false}; +unsigned long strobe_delay[6] = {0, 0, 0, 0, 0, 0}; +long illumination_on_time[6] = {0, 0, 0, 0, 0, 0}; +long timestamp_trigger_rising_edge[6] = {0, 0, 0, 0, 0, 0}; IntervalTimer strobeTimer; static const int strobeTimer_interval_us = 100; @@ -152,36 +158,42 @@ const uint8_t DAC8050x_DAC_ADDR = 0x08; const uint8_t DAC8050x_GAIN_ADDR = 0x04; const uint8_t DAC8050x_CONFIG_ADDR = 0x03; -void set_DAC8050x_gain() +void set_DAC8050x_gain(uint8_t div, uint8_t gains) { uint16_t value = 0; - value = (0x00 << 8) + 0x80; // REFDIV-E = 0 (no div), BUFF0-GAIN = 0 (no gain) for channel 0-6, 2 for channel 7 + value = (div << 8) + gains; SPI.beginTransaction(SPISettings(1000000, MSBFIRST, SPI_MODE2)); - digitalWrite(DAC8050x_CS_pin,LOW); + digitalWrite(DAC8050x_CS_pin, LOW); SPI.transfer(DAC8050x_GAIN_ADDR); SPI.transfer16(value); - digitalWrite(DAC8050x_CS_pin,HIGH); + digitalWrite(DAC8050x_CS_pin, HIGH); SPI.endTransaction(); } +// REFDIV-E = 0 (no div), BUFF7-GAIN = 0 (no gain) 1 for channel 0-6, 2 for channel 7 +void set_DAC8050x_default_gain() +{ + set_DAC8050x_gain(0x00, 0x80); +} + void set_DAC8050x_config() { uint16_t value = 0; SPI.beginTransaction(SPISettings(1000000, MSBFIRST, SPI_MODE2)); - digitalWrite(DAC8050x_CS_pin,LOW); + digitalWrite(DAC8050x_CS_pin, LOW); SPI.transfer(DAC8050x_CONFIG_ADDR); SPI.transfer16(value); - digitalWrite(DAC8050x_CS_pin,HIGH); + digitalWrite(DAC8050x_CS_pin, HIGH); SPI.endTransaction(); } void set_DAC8050x_output(int channel, uint16_t value) { SPI.beginTransaction(SPISettings(1000000, MSBFIRST, SPI_MODE2)); - digitalWrite(DAC8050x_CS_pin,LOW); - SPI.transfer(DAC8050x_DAC_ADDR+channel); + digitalWrite(DAC8050x_CS_pin, LOW); + SPI.transfer(DAC8050x_DAC_ADDR + channel); SPI.transfer16(value); - digitalWrite(DAC8050x_CS_pin,HIGH); + digitalWrite(DAC8050x_CS_pin, HIGH); 
SPI.endTransaction(); } @@ -189,8 +201,8 @@ void set_DAC8050x_output(int channel, uint16_t value) /******************************************* steppers **********************************************/ /***************************************************************************************************/ const uint32_t clk_Hz_TMC4361 = 16000000; -const uint8_t lft_sw_pol[4] = {0,0,0,1}; -const uint8_t rht_sw_pol[4] = {0,0,0,1}; +const uint8_t lft_sw_pol[4] = {0, 0, 0, 1}; +const uint8_t rht_sw_pol[4] = {0, 0, 0, 1}; const uint8_t TMC4361_homing_sw[4] = {LEFT_SW, LEFT_SW, RGHT_SW, LEFT_SW}; const int32_t vslow = 0x04FFFC00; @@ -237,23 +249,34 @@ bool homing_direction_Z; elapsedMicros us_since_x_home_found; elapsedMicros us_since_y_home_found; elapsedMicros us_since_z_home_found; -/* to do: move the movement direction sign from configuration.txt (python) to the firmware (with - * setPinsInverted() so that homing_direction_X, homing_direction_Y, homing_direction_Z will no - * longer be needed. This way the home switches can act as limit switches - right now because - * homing_direction_ needs be set by the computer, before they're set, the home switches cannot be - * used as limit switches. Alternatively, add homing_direction_set variables. - */ - -long X_POS_LIMIT = X_POS_LIMIT_MM*steps_per_mm_X; -long X_NEG_LIMIT = X_NEG_LIMIT_MM*steps_per_mm_X; -long Y_POS_LIMIT = Y_POS_LIMIT_MM*steps_per_mm_Y; -long Y_NEG_LIMIT = Y_NEG_LIMIT_MM*steps_per_mm_Y; -long Z_POS_LIMIT = Z_POS_LIMIT_MM*steps_per_mm_Z; -long Z_NEG_LIMIT = Z_NEG_LIMIT_MM*steps_per_mm_Z; +/* to do: move the movement direction sign from configuration.txt (python) to the firmware (with + setPinsInverted() so that homing_direction_X, homing_direction_Y, homing_direction_Z will no + longer be needed. This way the home switches can act as limit switches - right now because + homing_direction_ needs be set by the computer, before they're set, the home switches cannot be + used as limit switches. 
Alternatively, add homing_direction_set variables. +*/ + +long X_POS_LIMIT = X_POS_LIMIT_MM * steps_per_mm_X; +long X_NEG_LIMIT = X_NEG_LIMIT_MM * steps_per_mm_X; +long Y_POS_LIMIT = Y_POS_LIMIT_MM * steps_per_mm_Y; +long Y_NEG_LIMIT = Y_NEG_LIMIT_MM * steps_per_mm_Y; +long Z_POS_LIMIT = Z_POS_LIMIT_MM * steps_per_mm_Z; +long Z_NEG_LIMIT = Z_NEG_LIMIT_MM * steps_per_mm_Z; // PID bool stage_PID_enabled[N_MOTOR]; +// PID arguments +typedef struct pid_arguments { + uint16_t p; + uint8_t i; + uint8_t d; +} PID_ARGUMENTS; +PID_ARGUMENTS axis_pid_arg[N_MOTOR]; + +// home safety margin +uint16_t home_safety_margin[4] = {4, 4, 4, 4}; + /***************************************************************************************************/ /******************************************** timing ***********************************************/ /***************************************************************************************************/ @@ -265,6 +288,14 @@ volatile bool flag_send_pos_update = false; static const int interval_send_pos_update = 10000; // in us elapsedMicros us_since_last_pos_update; +static const int interval_check_position = 10000; // in us +elapsedMicros us_since_last_check_position; + +static const int interval_send_joystick_update = 30000; // in us +elapsedMicros us_since_last_joystick_update; + +static const int interval_check_limit = 20000; // in us +elapsedMicros us_since_last_check_limit; /***************************************************************************************************/ /******************************************* joystick **********************************************/ /***************************************************************************************************/ @@ -274,7 +305,7 @@ bool flag_read_joystick = false; // joystick xy int16_t joystick_delta_x = 0; -int16_t joystick_delta_y = 0; +int16_t joystick_delta_y = 0; // joystick button bool joystick_button_pressed = false; @@ -289,10 +320,10 @@ uint8_t btns; void 
onJoystickPacketReceived(const uint8_t* buffer, size_t size) { - - if(size != JOYSTICK_MSG_LENGTH) + + if (size != JOYSTICK_MSG_LENGTH) { - if(DEBUG_MODE) + if (DEBUG_MODE) Serial.println("! wrong number of bytes received !"); return; } @@ -300,29 +331,29 @@ void onJoystickPacketReceived(const uint8_t* buffer, size_t size) // focuswheel_pos = uint32_t(buffer[0])*16777216 + uint32_t(buffer[1])*65536 + uint32_t(buffer[2])*256 + uint32_t(buffer[3]); // focusPosition = focuswheel_pos; - if(first_packet_from_joystick_panel) + if (first_packet_from_joystick_panel) { - focuswheel_pos = int32_t(uint32_t(buffer[0])*16777216 + uint32_t(buffer[1])*65536 + uint32_t(buffer[2])*256 + uint32_t(buffer[3])); + focuswheel_pos = int32_t(uint32_t(buffer[0]) * 16777216 + uint32_t(buffer[1]) * 65536 + uint32_t(buffer[2]) * 256 + uint32_t(buffer[3])); first_packet_from_joystick_panel = false; } else { - focusPosition = focusPosition + ( int32_t(uint32_t(buffer[0])*16777216 + uint32_t(buffer[1])*65536 + uint32_t(buffer[2])*256 + uint32_t(buffer[3]))-focuswheel_pos ); - focuswheel_pos = int32_t(uint32_t(buffer[0])*16777216 + uint32_t(buffer[1])*65536 + uint32_t(buffer[2])*256 + uint32_t(buffer[3])); + focusPosition = focusPosition + ( int32_t(uint32_t(buffer[0]) * 16777216 + uint32_t(buffer[1]) * 65536 + uint32_t(buffer[2]) * 256 + uint32_t(buffer[3])) - focuswheel_pos ); + focuswheel_pos = int32_t(uint32_t(buffer[0]) * 16777216 + uint32_t(buffer[1]) * 65536 + uint32_t(buffer[2]) * 256 + uint32_t(buffer[3])); } - joystick_delta_x = JOYSTICK_SIGN_X*int16_t( uint16_t(buffer[4])*256 + uint16_t(buffer[5]) ); - joystick_delta_y = JOYSTICK_SIGN_Y*int16_t( uint16_t(buffer[6])*256 + uint16_t(buffer[7]) ); + joystick_delta_x = JOYSTICK_SIGN_X * int16_t( uint16_t(buffer[4]) * 256 + uint16_t(buffer[5]) ); + joystick_delta_y = JOYSTICK_SIGN_Y * int16_t( uint16_t(buffer[6]) * 256 + uint16_t(buffer[7]) ); btns = buffer[8]; // temporary /* - if(btns & 0x01) - { + if(btns & 0x01) + { 
joystick_button_pressed = true; joystick_button_pressed_timestamp = millis(); // to add: ACK for the joystick panel - } + } */ flag_read_joystick = true; @@ -334,6 +365,7 @@ void onJoystickPacketReceived(const uint8_t* buffer, size_t size) /***************************************************************************************************/ int illumination_source = 0; uint16_t illumination_intensity = 65535; +float illumination_intensity_factor = 0.6; uint8_t led_matrix_r = 0; uint8_t led_matrix_g = 0; uint8_t led_matrix_b = 0; @@ -380,28 +412,28 @@ void turn_on_LED_matrix_pattern(CRGB * matrix, int pattern, uint8_t led_matrix_r void turn_on_illumination() { illumination_is_on = true; - switch(illumination_source) + switch (illumination_source) { case ILLUMINATION_SOURCE_LED_ARRAY_FULL: - turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_FULL,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_FULL, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_LEFT_HALF: - turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_LEFT_HALF,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_LEFT_HALF, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_HALF: - turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_HALF,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_HALF, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_LEFTB_RIGHTR: - turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_LEFTB_RIGHTR,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_LEFTB_RIGHTR, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_LOW_NA: - 
turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_LOW_NA,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_LOW_NA, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_LEFT_DOT: - turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_LEFT_DOT,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_LEFT_DOT, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_DOT: - turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_DOT,led_matrix_r,led_matrix_g,led_matrix_b); + turn_on_LED_matrix_pattern(matrix, ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_DOT, led_matrix_r, led_matrix_g, led_matrix_b); break; case ILLUMINATION_SOURCE_LED_ARRAY_TOP_HALF: turn_on_LED_matrix_pattern(matrix,ILLUMINATION_SOURCE_LED_ARRAY_TOP_HALF,led_matrix_r,led_matrix_g,led_matrix_b); @@ -412,19 +444,24 @@ void turn_on_illumination() case ILLUMINATION_SOURCE_LED_EXTERNAL_FET: break; case ILLUMINATION_SOURCE_405NM: - digitalWrite(LASER_405nm,HIGH); + if(digitalRead(LASER_INTERLOCK) == LOW) + digitalWrite(LASER_405nm, HIGH); break; case ILLUMINATION_SOURCE_488NM: - digitalWrite(LASER_488nm,HIGH); + if(digitalRead(LASER_INTERLOCK) == LOW) + digitalWrite(LASER_488nm, HIGH); break; case ILLUMINATION_SOURCE_638NM: - digitalWrite(LASER_638nm,HIGH); + if(digitalRead(LASER_INTERLOCK) == LOW) + digitalWrite(LASER_638nm, HIGH); break; case ILLUMINATION_SOURCE_561NM: - digitalWrite(LASER_561nm,HIGH); + if(digitalRead(LASER_INTERLOCK) == LOW) + digitalWrite(LASER_561nm, HIGH); break; case ILLUMINATION_SOURCE_730NM: - digitalWrite(LASER_730nm,HIGH); + if(digitalRead(LASER_INTERLOCK) == LOW) + digitalWrite(LASER_730nm, HIGH); break; } } @@ -463,19 +500,19 @@ void turn_off_illumination() case ILLUMINATION_SOURCE_LED_EXTERNAL_FET: break; case ILLUMINATION_SOURCE_405NM: - digitalWrite(LASER_405nm,LOW); + 
digitalWrite(LASER_405nm, LOW); break; case ILLUMINATION_SOURCE_488NM: - digitalWrite(LASER_488nm,LOW); + digitalWrite(LASER_488nm, LOW); break; case ILLUMINATION_SOURCE_638NM: - digitalWrite(LASER_638nm,LOW); + digitalWrite(LASER_638nm, LOW); break; case ILLUMINATION_SOURCE_561NM: - digitalWrite(LASER_561nm,LOW); + digitalWrite(LASER_561nm, LOW); break; case ILLUMINATION_SOURCE_730NM: - digitalWrite(LASER_730nm,LOW); + digitalWrite(LASER_730nm, LOW); break; } illumination_is_on = false; @@ -484,26 +521,26 @@ void turn_off_illumination() void set_illumination(int source, uint16_t intensity) { illumination_source = source; - illumination_intensity = intensity*0.6; - switch(source) + illumination_intensity = intensity * illumination_intensity_factor; + switch (source) { case ILLUMINATION_SOURCE_405NM: - set_DAC8050x_output(0,illumination_intensity); + set_DAC8050x_output(0, illumination_intensity); break; case ILLUMINATION_SOURCE_488NM: - set_DAC8050x_output(1,illumination_intensity); + set_DAC8050x_output(1, illumination_intensity); break; case ILLUMINATION_SOURCE_638NM: - set_DAC8050x_output(3,illumination_intensity); + set_DAC8050x_output(3, illumination_intensity); break; case ILLUMINATION_SOURCE_561NM: - set_DAC8050x_output(2,illumination_intensity); + set_DAC8050x_output(2, illumination_intensity); break; case ILLUMINATION_SOURCE_730NM: - set_DAC8050x_output(4,illumination_intensity); + set_DAC8050x_output(4, illumination_intensity); break; - } - if(illumination_is_on) + } + if (illumination_is_on) turn_on_illumination(); //update the illumination } @@ -513,21 +550,21 @@ void set_illumination_led_matrix(int source, uint8_t r, uint8_t g, uint8_t b) led_matrix_r = r; led_matrix_g = g; led_matrix_b = b; - if(illumination_is_on) + if (illumination_is_on) turn_on_illumination(); //update the illumination } void ISR_strobeTimer() { - for(int camera_channel=0;camera_channel<6;camera_channel++) + for (int camera_channel = 0; camera_channel < 6; camera_channel++) { // 
strobe pulse - if(control_strobe[camera_channel]) + if (control_strobe[camera_channel]) { - if(illumination_on_time[camera_channel] <= 30000) + if (illumination_on_time[camera_channel] <= 30000) { // if the illumination on time is smaller than 30 ms, use delayMicroseconds to control the pulse length to avoid pulse length jitter - if( ((micros()-timestamp_trigger_rising_edge[camera_channel])>=strobe_delay[camera_channel]) && strobe_output_level[camera_channel]==LOW ) + if ( ((micros() - timestamp_trigger_rising_edge[camera_channel]) >= strobe_delay[camera_channel]) && strobe_output_level[camera_channel] == LOW ) { turn_on_illumination(); delayMicroseconds(illumination_on_time[camera_channel]); @@ -538,13 +575,13 @@ void ISR_strobeTimer() else { // start the strobe - if( ((micros()-timestamp_trigger_rising_edge[camera_channel])>=strobe_delay[camera_channel]) && strobe_output_level[camera_channel]==LOW ) + if ( ((micros() - timestamp_trigger_rising_edge[camera_channel]) >= strobe_delay[camera_channel]) && strobe_output_level[camera_channel] == LOW ) { turn_on_illumination(); strobe_output_level[camera_channel] = HIGH; } // end the strobe - if(((micros()-timestamp_trigger_rising_edge[camera_channel])>=strobe_delay[camera_channel]+illumination_on_time[camera_channel]) && strobe_output_level[camera_channel]==HIGH) + if (((micros() - timestamp_trigger_rising_edge[camera_channel]) >= strobe_delay[camera_channel] + illumination_on_time[camera_channel]) && strobe_output_level[camera_channel] == HIGH) { turn_off_illumination(); strobe_output_level[camera_channel] = LOW; @@ -559,9 +596,9 @@ void ISR_strobeTimer() /********************************************* setup ***********************************************/ /***************************************************************************************************/ void setup() { - + // Initialize Native USB port - SerialUSB.begin(2000000); + SerialUSB.begin(2000000); delay(500); SerialUSB.setTimeout(200); //while(!SerialUSB); 
// Wait until connection is established @@ -573,15 +610,18 @@ void setup() { joystick_packetSerial.setPacketHandler(&onJoystickPacketReceived); // power good pin - pinMode(pin_PG,INPUT_PULLUP); + pinMode(pin_PG, INPUT_PULLUP); + + // laser safety interlock + pinMode(LASER_INTERLOCK, INPUT_PULLUP); // camera trigger pins - for(int i=0;i<6;i++) + for (int i = 0; i < 6; i++) { pinMode(camera_trigger_pins[i], OUTPUT); digitalWrite(camera_trigger_pins[i], HIGH); } - + // enable pins pinMode(LASER_405nm, OUTPUT); digitalWrite(LASER_405nm, LOW); @@ -598,55 +638,60 @@ void setup() { pinMode(LASER_730nm, OUTPUT); digitalWrite(LASER_730nm, LOW); - for(int i=0;i 16 (out of 31) - + tmc4361A_tmc2660_config(&tmc4361[x], (X_MOTOR_RMS_CURRENT_mA / 1000)*R_sense_xy / 0.2298, X_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_X_MM, FULLSTEPS_PER_REV_X, MICROSTEPPING_X); + tmc4361A_tmc2660_config(&tmc4361[y], (Y_MOTOR_RMS_CURRENT_mA / 1000)*R_sense_xy / 0.2298, Y_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_Y_MM, FULLSTEPS_PER_REV_Y, MICROSTEPPING_Y); + tmc4361A_tmc2660_config(&tmc4361[z], (Z_MOTOR_RMS_CURRENT_mA / 1000)*R_sense_z / 0.2298, Z_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_Z_MM, FULLSTEPS_PER_REV_Z, MICROSTEPPING_Z); // need to make current scaling on TMC2660 is > 16 (out of 31) + // SPI SPI.begin(); delayMicroseconds(5000); // initilize TMC4361 and TMC2660 - turn on functionality - for (int i = 0; i < N_MOTOR; i++) + for (int i = 0; i < N_MOTOR; i++) tmc4361A_tmc2660_init(&tmc4361[i], clk_Hz_TMC4361); // set up ICs with SPI control and other parameters // enable limit switch reading @@ -685,8 +730,8 @@ void setup() { max_acceleration_usteps[z] = tmc4361A_ammToMicrosteps(&tmc4361[z], MAX_ACCELERATION_Z_mm); max_velocity_usteps[x] = tmc4361A_vmmToMicrosteps(&tmc4361[x], MAX_VELOCITY_X_mm); max_velocity_usteps[y] = tmc4361A_vmmToMicrosteps(&tmc4361[y], MAX_VELOCITY_Y_mm); - max_velocity_usteps[z] = tmc4361A_vmmToMicrosteps(&tmc4361[z], MAX_VELOCITY_Z_mm); - for (int i = 0; i < N_MOTOR; i++) + 
max_velocity_usteps[z] = tmc4361A_vmmToMicrosteps(&tmc4361[z], MAX_VELOCITY_Z_mm); + for (int i = 0; i < N_MOTOR; i++) { // initialize ramp with default values tmc4361A_setMaxSpeed(&tmc4361[i], max_velocity_usteps[i]); @@ -695,31 +740,31 @@ void setup() { tmc4361[i].rampParam[DFINAL_IDX] = 0; tmc4361A_sRampInit(&tmc4361[i]); } - + /* - // homing - temporary - tmc4361A_enableHomingLimit(&tmc4361[x], lft_sw_pol[x], TMC4361_homing_sw[x]); - tmc4361A_moveToExtreme(&tmc4361[x], vslow*2, RGHT_DIR); - tmc4361A_moveToExtreme(&tmc4361[x], vslow*2, LEFT_DIR); - tmc4361A_setHome(&tmc4361[x]); - - tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y]); - tmc4361A_moveToExtreme(&tmc4361[y], vslow*2, RGHT_DIR); - tmc4361A_moveToExtreme(&tmc4361[y], vslow*2, LEFT_DIR); - tmc4361A_setHome(&tmc4361[y]); + // homing - temporary + tmc4361A_enableHomingLimit(&tmc4361[x], lft_sw_pol[x], TMC4361_homing_sw[x]); + tmc4361A_moveToExtreme(&tmc4361[x], vslow*2, RGHT_DIR); + tmc4361A_moveToExtreme(&tmc4361[x], vslow*2, LEFT_DIR); + tmc4361A_setHome(&tmc4361[x]); + + tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y]); + tmc4361A_moveToExtreme(&tmc4361[y], vslow*2, RGHT_DIR); + tmc4361A_moveToExtreme(&tmc4361[y], vslow*2, LEFT_DIR); + tmc4361A_setHome(&tmc4361[y]); */ // homing switch settings - tmc4361A_enableHomingLimit(&tmc4361[x], lft_sw_pol[x], TMC4361_homing_sw[x]); - tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y]); - tmc4361A_enableHomingLimit(&tmc4361[z], rht_sw_pol[z], TMC4361_homing_sw[z]); - + tmc4361A_enableHomingLimit(&tmc4361[x], lft_sw_pol[x], TMC4361_homing_sw[x], home_safety_margin[x]); + tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y], home_safety_margin[y]); + tmc4361A_enableHomingLimit(&tmc4361[z], rht_sw_pol[z], TMC4361_homing_sw[z], home_safety_margin[z]); + /********************************************************************************************************* - 
***************************************** TMC4361A + TMC2660 end **************************************** + ***************************************** TMC4361A + TMC2660 end **************************************** *********************************************************************************************************/ // DAC init set_DAC8050x_config(); - set_DAC8050x_gain(); + set_DAC8050x_default_gain(); // led matrix FastLED.addLeds(matrix, NUM_LEDS); // 1 MHz clock rate @@ -733,8 +778,17 @@ void setup() { offset_velocity_y = 0; // strobe timer - strobeTimer.begin(ISR_strobeTimer,strobeTimer_interval_us); + strobeTimer.begin(ISR_strobeTimer, strobeTimer_interval_us); + + // motor stall prevention + tmc4361A_config_init_stallGuard(&tmc4361[x], 12, true, 1); + tmc4361A_config_init_stallGuard(&tmc4361[y], 12, true, 1); + // initialize timer value + us_since_last_pos_update = 5000; + us_since_last_check_position = 3000; + us_since_last_joystick_update = 3000; + us_since_last_check_limit = 2000; } /***************************************************************************************************/ @@ -747,20 +801,20 @@ void loop() { joystick_packetSerial.update(); // read one meesage from the buffer - while (SerialUSB.available()) - { + while (SerialUSB.available()) + { buffer_rx[buffer_rx_ptr] = SerialUSB.read(); buffer_rx_ptr = buffer_rx_ptr + 1; - if (buffer_rx_ptr == CMD_LENGTH) + if (buffer_rx_ptr == CMD_LENGTH) { buffer_rx_ptr = 0; cmd_id = buffer_rx[0]; - uint8_t checksum = crc8ccitt(buffer_rx,CMD_LENGTH-1); - if(checksum != buffer_rx[CMD_LENGTH-1]) + uint8_t checksum = crc8ccitt(buffer_rx, CMD_LENGTH - 1); + if (checksum != buffer_rx[CMD_LENGTH - 1]) { checksum_error = true; // empty the serial buffer because byte-level out-of-sync can also cause this error - while (SerialUSB.available()) + while (SerialUSB.available()) SerialUSB.read(); return; } @@ -768,687 +822,765 @@ void loop() { { checksum_error = false; } - - switch(buffer_rx[1]) + + switch 
(buffer_rx[1]) { case MOVE_X: - { - long relative_position = int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])); - long current_position = tmc4361A_currentPosition(&tmc4361[x]); - X_direction = sgn(relative_position); - X_commanded_target_position = ( relative_position>0?min(current_position+relative_position,X_POS_LIMIT):max(current_position+relative_position,X_NEG_LIMIT) ); - if( tmc4361A_moveTo(&tmc4361[x], X_commanded_target_position) == 0) { - X_commanded_movement_in_progress = true; - mcu_cmd_execution_in_progress = true; + long relative_position = int32_t(uint32_t(buffer_rx[2]) * 16777216 + uint32_t(buffer_rx[3]) * 65536 + uint32_t(buffer_rx[4]) * 256 + uint32_t(buffer_rx[5])); + long current_position = tmc4361A_currentPosition(&tmc4361[x]); + X_direction = sgn(relative_position); + X_commanded_target_position = ( relative_position > 0 ? min(current_position + relative_position, X_POS_LIMIT) : max(current_position + relative_position, X_NEG_LIMIT) ); + if ( tmc4361A_moveTo(&tmc4361[x], X_commanded_target_position) == 0) + { + X_commanded_movement_in_progress = true; + mcu_cmd_execution_in_progress = true; + } + break; } - break; - } case MOVE_Y: - { - long relative_position = int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])); - long current_position = tmc4361A_currentPosition(&tmc4361[y]); - Y_direction = sgn(relative_position); - Y_commanded_target_position = ( relative_position>0?min(current_position+relative_position,Y_POS_LIMIT):max(current_position+relative_position,Y_NEG_LIMIT) ); - if( tmc4361A_moveTo(&tmc4361[y], Y_commanded_target_position) == 0) { - Y_commanded_movement_in_progress = true; - mcu_cmd_execution_in_progress = true; + long relative_position = int32_t(uint32_t(buffer_rx[2]) * 16777216 + uint32_t(buffer_rx[3]) * 65536 + uint32_t(buffer_rx[4]) * 256 + uint32_t(buffer_rx[5])); + long 
current_position = tmc4361A_currentPosition(&tmc4361[y]); + Y_direction = sgn(relative_position); + Y_commanded_target_position = ( relative_position > 0 ? min(current_position + relative_position, Y_POS_LIMIT) : max(current_position + relative_position, Y_NEG_LIMIT) ); + if ( tmc4361A_moveTo(&tmc4361[y], Y_commanded_target_position) == 0) + { + Y_commanded_movement_in_progress = true; + mcu_cmd_execution_in_progress = true; + } + break; } - break; - } case MOVE_Z: - { - long relative_position = int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])); - long current_position = tmc4361A_currentPosition(&tmc4361[z]); - Z_direction = sgn(relative_position); - Z_commanded_target_position = ( relative_position>0?min(current_position+relative_position,Z_POS_LIMIT):max(current_position+relative_position,Z_NEG_LIMIT) ); - focusPosition = Z_commanded_target_position; - if( tmc4361A_moveTo(&tmc4361[z], Z_commanded_target_position) == 0) { - Z_commanded_movement_in_progress = true; - mcu_cmd_execution_in_progress = true; + long relative_position = int32_t(uint32_t(buffer_rx[2]) * 16777216 + uint32_t(buffer_rx[3]) * 65536 + uint32_t(buffer_rx[4]) * 256 + uint32_t(buffer_rx[5])); + long current_position = tmc4361A_currentPosition(&tmc4361[z]); + Z_direction = sgn(relative_position); + Z_commanded_target_position = ( relative_position > 0 ? 
min(current_position + relative_position, Z_POS_LIMIT) : max(current_position + relative_position, Z_NEG_LIMIT) ); + focusPosition = Z_commanded_target_position; + if ( tmc4361A_moveTo(&tmc4361[z], Z_commanded_target_position) == 0) + { + Z_commanded_movement_in_progress = true; + mcu_cmd_execution_in_progress = true; + } + break; } - break; - } case MOVETO_X: - { - long absolute_position = int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])); - X_direction = sgn(absolute_position-tmc4361A_currentPosition(&tmc4361[x])); - X_commanded_target_position = absolute_position; - if(tmc4361A_moveTo(&tmc4361[x], X_commanded_target_position) == 0) { - X_commanded_movement_in_progress = true; - mcu_cmd_execution_in_progress = true; - } - break; - } - case MOVETO_Y: - { - long absolute_position = int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])); - Y_direction = sgn(absolute_position-tmc4361A_currentPosition(&tmc4361[y])); - Y_commanded_target_position = absolute_position; - if(tmc4361A_moveTo(&tmc4361[y], Y_commanded_target_position)==0) - { - Y_commanded_movement_in_progress = true; - mcu_cmd_execution_in_progress = true; - } - break; - } - case MOVETO_Z: - { - long absolute_position = int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])); - Z_direction = sgn(absolute_position-tmc4361A_currentPosition(&tmc4361[z])); - Z_commanded_target_position = absolute_position; - if(tmc4361A_moveTo(&tmc4361[z], Z_commanded_target_position)==0) - { - focusPosition = absolute_position; - Z_commanded_movement_in_progress = true; - mcu_cmd_execution_in_progress = true; - } - break; - } - case SET_LIM: - { - switch(buffer_rx[2]) - { - case LIM_CODE_X_POSITIVE: - { - X_POS_LIMIT = int32_t(uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + 
uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6])); - tmc4361A_setVirtualLimit(&tmc4361[x],1,X_POS_LIMIT); - tmc4361A_enableVirtualLimitSwitch(&tmc4361[x],1); - break; - } - case LIM_CODE_X_NEGATIVE: + long absolute_position = int32_t(uint32_t(buffer_rx[2]) * 16777216 + uint32_t(buffer_rx[3]) * 65536 + uint32_t(buffer_rx[4]) * 256 + uint32_t(buffer_rx[5])); + X_direction = sgn(absolute_position - tmc4361A_currentPosition(&tmc4361[x])); + X_commanded_target_position = absolute_position; + if (tmc4361A_moveTo(&tmc4361[x], X_commanded_target_position) == 0) { - X_NEG_LIMIT = int32_t(uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6])); - tmc4361A_setVirtualLimit(&tmc4361[x],-1,X_NEG_LIMIT); - tmc4361A_enableVirtualLimitSwitch(&tmc4361[x],-1); - break; - } - case LIM_CODE_Y_POSITIVE: - { - Y_POS_LIMIT = int32_t(uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6])); - tmc4361A_setVirtualLimit(&tmc4361[y],1,Y_POS_LIMIT); - tmc4361A_enableVirtualLimitSwitch(&tmc4361[y],1); - break; - } - case LIM_CODE_Y_NEGATIVE: - { - Y_NEG_LIMIT = int32_t(uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6])); - tmc4361A_setVirtualLimit(&tmc4361[y],-1,Y_NEG_LIMIT); - tmc4361A_enableVirtualLimitSwitch(&tmc4361[y],-1); - break; - } - case LIM_CODE_Z_POSITIVE: - { - Z_POS_LIMIT = int32_t(uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6])); - tmc4361A_setVirtualLimit(&tmc4361[z],1,Z_POS_LIMIT); - tmc4361A_enableVirtualLimitSwitch(&tmc4361[z],1); - break; - } - case LIM_CODE_Z_NEGATIVE: - { - Z_NEG_LIMIT = int32_t(uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6])); - tmc4361A_setVirtualLimit(&tmc4361[z],-1,Z_NEG_LIMIT); - 
tmc4361A_enableVirtualLimitSwitch(&tmc4361[z],-1); - break; + X_commanded_movement_in_progress = true; + mcu_cmd_execution_in_progress = true; } + break; } - break; - } - case SET_LIM_SWITCH_POLARITY: - { - switch(buffer_rx[2]) + case MOVETO_Y: { - case AXIS_X: + long absolute_position = int32_t(uint32_t(buffer_rx[2]) * 16777216 + uint32_t(buffer_rx[3]) * 65536 + uint32_t(buffer_rx[4]) * 256 + uint32_t(buffer_rx[5])); + Y_direction = sgn(absolute_position - tmc4361A_currentPosition(&tmc4361[y])); + Y_commanded_target_position = absolute_position; + if (tmc4361A_moveTo(&tmc4361[y], Y_commanded_target_position) == 0) { - if(buffer_rx[3]!=DISABLED) - { - LIM_SWITCH_X_ACTIVE_LOW = (buffer_rx[3]==ACTIVE_LOW); - } - break; - } - case AXIS_Y: - { - if(buffer_rx[3]!=DISABLED) - { - LIM_SWITCH_Y_ACTIVE_LOW = (buffer_rx[3]==ACTIVE_LOW); - } - break; - } - case AXIS_Z: - { - if(buffer_rx[3]!=DISABLED) - { - LIM_SWITCH_Z_ACTIVE_LOW = (buffer_rx[3]==ACTIVE_LOW); - } - break; + Y_commanded_movement_in_progress = true; + mcu_cmd_execution_in_progress = true; } + break; } - break; - } - case CONFIGURE_STEPPER_DRIVER: - { - switch(buffer_rx[2]) + case MOVETO_Z: { - case AXIS_X: - { - int microstepping_setting = buffer_rx[3]; - if(microstepping_setting>128) - microstepping_setting = 256; - MICROSTEPPING_X = microstepping_setting==0?1:microstepping_setting; - steps_per_mm_X = FULLSTEPS_PER_REV_X*MICROSTEPPING_X/SCREW_PITCH_X_MM; - X_MOTOR_RMS_CURRENT_mA = uint16_t(buffer_rx[4])*256+uint16_t(buffer_rx[5]); - X_MOTOR_I_HOLD = float(buffer_rx[6])/255; - tmc4361A_tmc2660_config(&tmc4361[x], (X_MOTOR_RMS_CURRENT_mA/1000.0)*R_sense_xy/0.2298, X_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_X_MM, FULLSTEPS_PER_REV_X, MICROSTEPPING_X); - tmc4361A_tmc2660_update(&tmc4361[x]); - break; - } - case AXIS_Y: + long absolute_position = int32_t(uint32_t(buffer_rx[2]) * 16777216 + uint32_t(buffer_rx[3]) * 65536 + uint32_t(buffer_rx[4]) * 256 + uint32_t(buffer_rx[5])); + Z_direction = sgn(absolute_position - 
tmc4361A_currentPosition(&tmc4361[z])); + Z_commanded_target_position = absolute_position; + if (tmc4361A_moveTo(&tmc4361[z], Z_commanded_target_position) == 0) { - int microstepping_setting = buffer_rx[3]; - if(microstepping_setting>128) - microstepping_setting = 256; - MICROSTEPPING_Y = microstepping_setting==0?1:microstepping_setting; - steps_per_mm_Y = FULLSTEPS_PER_REV_Y*MICROSTEPPING_Y/SCREW_PITCH_Y_MM; - Y_MOTOR_RMS_CURRENT_mA = uint16_t(buffer_rx[4])*256+uint16_t(buffer_rx[5]); - Y_MOTOR_I_HOLD = float(buffer_rx[6])/255; - tmc4361A_tmc2660_config(&tmc4361[y], (Y_MOTOR_RMS_CURRENT_mA/1000.0)*R_sense_xy/0.2298, Y_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_Y_MM, FULLSTEPS_PER_REV_Y, MICROSTEPPING_Y); - tmc4361A_tmc2660_update(&tmc4361[y]); - break; - } - case AXIS_Z: - { - int microstepping_setting = buffer_rx[3]; - if(microstepping_setting>128) - microstepping_setting = 256; - MICROSTEPPING_Z = microstepping_setting==0?1:microstepping_setting; - steps_per_mm_Z = FULLSTEPS_PER_REV_Z*MICROSTEPPING_Z/SCREW_PITCH_Z_MM; - Z_MOTOR_RMS_CURRENT_mA = uint16_t(buffer_rx[4])*256+uint16_t(buffer_rx[5]); - Z_MOTOR_I_HOLD = float(buffer_rx[6])/255; - tmc4361A_tmc2660_config(&tmc4361[z], (Z_MOTOR_RMS_CURRENT_mA/1000.0)*R_sense_z/0.2298, Z_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_Z_MM, FULLSTEPS_PER_REV_Z, MICROSTEPPING_Z); - tmc4361A_tmc2660_update(&tmc4361[z]); - break; + focusPosition = absolute_position; + Z_commanded_movement_in_progress = true; + mcu_cmd_execution_in_progress = true; } + break; } - break; - } - case SET_MAX_VELOCITY_ACCELERATION: - { - switch(buffer_rx[2]) + case SET_LIM: { - case AXIS_X: - { - MAX_VELOCITY_X_mm = float(uint16_t(buffer_rx[3])*256+uint16_t(buffer_rx[4]))/100; - MAX_ACCELERATION_X_mm = float(uint16_t(buffer_rx[5])*256+uint16_t(buffer_rx[6]))/10; - tmc4361A_setMaxSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], MAX_VELOCITY_X_mm) ); - tmc4361A_setMaxAcceleration(&tmc4361[x], tmc4361A_ammToMicrosteps( &tmc4361[x], MAX_ACCELERATION_X_mm) ); - 
break; - } - case AXIS_Y: - { - MAX_VELOCITY_Y_mm = float(uint16_t(buffer_rx[3])*256+uint16_t(buffer_rx[4]))/100; - MAX_ACCELERATION_Y_mm = float(uint16_t(buffer_rx[5])*256+uint16_t(buffer_rx[6]))/10; - tmc4361A_setMaxSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], MAX_VELOCITY_Y_mm) ); - tmc4361A_setMaxAcceleration(&tmc4361[y], tmc4361A_ammToMicrosteps( &tmc4361[y], MAX_ACCELERATION_Y_mm) ); - break; - } - case AXIS_Z: + switch (buffer_rx[2]) { - MAX_VELOCITY_Z_mm = float(uint16_t(buffer_rx[3])*256+uint16_t(buffer_rx[4]))/100; - MAX_ACCELERATION_Z_mm = float(uint16_t(buffer_rx[5])*256+uint16_t(buffer_rx[6]))/10; - tmc4361A_setMaxSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], MAX_VELOCITY_Z_mm) ); - tmc4361A_setMaxAcceleration(&tmc4361[z], tmc4361A_ammToMicrosteps( &tmc4361[z], MAX_ACCELERATION_Z_mm) ); - break; + case LIM_CODE_X_POSITIVE: + { + X_POS_LIMIT = int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])); + tmc4361A_setVirtualLimit(&tmc4361[x], 1, X_POS_LIMIT); + tmc4361A_enableVirtualLimitSwitch(&tmc4361[x], 1); + break; + } + case LIM_CODE_X_NEGATIVE: + { + X_NEG_LIMIT = int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])); + tmc4361A_setVirtualLimit(&tmc4361[x], -1, X_NEG_LIMIT); + tmc4361A_enableVirtualLimitSwitch(&tmc4361[x], -1); + break; + } + case LIM_CODE_Y_POSITIVE: + { + Y_POS_LIMIT = int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])); + tmc4361A_setVirtualLimit(&tmc4361[y], 1, Y_POS_LIMIT); + tmc4361A_enableVirtualLimitSwitch(&tmc4361[y], 1); + break; + } + case LIM_CODE_Y_NEGATIVE: + { + Y_NEG_LIMIT = int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])); + tmc4361A_setVirtualLimit(&tmc4361[y], -1, Y_NEG_LIMIT); + 
tmc4361A_enableVirtualLimitSwitch(&tmc4361[y], -1); + break; + } + case LIM_CODE_Z_POSITIVE: + { + Z_POS_LIMIT = int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])); + tmc4361A_setVirtualLimit(&tmc4361[z], 1, Z_POS_LIMIT); + tmc4361A_enableVirtualLimitSwitch(&tmc4361[z], 1); + break; + } + case LIM_CODE_Z_NEGATIVE: + { + Z_NEG_LIMIT = int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])); + tmc4361A_setVirtualLimit(&tmc4361[z], -1, Z_NEG_LIMIT); + tmc4361A_enableVirtualLimitSwitch(&tmc4361[z], -1); + break; + } } + break; } - break; - } - case SET_LEAD_SCREW_PITCH: - { - switch(buffer_rx[2]) + case SET_LIM_SWITCH_POLARITY: { - case AXIS_X: + switch (buffer_rx[2]) { - SCREW_PITCH_X_MM = float(uint16_t(buffer_rx[3])*256+uint16_t(buffer_rx[4]))/1000; - steps_per_mm_X = FULLSTEPS_PER_REV_X*MICROSTEPPING_X/SCREW_PITCH_X_MM; - break; - } - case AXIS_Y: - { - SCREW_PITCH_Y_MM = float(uint16_t(buffer_rx[3])*256+uint16_t(buffer_rx[4]))/1000; - steps_per_mm_Y = FULLSTEPS_PER_REV_Y*MICROSTEPPING_Y/SCREW_PITCH_Y_MM; - break; + case AXIS_X: + { + if (buffer_rx[3] != DISABLED) + { + LIM_SWITCH_X_ACTIVE_LOW = (buffer_rx[3] == ACTIVE_LOW); + } + break; + } + case AXIS_Y: + { + if (buffer_rx[3] != DISABLED) + { + LIM_SWITCH_Y_ACTIVE_LOW = (buffer_rx[3] == ACTIVE_LOW); + } + break; + } + case AXIS_Z: + { + if (buffer_rx[3] != DISABLED) + { + LIM_SWITCH_Z_ACTIVE_LOW = (buffer_rx[3] == ACTIVE_LOW); + } + break; + } } - case AXIS_Z: + break; + } + case SET_HOME_SAFETY_MERGIN: + { + switch (buffer_rx[2]) { - SCREW_PITCH_Z_MM = float(uint16_t(buffer_rx[3])*256+uint16_t(buffer_rx[4]))/1000; - steps_per_mm_Z = FULLSTEPS_PER_REV_Z*MICROSTEPPING_Z/SCREW_PITCH_Z_MM; - break; + case AXIS_X: + { + uint16_t margin = (uint16_t(buffer_rx[3]) << 8) + uint16_t(buffer_rx[4]); + float home_safety_margin_mm = float(margin) / 1000.0; + 
home_safety_margin[x] = tmc4361A_xmmToMicrosteps(&tmc4361[x], home_safety_margin_mm); + tmc4361A_enableHomingLimit(&tmc4361[x], lft_sw_pol[x], TMC4361_homing_sw[x], home_safety_margin[x]); + break; + } + case AXIS_Y: + { + uint16_t margin = (uint16_t(buffer_rx[3]) << 8) + uint16_t(buffer_rx[4]); + float home_safety_margin_mm = float(margin) / 1000.0; + home_safety_margin[y] = tmc4361A_xmmToMicrosteps(&tmc4361[y], home_safety_margin_mm); + tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y], home_safety_margin[y]); + break; + } + case AXIS_Z: + { + uint16_t margin = (uint16_t(buffer_rx[3]) << 8) + uint16_t(buffer_rx[4]); + float home_safety_margin_mm = float(margin) / 1000.0; + home_safety_margin[z] = tmc4361A_xmmToMicrosteps(&tmc4361[z], home_safety_margin_mm); + tmc4361A_enableHomingLimit(&tmc4361[z], lft_sw_pol[z], TMC4361_homing_sw[z], home_safety_margin[z]); + break; + } } - } - break; - } - case HOME_OR_ZERO: - { - // zeroing - if(buffer_rx[3]==HOME_OR_ZERO_ZERO) + break; + } + case SET_PID_ARGUMENTS: + { + int axis = buffer_rx[2]; + uint16_t p = (uint16_t(buffer_rx[3]) << 8) + uint16_t(buffer_rx[4]); + uint8_t i = uint8_t(buffer_rx[5]); + uint8_t d = uint8_t(buffer_rx[6]); + + axis_pid_arg[axis].p = p; + axis_pid_arg[axis].i = i; + axis_pid_arg[axis].d = d; + + break; + } + case CONFIGURE_STEPPER_DRIVER: { - switch(buffer_rx[2]) + switch (buffer_rx[2]) { case AXIS_X: - tmc4361A_setCurrentPosition(&tmc4361[x], 0); - X_pos = 0; - break; + { + int microstepping_setting = buffer_rx[3]; + if (microstepping_setting > 128) + microstepping_setting = 256; + MICROSTEPPING_X = microstepping_setting == 0 ? 
1 : microstepping_setting; + steps_per_mm_X = FULLSTEPS_PER_REV_X * MICROSTEPPING_X / SCREW_PITCH_X_MM; + X_MOTOR_RMS_CURRENT_mA = uint16_t(buffer_rx[4]) * 256 + uint16_t(buffer_rx[5]); + X_MOTOR_I_HOLD = float(buffer_rx[6]) / 255; + tmc4361A_tmc2660_config(&tmc4361[x], (X_MOTOR_RMS_CURRENT_mA / 1000.0)*R_sense_xy / 0.2298, X_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_X_MM, FULLSTEPS_PER_REV_X, MICROSTEPPING_X); + tmc4361A_tmc2660_update(&tmc4361[x]); + break; + } case AXIS_Y: - tmc4361A_setCurrentPosition(&tmc4361[y], 0); - Y_pos = 0; - break; + { + int microstepping_setting = buffer_rx[3]; + if (microstepping_setting > 128) + microstepping_setting = 256; + MICROSTEPPING_Y = microstepping_setting == 0 ? 1 : microstepping_setting; + steps_per_mm_Y = FULLSTEPS_PER_REV_Y * MICROSTEPPING_Y / SCREW_PITCH_Y_MM; + Y_MOTOR_RMS_CURRENT_mA = uint16_t(buffer_rx[4]) * 256 + uint16_t(buffer_rx[5]); + Y_MOTOR_I_HOLD = float(buffer_rx[6]) / 255; + tmc4361A_tmc2660_config(&tmc4361[y], (Y_MOTOR_RMS_CURRENT_mA / 1000.0)*R_sense_xy / 0.2298, Y_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_Y_MM, FULLSTEPS_PER_REV_Y, MICROSTEPPING_Y); + tmc4361A_tmc2660_update(&tmc4361[y]); + break; + } case AXIS_Z: - tmc4361A_setCurrentPosition(&tmc4361[z], 0); - Z_pos = 0; - focusPosition = 0; - break; + { + int microstepping_setting = buffer_rx[3]; + if (microstepping_setting > 128) + microstepping_setting = 256; + MICROSTEPPING_Z = microstepping_setting == 0 ? 
1 : microstepping_setting; + steps_per_mm_Z = FULLSTEPS_PER_REV_Z * MICROSTEPPING_Z / SCREW_PITCH_Z_MM; + Z_MOTOR_RMS_CURRENT_mA = uint16_t(buffer_rx[4]) * 256 + uint16_t(buffer_rx[5]); + Z_MOTOR_I_HOLD = float(buffer_rx[6]) / 255; + tmc4361A_tmc2660_config(&tmc4361[z], (Z_MOTOR_RMS_CURRENT_mA / 1000.0)*R_sense_z / 0.2298, Z_MOTOR_I_HOLD, 1, 1, 1, SCREW_PITCH_Z_MM, FULLSTEPS_PER_REV_Z, MICROSTEPPING_Z); + tmc4361A_tmc2660_update(&tmc4361[z]); + break; + } } + break; } - // atomic operation, no need to change mcu_cmd_execution_in_progress flag - // homing - else if(buffer_rx[3]==HOME_NEGATIVE || buffer_rx[3]==HOME_POSITIVE) + case SET_MAX_VELOCITY_ACCELERATION: { - switch(buffer_rx[2]) + switch (buffer_rx[2]) { case AXIS_X: - if (stage_PID_enabled[AXIS_X] == 1) - tmc4361A_set_PID(&tmc4361[AXIS_X], PID_DISABLE); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[x],-1); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[x],1); - homing_direction_X = buffer_rx[3]; - home_X_found = false; - if(homing_direction_X==HOME_NEGATIVE) // use the left limit switch for homing { - if( tmc4361A_readLimitSwitches(&tmc4361[x]) == LEFT_SW ) - { - // get out of the hysteresis zone - is_preparing_for_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); - } - else - { - is_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); - } + MAX_VELOCITY_X_mm = float(uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4])) / 100; + MAX_ACCELERATION_X_mm = float(uint16_t(buffer_rx[5]) * 256 + uint16_t(buffer_rx[6])) / 10; + tmc4361A_setMaxSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], MAX_VELOCITY_X_mm) ); + tmc4361A_setMaxAcceleration(&tmc4361[x], tmc4361A_ammToMicrosteps( &tmc4361[x], MAX_ACCELERATION_X_mm) ); + break; } - else // 
use the right limit switch for homing + case AXIS_Y: { - if( tmc4361A_readLimitSwitches(&tmc4361[x]) == RGHT_SW ) - { - // get out of the hysteresis zone - is_preparing_for_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); - } - else - { - is_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); - } + MAX_VELOCITY_Y_mm = float(uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4])) / 100; + MAX_ACCELERATION_Y_mm = float(uint16_t(buffer_rx[5]) * 256 + uint16_t(buffer_rx[6])) / 10; + tmc4361A_setMaxSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], MAX_VELOCITY_Y_mm) ); + tmc4361A_setMaxAcceleration(&tmc4361[y], tmc4361A_ammToMicrosteps( &tmc4361[y], MAX_ACCELERATION_Y_mm) ); + break; } - /* - if(digitalRead(X_LIM)==(LIM_SWITCH_X_ACTIVE_LOW?HIGH:LOW)) + case AXIS_Z: { - is_homing_X = true; - if(homing_direction_X==HOME_NEGATIVE) - stepper_X.setSpeed(-HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); - else - stepper_X.setSpeed(HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); + MAX_VELOCITY_Z_mm = float(uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4])) / 100; + MAX_ACCELERATION_Z_mm = float(uint16_t(buffer_rx[5]) * 256 + uint16_t(buffer_rx[6])) / 10; + tmc4361A_setMaxSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], MAX_VELOCITY_Z_mm) ); + tmc4361A_setMaxAcceleration(&tmc4361[z], tmc4361A_ammToMicrosteps( &tmc4361[z], MAX_ACCELERATION_Z_mm) ); + break; } - else + } + break; + } + case SET_LEAD_SCREW_PITCH: + { + switch (buffer_rx[2]) + { + case AXIS_X: { - // get out of the hysteresis zone - is_preparing_for_homing_X = true; - if(homing_direction_X==HOME_NEGATIVE) - stepper_X.setSpeed(HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); - else - 
stepper_X.setSpeed(-HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); + SCREW_PITCH_X_MM = float(uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4])) / 1000; + steps_per_mm_X = FULLSTEPS_PER_REV_X * MICROSTEPPING_X / SCREW_PITCH_X_MM; + break; } - */ - break; case AXIS_Y: - if (stage_PID_enabled[AXIS_Y] == 1) - tmc4361A_set_PID(&tmc4361[AXIS_Y], PID_DISABLE); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[y],-1); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[y],1); - homing_direction_Y = buffer_rx[3]; - home_Y_found = false; - if(homing_direction_Y==HOME_NEGATIVE) // use the left limit switch for homing { - if( tmc4361A_readLimitSwitches(&tmc4361[y]) == LEFT_SW ) - { - // get out of the hysteresis zone - is_preparing_for_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); - } - else - { - is_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); - } - } - else // use the right limit switch for homing - { - if( tmc4361A_readLimitSwitches(&tmc4361[y]) == RGHT_SW ) - { - // get out of the hysteresis zone - is_preparing_for_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); - } - else - { - is_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); - } + SCREW_PITCH_Y_MM = float(uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4])) / 1000; + steps_per_mm_Y = FULLSTEPS_PER_REV_Y * MICROSTEPPING_Y / SCREW_PITCH_Y_MM; + break; } - break; case AXIS_Z: - if (stage_PID_enabled[AXIS_Z] == 1) - tmc4361A_set_PID(&tmc4361[AXIS_Z], PID_DISABLE); - 
tmc4361A_disableVirtualLimitSwitch(&tmc4361[z],-1); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[z],1); - homing_direction_Z = buffer_rx[3]; - home_Z_found = false; - if(homing_direction_Z==HOME_NEGATIVE) // use the left limit switch for homing { - if( tmc4361A_readLimitSwitches(&tmc4361[z]) == LEFT_SW ) - { - // get out of the hysteresis zone - is_preparing_for_homing_Z = true; - tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], RGHT_DIR*HOMING_VELOCITY_Z*MAX_VELOCITY_Z_mm )); - } - else - { - is_homing_Z = true; - tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], LEFT_DIR*HOMING_VELOCITY_Z*MAX_VELOCITY_Z_mm )); - } + SCREW_PITCH_Z_MM = float(uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4])) / 1000; + steps_per_mm_Z = FULLSTEPS_PER_REV_Z * MICROSTEPPING_Z / SCREW_PITCH_Z_MM; + break; } - else // use the right limit switch for homing - { - if( tmc4361A_readLimitSwitches(&tmc4361[z]) == RGHT_SW ) + } + break; + } + case HOME_OR_ZERO: + { + // zeroing + if (buffer_rx[3] == HOME_OR_ZERO_ZERO) + { + switch (buffer_rx[2]) + { + case AXIS_X: + tmc4361A_setCurrentPosition(&tmc4361[x], 0); + X_pos = 0; + break; + case AXIS_Y: + tmc4361A_setCurrentPosition(&tmc4361[y], 0); + Y_pos = 0; + break; + case AXIS_Z: + tmc4361A_setCurrentPosition(&tmc4361[z], 0); + Z_pos = 0; + focusPosition = 0; + break; + } + } + // atomic operation, no need to change mcu_cmd_execution_in_progress flag + // homing + else if (buffer_rx[3] == HOME_NEGATIVE || buffer_rx[3] == HOME_POSITIVE) + { + switch (buffer_rx[2]) + { + case AXIS_X: + if (stage_PID_enabled[AXIS_X] == 1) + tmc4361A_set_PID(&tmc4361[AXIS_X], PID_DISABLE); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[x], -1); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[x], 1); + homing_direction_X = buffer_rx[3]; + home_X_found = false; + if (homing_direction_X == HOME_NEGATIVE) // use the left limit 
switch for homing { - // get out of the hysteresis zone - is_preparing_for_homing_Z = true; - tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], LEFT_DIR*HOMING_VELOCITY_Z*MAX_VELOCITY_Z_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[x]) == LEFT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } + else + { + is_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } } - else + else // use the right limit switch for homing { - is_homing_Z = true; - tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], RGHT_DIR*HOMING_VELOCITY_Z*MAX_VELOCITY_Z_mm )); - // tmc4361A_moveTo(&tmc4361[y], tmc4361A_currentPosition(&tmc4361[y])+51200); // for debugging + if ( tmc4361A_readLimitSwitches(&tmc4361[x]) == RGHT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } + else + { + is_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } } - } - break; - case AXES_XY: - if (stage_PID_enabled[AXIS_X] == 1) - tmc4361A_set_PID(&tmc4361[AXIS_X], PID_DISABLE); - if (stage_PID_enabled[AXIS_Y] == 1) - tmc4361A_set_PID(&tmc4361[AXIS_Y], PID_DISABLE); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[x],-1); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[x],1); - 
tmc4361A_disableVirtualLimitSwitch(&tmc4361[y],-1); - tmc4361A_disableVirtualLimitSwitch(&tmc4361[y],1); - is_homing_XY = true; - home_X_found = false; - home_Y_found = false; - // homing x - homing_direction_X = buffer_rx[3]; - home_X_found = false; - if(homing_direction_X==HOME_NEGATIVE) // use the left limit switch for homing - { - if( tmc4361A_readLimitSwitches(&tmc4361[x]) == LEFT_SW ) - { + /* + if(digitalRead(X_LIM)==(LIM_SWITCH_X_ACTIVE_LOW?HIGH:LOW)) + { + is_homing_X = true; + if(homing_direction_X==HOME_NEGATIVE) + stepper_X.setSpeed(-HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); + else + stepper_X.setSpeed(HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); + } + else + { // get out of the hysteresis zone is_preparing_for_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); + if(homing_direction_X==HOME_NEGATIVE) + stepper_X.setSpeed(HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); + else + stepper_X.setSpeed(-HOMING_VELOCITY_X*MAX_VELOCITY_X_mm*steps_per_mm_X); + } + */ + break; + case AXIS_Y: + if (stage_PID_enabled[AXIS_Y] == 1) + tmc4361A_set_PID(&tmc4361[AXIS_Y], PID_DISABLE); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[y], -1); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[y], 1); + homing_direction_Y = buffer_rx[3]; + home_Y_found = false; + if (homing_direction_Y == HOME_NEGATIVE) // use the left limit switch for homing + { + if ( tmc4361A_readLimitSwitches(&tmc4361[y]) == LEFT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } + else + { + is_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR * 
HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } } - else + else // use the right limit switch for homing { - is_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[y]) == RGHT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } + else + { + is_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } } - } - else // use the right limit switch for homing - { - if( tmc4361A_readLimitSwitches(&tmc4361[x]) == RGHT_DIR ) + break; + case AXIS_Z: + if (stage_PID_enabled[AXIS_Z] == 1) + tmc4361A_set_PID(&tmc4361[AXIS_Z], PID_DISABLE); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[z], -1); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[z], 1); + homing_direction_Z = buffer_rx[3]; + home_Z_found = false; + if (homing_direction_Z == HOME_NEGATIVE) // use the left limit switch for homing { - // get out of the hysteresis zone - is_preparing_for_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[z]) == LEFT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_Z = true; + tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], RGHT_DIR * HOMING_VELOCITY_Z * MAX_VELOCITY_Z_mm )); + } + else + { + is_homing_Z = true; + tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[z], 
tmc4361A_vmmToMicrosteps( &tmc4361[z], LEFT_DIR * HOMING_VELOCITY_Z * MAX_VELOCITY_Z_mm )); + } } - else + else // use the right limit switch for homing { - is_homing_X = true; - tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[z]) == RGHT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_Z = true; + tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], LEFT_DIR * HOMING_VELOCITY_Z * MAX_VELOCITY_Z_mm )); + } + else + { + is_homing_Z = true; + tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], RGHT_DIR * HOMING_VELOCITY_Z * MAX_VELOCITY_Z_mm )); + // tmc4361A_moveTo(&tmc4361[y], tmc4361A_currentPosition(&tmc4361[y])+51200); // for debugging + } } - } - // homing y - homing_direction_Y = buffer_rx[4]; - home_Y_found = false; - if(homing_direction_Y==HOME_NEGATIVE) // use the left limit switch for homing - { - if( tmc4361A_readLimitSwitches(&tmc4361[y]) == LEFT_SW ) + break; + case AXES_XY: + if (stage_PID_enabled[AXIS_X] == 1) + tmc4361A_set_PID(&tmc4361[AXIS_X], PID_DISABLE); + if (stage_PID_enabled[AXIS_Y] == 1) + tmc4361A_set_PID(&tmc4361[AXIS_Y], PID_DISABLE); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[x], -1); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[x], 1); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[y], -1); + tmc4361A_disableVirtualLimitSwitch(&tmc4361[y], 1); + is_homing_XY = true; + home_X_found = false; + home_Y_found = false; + // homing x + homing_direction_X = buffer_rx[3]; + home_X_found = false; + if (homing_direction_X == HOME_NEGATIVE) // use the left limit switch for homing { - // get out of the hysteresis zone - is_preparing_for_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - 
tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[x]) == LEFT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } + else + { + is_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } } - else + else // use the right limit switch for homing { - is_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[x]) == RGHT_DIR ) + { + // get out of the hysteresis zone + is_preparing_for_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } + else + { + is_homing_X = true; + tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); + } } - } - else // use the right limit switch for homing - { - if( tmc4361A_readLimitSwitches(&tmc4361[y]) == RGHT_DIR ) + // homing y + homing_direction_Y = buffer_rx[4]; + home_Y_found = false; + if (homing_direction_Y == HOME_NEGATIVE) // use the left limit switch for homing { - // get out of the hysteresis zone - is_preparing_for_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[y]) == 
LEFT_SW ) + { + // get out of the hysteresis zone + is_preparing_for_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } + else + { + is_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } } - else + else // use the right limit switch for homing { - is_homing_Y = true; - tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); + if ( tmc4361A_readLimitSwitches(&tmc4361[y]) == RGHT_DIR ) + { + // get out of the hysteresis zone + is_preparing_for_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } + else + { + is_homing_Y = true; + tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); + } } - } - break; + break; + } + mcu_cmd_execution_in_progress = true; } - mcu_cmd_execution_in_progress = true; } - } case SET_OFFSET_VELOCITY: - { - if(enable_offset_velocity) { - switch(buffer_rx[2]) + if (enable_offset_velocity) { - case AXIS_X: - offset_velocity_x = float( int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])) )/1000000; - break; - case AXIS_Y: - offset_velocity_y = float( int32_t(uint32_t(buffer_rx[2])*16777216 + uint32_t(buffer_rx[3])*65536 + uint32_t(buffer_rx[4])*256 + uint32_t(buffer_rx[5])) )/1000000; - break; + switch (buffer_rx[2]) + { + case AXIS_X: + offset_velocity_x = float( int32_t(uint32_t(buffer_rx[3]) * 16777216 + 
uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])) ) / 1000000; + break; + case AXIS_Y: + offset_velocity_y = float( int32_t(uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6])) ) / 1000000; + break; + } + break; } break; } - break; - } case TURN_ON_ILLUMINATION: - { - // mcu_cmd_execution_in_progress = true; - turn_on_illumination(); - // mcu_cmd_execution_in_progress = false; - // these are atomic operations - do not change the mcu_cmd_execution_in_progress flag - break; - } + { + // mcu_cmd_execution_in_progress = true; + turn_on_illumination(); + // mcu_cmd_execution_in_progress = false; + // these are atomic operations - do not change the mcu_cmd_execution_in_progress flag + break; + } case TURN_OFF_ILLUMINATION: - { - turn_off_illumination(); - break; - } + { + turn_off_illumination(); + break; + } case SET_ILLUMINATION: - { - set_illumination(buffer_rx[2],(uint16_t(buffer_rx[3])<<8) + uint16_t(buffer_rx[4])); //important to have "<<8" with in "()" - break; - } + { + set_illumination(buffer_rx[2], (uint16_t(buffer_rx[3]) << 8) + uint16_t(buffer_rx[4])); //important to have "<<8" with in "()" + break; + } case SET_ILLUMINATION_LED_MATRIX: - { - set_illumination_led_matrix(buffer_rx[2],buffer_rx[3],buffer_rx[4],buffer_rx[5]); - break; - } + { + set_illumination_led_matrix(buffer_rx[2], buffer_rx[3], buffer_rx[4], buffer_rx[5]); + break; + } case ACK_JOYSTICK_BUTTON_PRESSED: - { - joystick_button_pressed = false; - break; - } + { + joystick_button_pressed = false; + break; + } case ANALOG_WRITE_ONBOARD_DAC: - { - int dac = buffer_rx[2]; - uint16_t value = ( uint16_t(buffer_rx[3])*256 + uint16_t(buffer_rx[4]) ); - set_DAC8050x_output(dac,value); - } + { + int dac = buffer_rx[2]; + uint16_t value = ( uint16_t(buffer_rx[3]) * 256 + uint16_t(buffer_rx[4]) ); + set_DAC8050x_output(dac, value); + break; + } + case SET_DAC80508_REFDIV_GAIN: + { + uint8_t div = 
buffer_rx[2]; + uint8_t gains = buffer_rx[3]; + set_DAC8050x_gain(div, gains); + break; + } + case SET_ILLUMINATION_INTENSITY_FACTOR: + { + uint8_t factor = uint8_t(buffer_rx[2]); + if (factor > 100) factor = 100; + illumination_intensity_factor = float(factor) / 100; + break; + } case SET_STROBE_DELAY: - { - strobe_delay[buffer_rx[2]] = uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6]); - break; - } + { + strobe_delay[buffer_rx[2]] = uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6]); + break; + } case SEND_HARDWARE_TRIGGER: - { - int camera_channel = buffer_rx[2] & 0x0f; - control_strobe[camera_channel] = buffer_rx[2] >> 7; - illumination_on_time[camera_channel] = uint32_t(buffer_rx[3])*16777216 + uint32_t(buffer_rx[4])*65536 + uint32_t(buffer_rx[5])*256 + uint32_t(buffer_rx[6]); - digitalWrite(camera_trigger_pins[camera_channel],LOW); - timestamp_trigger_rising_edge[camera_channel] = micros(); - trigger_output_level[camera_channel] = LOW; - break; - } + { + // Some (all?) the arrays used by the trigger timer interrupt use data types that don't have + // atomic writes, so we need to disable interrupts here to make sure the timer interrupt + // doesn't get partially written values. 
+ noInterrupts(); + int camera_channel = buffer_rx[2] & 0x0f; + control_strobe[camera_channel] = buffer_rx[2] >> 7; + illumination_on_time[camera_channel] = uint32_t(buffer_rx[3]) * 16777216 + uint32_t(buffer_rx[4]) * 65536 + uint32_t(buffer_rx[5]) * 256 + uint32_t(buffer_rx[6]); + digitalWrite(camera_trigger_pins[camera_channel], LOW); + timestamp_trigger_rising_edge[camera_channel] = micros(); + trigger_output_level[camera_channel] = LOW; + interrupts(); + break; + } case SET_PIN_LEVEL: - { - int pin = buffer_rx[2]; - bool level = buffer_rx[3]; - digitalWrite(pin,level); - break; - } + { + int pin = buffer_rx[2]; + bool level = buffer_rx[3]; + digitalWrite(pin, level); + break; + } case CONFIGURE_STAGE_PID: - { - int axis = buffer_rx[2]; - int flip_direction = buffer_rx[3]; - int transitions_per_revolution = (buffer_rx[4] << 8) + buffer_rx[5]; - // Init encoder. transitions per revolution, velocity filter wait time (# of clock cycles), IIR filter exponent, vmean update frequency, invert direction (must increase as microsteps increases) - tmc4361A_init_ABN_encoder(&tmc4361[axis], transitions_per_revolution, 32, 4, 512, flip_direction); - // Init PID. target reach tolerance, position error tolerance, P, I, and D coefficients, max speed, winding limit, derivative update rate - if (axis == 0) - tmc4361A_init_PID(&tmc4361[axis], 25, 25, 512, 64, 0, tmc4361A_vmmToMicrosteps(&tmc4361[axis], MAX_VELOCITY_X_mm), 4096, 2); - else if (axis == 1) - tmc4361A_init_PID(&tmc4361[axis], 25, 25, 512, 64, 0, tmc4361A_vmmToMicrosteps(&tmc4361[axis], MAX_VELOCITY_Y_mm), 4096, 2); - else - tmc4361A_init_PID(&tmc4361[axis], 25, 25, 512, 64, 0, tmc4361A_vmmToMicrosteps(&tmc4361[axis], MAX_VELOCITY_Z_mm), 4096, 2); - break; - } + { + int axis = buffer_rx[2]; + int flip_direction = buffer_rx[3]; + int transitions_per_revolution = (buffer_rx[4] << 8) + buffer_rx[5]; + // Init encoder. 
transitions per revolution, velocity filter wait time (# of clock cycles), IIR filter exponent, vmean update frequency, invert direction (must increase as microsteps increases) + tmc4361A_init_ABN_encoder(&tmc4361[axis], transitions_per_revolution, 32, 4, 512, flip_direction); + // Init PID. target reach tolerance, position error tolerance, P, I, and D coefficients, max speed, winding limit, derivative update rate + if (axis == z) + tmc4361A_init_PID(&tmc4361[axis], 25, 25, axis_pid_arg[axis].p, axis_pid_arg[axis].i, axis_pid_arg[axis].d, tmc4361A_vmmToMicrosteps(&tmc4361[axis], MAX_VELOCITY_Z_mm), 4096, 2); + else if (axis == y) + tmc4361A_init_PID(&tmc4361[axis], 25, 25, axis_pid_arg[axis].p, axis_pid_arg[axis].i, axis_pid_arg[axis].d, tmc4361A_vmmToMicrosteps(&tmc4361[axis], MAX_VELOCITY_Y_mm), 32767, 2); + else + tmc4361A_init_PID(&tmc4361[axis], 25, 25, axis_pid_arg[axis].p, axis_pid_arg[axis].i, axis_pid_arg[axis].d, tmc4361A_vmmToMicrosteps(&tmc4361[axis], MAX_VELOCITY_X_mm), 32767, 2); + break; + } case ENABLE_STAGE_PID: - { - int axis = buffer_rx[2]; - tmc4361A_set_PID(&tmc4361[axis], PID_BPG0); - stage_PID_enabled[axis] = 1; - break; - } + { + int axis = buffer_rx[2]; + tmc4361A_set_PID(&tmc4361[axis], PID_BPG0); + stage_PID_enabled[axis] = 1; + break; + } case DISABLE_STAGE_PID: - { - int axis = buffer_rx[2]; - tmc4361A_set_PID(&tmc4361[axis], PID_DISABLE); - stage_PID_enabled[axis] = 0; - break; - } + { + int axis = buffer_rx[2]; + tmc4361A_set_PID(&tmc4361[axis], PID_DISABLE); + stage_PID_enabled[axis] = 0; + break; + } + case SET_AXIS_DISABLE_ENABLE: + { + int axis = buffer_rx[2]; + int status = buffer_rx[3]; + if (status == 0) { + tmc4361A_tmc2660_disable_driver(&tmc4361[axis]); + } + else { + tmc4361A_tmc2660_enable_driver(&tmc4361[axis]); + } + break; + } case INITIALIZE: - { - // reset z target position so that z does not move when "current position" for z is set to 0 - focusPosition = 0; - first_packet_from_joystick_panel = true; - // initilize 
TMC4361 and TMC2660 - for (int i = 0; i < N_MOTOR; i++) - tmc4361A_tmc2660_init(&tmc4361[i], clk_Hz_TMC4361); // set up ICs with SPI control and other parameters - // enable limit switch reading - tmc4361A_enableLimitSwitch(&tmc4361[x], lft_sw_pol[x], LEFT_SW, flip_limit_switch_x); - tmc4361A_enableLimitSwitch(&tmc4361[x], rht_sw_pol[x], RGHT_SW, flip_limit_switch_x); - tmc4361A_enableLimitSwitch(&tmc4361[y], lft_sw_pol[y], LEFT_SW, flip_limit_switch_y); - tmc4361A_enableLimitSwitch(&tmc4361[y], rht_sw_pol[y], RGHT_SW, flip_limit_switch_y); - tmc4361A_enableLimitSwitch(&tmc4361[z], rht_sw_pol[z], RGHT_SW, false); - tmc4361A_enableLimitSwitch(&tmc4361[z], lft_sw_pol[z], LEFT_SW, false); - // motion profile - uint32_t max_velocity_usteps[N_MOTOR]; - uint32_t max_acceleration_usteps[N_MOTOR]; - max_acceleration_usteps[x] = tmc4361A_ammToMicrosteps(&tmc4361[x], MAX_ACCELERATION_X_mm); - max_acceleration_usteps[y] = tmc4361A_ammToMicrosteps(&tmc4361[y], MAX_ACCELERATION_Y_mm); - max_acceleration_usteps[z] = tmc4361A_ammToMicrosteps(&tmc4361[z], MAX_ACCELERATION_Z_mm); - max_velocity_usteps[x] = tmc4361A_vmmToMicrosteps(&tmc4361[x], MAX_VELOCITY_X_mm); - max_velocity_usteps[y] = tmc4361A_vmmToMicrosteps(&tmc4361[y], MAX_VELOCITY_Y_mm); - max_velocity_usteps[z] = tmc4361A_vmmToMicrosteps(&tmc4361[z], MAX_VELOCITY_Z_mm); - for (int i = 0; i < N_MOTOR; i++) { - // initialize ramp with default values - tmc4361A_setMaxSpeed(&tmc4361[i], max_velocity_usteps[i]); - tmc4361A_setMaxAcceleration(&tmc4361[i], max_acceleration_usteps[i]); - tmc4361[i].rampParam[ASTART_IDX] = 0; - tmc4361[i].rampParam[DFINAL_IDX] = 0; - tmc4361A_sRampInit(&tmc4361[i]); + // reset z target position so that z does not move when "current position" for z is set to 0 + focusPosition = 0; + first_packet_from_joystick_panel = true; + // initilize TMC4361 and TMC2660 + for (int i = 0; i < N_MOTOR; i++) + tmc4361A_tmc2660_init(&tmc4361[i], clk_Hz_TMC4361); // set up ICs with SPI control and other parameters + 
// enable limit switch reading + tmc4361A_enableLimitSwitch(&tmc4361[x], lft_sw_pol[x], LEFT_SW, flip_limit_switch_x); + tmc4361A_enableLimitSwitch(&tmc4361[x], rht_sw_pol[x], RGHT_SW, flip_limit_switch_x); + tmc4361A_enableLimitSwitch(&tmc4361[y], lft_sw_pol[y], LEFT_SW, flip_limit_switch_y); + tmc4361A_enableLimitSwitch(&tmc4361[y], rht_sw_pol[y], RGHT_SW, flip_limit_switch_y); + tmc4361A_enableLimitSwitch(&tmc4361[z], rht_sw_pol[z], RGHT_SW, false); + tmc4361A_enableLimitSwitch(&tmc4361[z], lft_sw_pol[z], LEFT_SW, false); + // motion profile + uint32_t max_velocity_usteps[N_MOTOR]; + uint32_t max_acceleration_usteps[N_MOTOR]; + max_acceleration_usteps[x] = tmc4361A_ammToMicrosteps(&tmc4361[x], MAX_ACCELERATION_X_mm); + max_acceleration_usteps[y] = tmc4361A_ammToMicrosteps(&tmc4361[y], MAX_ACCELERATION_Y_mm); + max_acceleration_usteps[z] = tmc4361A_ammToMicrosteps(&tmc4361[z], MAX_ACCELERATION_Z_mm); + max_velocity_usteps[x] = tmc4361A_vmmToMicrosteps(&tmc4361[x], MAX_VELOCITY_X_mm); + max_velocity_usteps[y] = tmc4361A_vmmToMicrosteps(&tmc4361[y], MAX_VELOCITY_Y_mm); + max_velocity_usteps[z] = tmc4361A_vmmToMicrosteps(&tmc4361[z], MAX_VELOCITY_Z_mm); + for (int i = 0; i < N_MOTOR; i++) + { + // initialize ramp with default values + tmc4361A_setMaxSpeed(&tmc4361[i], max_velocity_usteps[i]); + tmc4361A_setMaxAcceleration(&tmc4361[i], max_acceleration_usteps[i]); + tmc4361[i].rampParam[ASTART_IDX] = 0; + tmc4361[i].rampParam[DFINAL_IDX] = 0; + tmc4361A_sRampInit(&tmc4361[i]); + } + + // homing switch settings + tmc4361A_enableHomingLimit(&tmc4361[x], lft_sw_pol[x], TMC4361_homing_sw[x], home_safety_margin[x]); + tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y], home_safety_margin[y]); + tmc4361A_enableHomingLimit(&tmc4361[z], rht_sw_pol[z], TMC4361_homing_sw[z], home_safety_margin[z]); + + // DAC init + set_DAC8050x_config(); + set_DAC8050x_default_gain(); + break; } - // homing switch settings - tmc4361A_enableHomingLimit(&tmc4361[x], 
lft_sw_pol[x], TMC4361_homing_sw[x]); - tmc4361A_enableHomingLimit(&tmc4361[y], lft_sw_pol[y], TMC4361_homing_sw[y]); - tmc4361A_enableHomingLimit(&tmc4361[z], rht_sw_pol[z], TMC4361_homing_sw[z]); - // DAC init - set_DAC8050x_config(); - set_DAC8050x_gain(); - break; - } case RESET: - { - mcu_cmd_execution_in_progress = false; - X_commanded_movement_in_progress = false; - Y_commanded_movement_in_progress = false; - Z_commanded_movement_in_progress = false; - is_homing_X = false; - is_homing_Y = false; - is_homing_Z = false; - is_homing_XY = false; - home_X_found = false; - home_Y_found = false; - home_Z_found = false; - is_preparing_for_homing_X = false; - is_preparing_for_homing_Y = false; - is_preparing_for_homing_Z = false; - cmd_id = 0; - break; - } + { + mcu_cmd_execution_in_progress = false; + X_commanded_movement_in_progress = false; + Y_commanded_movement_in_progress = false; + Z_commanded_movement_in_progress = false; + is_homing_X = false; + is_homing_Y = false; + is_homing_Z = false; + is_homing_XY = false; + home_X_found = false; + home_Y_found = false; + home_Z_found = false; + is_preparing_for_homing_X = false; + is_preparing_for_homing_Y = false; + is_preparing_for_homing_Z = false; + cmd_id = 0; + break; + } default: break; } @@ -1457,19 +1589,19 @@ void loop() { } // camera trigger - for(int camera_channel=0;camera_channel<6;camera_channel++) + for (int camera_channel = 0; camera_channel < 6; camera_channel++) { // end the trigger pulse - if(trigger_output_level[camera_channel] == LOW && (micros()-timestamp_trigger_rising_edge[camera_channel])>= TRIGGER_PULSE_LENGTH_us ) + if (trigger_output_level[camera_channel] == LOW && (micros() - timestamp_trigger_rising_edge[camera_channel]) >= TRIGGER_PULSE_LENGTH_us ) { - digitalWrite(camera_trigger_pins[camera_channel],HIGH); + digitalWrite(camera_trigger_pins[camera_channel], HIGH); trigger_output_level[camera_channel] = HIGH; } /* - // strobe pulse - if(control_strobe[camera_channel]) - { + // strobe 
pulse + if(control_strobe[camera_channel]) + { if(illumination_on_time[camera_channel] <= 30000) { // if the illumination on time is smaller than 30 ms, use delayMicroseconds to control the pulse length to avoid pulse length jitter (can be up to 20 us if using the code in the else branch) @@ -1496,88 +1628,88 @@ void loop() { strobe_output_level[camera_channel] = LOW; control_strobe[camera_channel] = false; } - } - } + } + } */ } // homing - preparing for homing - if(is_preparing_for_homing_X) + if (is_preparing_for_homing_X) { - if(homing_direction_X==HOME_NEGATIVE) // use the left limit switch for homing + if (homing_direction_X == HOME_NEGATIVE) // use the left limit switch for homing { - if(tmc4361A_readLimitSwitches(&tmc4361[x])!=LEFT_SW) + if (tmc4361A_readLimitSwitches(&tmc4361[x]) != LEFT_SW) { is_preparing_for_homing_X = false; is_homing_X = true; tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], LEFT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); } } else // use the right limit switch for homing { - if(tmc4361A_readLimitSwitches(&tmc4361[x])!=RGHT_SW) + if (tmc4361A_readLimitSwitches(&tmc4361[x]) != RGHT_SW) { is_preparing_for_homing_X = false; is_homing_X = true; tmc4361A_readInt(&tmc4361[x], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR*HOMING_VELOCITY_X*MAX_VELOCITY_X_mm )); + tmc4361A_setSpeed(&tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], RGHT_DIR * HOMING_VELOCITY_X * MAX_VELOCITY_X_mm )); } } } - if(is_preparing_for_homing_Y) + if (is_preparing_for_homing_Y) { - if(homing_direction_Y==HOME_NEGATIVE) // use the left limit switch for homing + if (homing_direction_Y == HOME_NEGATIVE) // use the left limit switch for homing { - if(tmc4361A_readLimitSwitches(&tmc4361[y])!=LEFT_SW) + if 
(tmc4361A_readLimitSwitches(&tmc4361[y]) != LEFT_SW) { is_preparing_for_homing_Y = false; is_homing_Y = true; tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], LEFT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); } } else // use the right limit switch for homing { - if(tmc4361A_readLimitSwitches(&tmc4361[y])!=RGHT_SW) + if (tmc4361A_readLimitSwitches(&tmc4361[y]) != RGHT_SW) { is_preparing_for_homing_Y = false; is_homing_Y = true; tmc4361A_readInt(&tmc4361[y], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR*HOMING_VELOCITY_Y*MAX_VELOCITY_Y_mm )); + tmc4361A_setSpeed(&tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], RGHT_DIR * HOMING_VELOCITY_Y * MAX_VELOCITY_Y_mm )); } } } - if(is_preparing_for_homing_Z) + if (is_preparing_for_homing_Z) { - if(homing_direction_Z==HOME_NEGATIVE) // use the left limit switch for homing + if (homing_direction_Z == HOME_NEGATIVE) // use the left limit switch for homing { - if(tmc4361A_readLimitSwitches(&tmc4361[z])!=LEFT_SW) + if (tmc4361A_readLimitSwitches(&tmc4361[z]) != LEFT_SW) { is_preparing_for_homing_Z = false; is_homing_Z = true; tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], LEFT_DIR*HOMING_VELOCITY_Z*MAX_VELOCITY_Z_mm )); + tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], LEFT_DIR * HOMING_VELOCITY_Z * MAX_VELOCITY_Z_mm )); } } else // use the right limit switch for homing { - if(tmc4361A_readLimitSwitches(&tmc4361[z])!=RGHT_SW) + if (tmc4361A_readLimitSwitches(&tmc4361[z]) != RGHT_SW) { is_preparing_for_homing_Z = false; is_homing_Z = true; tmc4361A_readInt(&tmc4361[z], TMC4361A_EVENTS); - tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], 
RGHT_DIR*HOMING_VELOCITY_Z*MAX_VELOCITY_Z_mm )); + tmc4361A_setSpeed(&tmc4361[z], tmc4361A_vmmToMicrosteps( &tmc4361[z], RGHT_DIR * HOMING_VELOCITY_Z * MAX_VELOCITY_Z_mm )); } } } // homing complete check - if(is_homing_X && !home_X_found) + if (is_homing_X && !home_X_found) { - if(homing_direction_X==HOME_NEGATIVE) // use the left limit switch for homing + if (homing_direction_X == HOME_NEGATIVE) // use the left limit switch for homing { - if(tmc4361A_readSwitchEvent(&tmc4361[x])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[x])==LEFT_SW) + if (tmc4361A_readSwitchEvent(&tmc4361[x]) == LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[x]) == LEFT_SW) { home_X_found = true; us_since_x_home_found = 0; @@ -1591,7 +1723,7 @@ void loop() { } else // use the right limit switch for homing { - if(tmc4361A_readSwitchEvent(&tmc4361[x])==RGHT_SW || tmc4361A_readLimitSwitches(&tmc4361[x])==RGHT_SW) + if (tmc4361A_readSwitchEvent(&tmc4361[x]) == RGHT_SW || tmc4361A_readLimitSwitches(&tmc4361[x]) == RGHT_SW) { home_X_found = true; us_since_x_home_found = 0; @@ -1604,11 +1736,11 @@ void loop() { } } } - if(is_homing_Y && !home_Y_found) + if (is_homing_Y && !home_Y_found) { - if(homing_direction_Y==HOME_NEGATIVE) // use the left limit switch for homing + if (homing_direction_Y == HOME_NEGATIVE) // use the left limit switch for homing { - if(tmc4361A_readSwitchEvent(&tmc4361[y])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[y])==LEFT_SW) + if (tmc4361A_readSwitchEvent(&tmc4361[y]) == LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[y]) == LEFT_SW) { home_Y_found = true; us_since_y_home_found = 0; @@ -1622,7 +1754,7 @@ void loop() { } else // use the right limit switch for homing { - if(tmc4361A_readSwitchEvent(&tmc4361[y])==RGHT_SW || tmc4361A_readLimitSwitches(&tmc4361[y])==RGHT_SW) + if (tmc4361A_readSwitchEvent(&tmc4361[y]) == RGHT_SW || tmc4361A_readLimitSwitches(&tmc4361[y]) == RGHT_SW) { home_Y_found = true; us_since_y_home_found = 0; @@ -1635,11 +1767,11 @@ void loop() { } } 
} - if(is_homing_Z && !home_Z_found) + if (is_homing_Z && !home_Z_found) { - if(homing_direction_Z==HOME_NEGATIVE) // use the left limit switch for homing + if (homing_direction_Z == HOME_NEGATIVE) // use the left limit switch for homing { - if(tmc4361A_readSwitchEvent(&tmc4361[z])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[z])==LEFT_SW) + if (tmc4361A_readSwitchEvent(&tmc4361[z]) == LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[z]) == LEFT_SW) { home_Z_found = true; us_since_z_home_found = 0; @@ -1653,7 +1785,7 @@ void loop() { } else // use the right limit switch for homing { - if(tmc4361A_readSwitchEvent(&tmc4361[z])==RGHT_SW || tmc4361A_readLimitSwitches(&tmc4361[z])==RGHT_SW) + if (tmc4361A_readSwitchEvent(&tmc4361[z]) == RGHT_SW || tmc4361A_readLimitSwitches(&tmc4361[z]) == RGHT_SW) { home_Z_found = true; us_since_z_home_found = 0; @@ -1666,39 +1798,39 @@ void loop() { } } } - + // finish homing - if(is_homing_X && home_X_found && ( tmc4361A_currentPosition(&tmc4361[x]) == tmc4361A_targetPosition(&tmc4361[x]) || us_since_x_home_found > 500*1000 ) ) + if (is_homing_X && home_X_found && ( tmc4361A_currentPosition(&tmc4361[x]) == tmc4361A_targetPosition(&tmc4361[x]) || us_since_x_home_found > 500 * 1000 ) ) { // clear_matrix(matrix); // debug - tmc4361A_setCurrentPosition(&tmc4361[x],0); - if(stage_PID_enabled[AXIS_X]) + tmc4361A_setCurrentPosition(&tmc4361[x], 0); + if (stage_PID_enabled[AXIS_X]) tmc4361A_set_PID(&tmc4361[AXIS_X], PID_BPG0); X_pos = 0; is_homing_X = false; X_commanded_movement_in_progress = false; X_commanded_target_position = 0; - if(is_homing_XY==false) + if (is_homing_XY == false) mcu_cmd_execution_in_progress = false; } - if(is_homing_Y && home_Y_found && ( tmc4361A_currentPosition(&tmc4361[y]) == tmc4361A_targetPosition(&tmc4361[y]) || us_since_y_home_found > 500*1000 ) ) + if (is_homing_Y && home_Y_found && ( tmc4361A_currentPosition(&tmc4361[y]) == tmc4361A_targetPosition(&tmc4361[y]) || us_since_y_home_found > 500 * 1000 ) ) { // 
clear_matrix(matrix); // debug - tmc4361A_setCurrentPosition(&tmc4361[y],0); - if(stage_PID_enabled[AXIS_Y]) + tmc4361A_setCurrentPosition(&tmc4361[y], 0); + if (stage_PID_enabled[AXIS_Y]) tmc4361A_set_PID(&tmc4361[AXIS_Y], PID_BPG0); Y_pos = 0; is_homing_Y = false; Y_commanded_movement_in_progress = false; Y_commanded_target_position = 0; - if(is_homing_XY==false) + if (is_homing_XY == false) mcu_cmd_execution_in_progress = false; } - if(is_homing_Z && home_Z_found && ( tmc4361A_currentPosition(&tmc4361[z]) == tmc4361A_targetPosition(&tmc4361[z]) || us_since_z_home_found > 500*1000 ) ) + if (is_homing_Z && home_Z_found && ( tmc4361A_currentPosition(&tmc4361[z]) == tmc4361A_targetPosition(&tmc4361[z]) || us_since_z_home_found > 500 * 1000 ) ) { // clear_matrix(matrix); // debug - tmc4361A_setCurrentPosition(&tmc4361[z],0); - if(stage_PID_enabled[AXIS_Z]) + tmc4361A_setCurrentPosition(&tmc4361[z], 0); + if (stage_PID_enabled[AXIS_Z]) tmc4361A_set_PID(&tmc4361[AXIS_Z], PID_BPG0); Z_pos = 0; focusPosition = 0; @@ -1709,113 +1841,114 @@ void loop() { } // homing complete - if(is_homing_XY && home_X_found && !is_homing_X && home_Y_found && !is_homing_Y) + if (is_homing_XY && home_X_found && !is_homing_X && home_Y_found && !is_homing_Y) { is_homing_XY = false; mcu_cmd_execution_in_progress = false; } - if(flag_read_joystick) + if (flag_read_joystick) { - // read x joystick - if(!X_commanded_movement_in_progress && !is_homing_X && !is_preparing_for_homing_X) //if(stepper_X.distanceToGo()==0) // only read joystick when computer commanded travel has finished - doens't work - { - // joystick at motion position - if(abs(joystick_delta_x)>0) - { - tmc4361A_setSpeed( &tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x + (joystick_delta_x/32768.0)*MAX_VELOCITY_X_mm ) ); - } - // joystick at rest position - else - { - if(enable_offset_velocity) - tmc4361A_setSpeed( &tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x ) ); - else - 
tmc4361A_stop(&tmc4361[x]); // tmc4361A_setSpeed( &tmc4361[x], 0 ) causes problems for zeroing - } - } - - // read y joystick - if(!Y_commanded_movement_in_progress && !is_homing_Y && !is_preparing_for_homing_Y) - { - // joystick at motion position - if(abs(joystick_delta_y)>0) - { - tmc4361A_setSpeed( &tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y + (joystick_delta_y/32768.0)*MAX_VELOCITY_Y_mm ) ); - } - // joystick at rest position - else - { - if(enable_offset_velocity) - tmc4361A_setSpeed( &tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y ) ); - else - tmc4361A_stop(&tmc4361[y]); // tmc4361A_setSpeed( &tmc4361[y], 0 ) causes problems for zeroing - } - } + if (us_since_last_joystick_update > interval_send_joystick_update) + { + us_since_last_joystick_update = 0; + + // read x joystick + if (!X_commanded_movement_in_progress && !is_homing_X && !is_preparing_for_homing_X) //if(stepper_X.distanceToGo()==0) // only read joystick when computer commanded travel has finished - doens't work + { + // joystick at motion position + if (abs(joystick_delta_x) > 0) + tmc4361A_setSpeed( &tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x + (joystick_delta_x / 32768.0)*MAX_VELOCITY_X_mm ) ); + // joystick at rest position + else + { + if (enable_offset_velocity) + tmc4361A_setSpeed( &tmc4361[x], tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x ) ); + else + tmc4361A_stop(&tmc4361[x]); // tmc4361A_setSpeed( &tmc4361[x], 0 ) causes problems for zeroing + } + } + + // read y joystick + if (!Y_commanded_movement_in_progress && !is_homing_Y && !is_preparing_for_homing_Y) + { + // joystick at motion position + if (abs(joystick_delta_y) > 0) + tmc4361A_setSpeed( &tmc4361[y], tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y + (joystick_delta_y / 32768.0)*MAX_VELOCITY_Y_mm ) ); + // joystick at rest position + else + { + if (enable_offset_velocity) + tmc4361A_setSpeed( &tmc4361[y], tmc4361A_vmmToMicrosteps( 
&tmc4361[y], offset_velocity_y ) ); + else + tmc4361A_stop(&tmc4361[y]); // tmc4361A_setSpeed( &tmc4361[y], 0 ) causes problems for zeroing + } + } + } // set the read joystick flag to false flag_read_joystick = false; } /* - // handle limits (moption from joystick control or offset velocity) - if( tmc4361A_currentPosition(&tmc4361[x])>=X_POS_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x + (joystick_delta_x/32768.0)*MAX_VELOCITY_X_mm )>0 && !X_commanded_movement_in_progress ) - { + // handle limits (moption from joystick control or offset velocity) + if( tmc4361A_currentPosition(&tmc4361[x])>=X_POS_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x + (joystick_delta_x/32768.0)*MAX_VELOCITY_X_mm )>0 && !X_commanded_movement_in_progress ) + { tmc4361A_stop(&tmc4361[x]); - } - if( tmc4361A_currentPosition(&tmc4361[x])<=X_NEG_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x + (joystick_delta_x/32768.0)*MAX_VELOCITY_X_mm )<0 && !X_commanded_movement_in_progress ) - { + } + if( tmc4361A_currentPosition(&tmc4361[x])<=X_NEG_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[x], offset_velocity_x + (joystick_delta_x/32768.0)*MAX_VELOCITY_X_mm )<0 && !X_commanded_movement_in_progress ) + { tmc4361A_stop(&tmc4361[x]); - } - if( tmc4361A_currentPosition(&tmc4361[y])>=Y_POS_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y + (joystick_delta_y/32768.0)*MAX_VELOCITY_Y_mm )>0 && !Y_commanded_movement_in_progress ) - { + } + if( tmc4361A_currentPosition(&tmc4361[y])>=Y_POS_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y + (joystick_delta_y/32768.0)*MAX_VELOCITY_Y_mm )>0 && !Y_commanded_movement_in_progress ) + { tmc4361A_stop(&tmc4361[y]); - } - if( tmc4361A_currentPosition(&tmc4361[y])<=Y_NEG_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y + (joystick_delta_y/32768.0)*MAX_VELOCITY_Y_mm )<0 && !Y_commanded_movement_in_progress ) - { + } + if( 
tmc4361A_currentPosition(&tmc4361[y])<=Y_NEG_LIMIT && tmc4361A_vmmToMicrosteps( &tmc4361[y], offset_velocity_y + (joystick_delta_y/32768.0)*MAX_VELOCITY_Y_mm )<0 && !Y_commanded_movement_in_progress ) + { tmc4361A_stop(&tmc4361[y]); - } + } */ - + // focus control - if(focusPosition > Z_POS_LIMIT) + if (focusPosition > Z_POS_LIMIT) focusPosition = Z_POS_LIMIT; - if(focusPosition < Z_NEG_LIMIT) + if (focusPosition < Z_NEG_LIMIT) focusPosition = Z_NEG_LIMIT; - if(is_homing_Z==false && is_preparing_for_homing_Z==false) + if (is_homing_Z == false && is_preparing_for_homing_Z == false) tmc4361A_moveTo(&tmc4361[z], focusPosition); // send position update to computer - if(us_since_last_pos_update > interval_send_pos_update) + if (us_since_last_pos_update > interval_send_pos_update) { us_since_last_pos_update = 0; - + buffer_tx[0] = cmd_id; - if(checksum_error) + if (checksum_error) buffer_tx[1] = CMD_CHECKSUM_ERROR; // cmd_execution_status else - buffer_tx[1] = mcu_cmd_execution_in_progress; // cmd_execution_status - - uint32_t X_pos_int32t = uint32_t( X_use_encoder?X_pos:int32_t(tmc4361A_currentPosition(&tmc4361[x])) ); - buffer_tx[2] = byte(X_pos_int32t>>24); - buffer_tx[3] = byte((X_pos_int32t>>16)%256); - buffer_tx[4] = byte((X_pos_int32t>>8)%256); - buffer_tx[5] = byte((X_pos_int32t)%256); - - uint32_t Y_pos_int32t = uint32_t( Y_use_encoder?Y_pos:int32_t(tmc4361A_currentPosition(&tmc4361[y])) ); - buffer_tx[6] = byte(Y_pos_int32t>>24); - buffer_tx[7] = byte((Y_pos_int32t>>16)%256); - buffer_tx[8] = byte((Y_pos_int32t>>8)%256); - buffer_tx[9] = byte((Y_pos_int32t)%256); - - uint32_t Z_pos_int32t = uint32_t( Z_use_encoder?Z_pos:int32_t(tmc4361A_currentPosition(&tmc4361[z])) ); - buffer_tx[10] = byte(Z_pos_int32t>>24); - buffer_tx[11] = byte((Z_pos_int32t>>16)%256); - buffer_tx[12] = byte((Z_pos_int32t>>8)%256); - buffer_tx[13] = byte((Z_pos_int32t)%256); + buffer_tx[1] = mcu_cmd_execution_in_progress ? 
IN_PROGRESS : COMPLETED_WITHOUT_ERRORS; // cmd_execution_status + + uint32_t X_pos_int32t = uint32_t( X_use_encoder ? X_pos : int32_t(tmc4361A_currentPosition(&tmc4361[x])) ); + buffer_tx[2] = byte(X_pos_int32t >> 24); + buffer_tx[3] = byte((X_pos_int32t >> 16) % 256); + buffer_tx[4] = byte((X_pos_int32t >> 8) % 256); + buffer_tx[5] = byte((X_pos_int32t) % 256); + + uint32_t Y_pos_int32t = uint32_t( Y_use_encoder ? Y_pos : int32_t(tmc4361A_currentPosition(&tmc4361[y])) ); + buffer_tx[6] = byte(Y_pos_int32t >> 24); + buffer_tx[7] = byte((Y_pos_int32t >> 16) % 256); + buffer_tx[8] = byte((Y_pos_int32t >> 8) % 256); + buffer_tx[9] = byte((Y_pos_int32t) % 256); + + uint32_t Z_pos_int32t = uint32_t( Z_use_encoder ? Z_pos : int32_t(tmc4361A_currentPosition(&tmc4361[z])) ); + buffer_tx[10] = byte(Z_pos_int32t >> 24); + buffer_tx[11] = byte((Z_pos_int32t >> 16) % 256); + buffer_tx[12] = byte((Z_pos_int32t >> 8) % 256); + buffer_tx[13] = byte((Z_pos_int32t) % 256); // fail-safe clearing of the joystick_button_pressed bit (in case the ack is not received) - if(joystick_button_pressed && millis() - joystick_button_pressed_timestamp > 1000) + if (joystick_button_pressed && millis() - joystick_button_pressed_timestamp > 1000) joystick_button_pressed = false; buffer_tx[18] &= ~ (1 << BIT_POS_JOYSTICK_BUTTON); // clear the joystick button bit @@ -1841,65 +1974,76 @@ void loop() { } - // check if commanded position has been reached - if(X_commanded_movement_in_progress && tmc4361A_currentPosition(&tmc4361[x])==X_commanded_target_position && !is_homing_X) // homing is handled separately - { - X_commanded_movement_in_progress = false; - mcu_cmd_execution_in_progress = false || Y_commanded_movement_in_progress || Z_commanded_movement_in_progress; - } - if(Y_commanded_movement_in_progress && tmc4361A_currentPosition(&tmc4361[y])==Y_commanded_target_position && !is_homing_Y) - { - Y_commanded_movement_in_progress = false; - mcu_cmd_execution_in_progress = false || 
X_commanded_movement_in_progress || Z_commanded_movement_in_progress; - } - if(Z_commanded_movement_in_progress && tmc4361A_currentPosition(&tmc4361[z])==Z_commanded_target_position && !is_homing_Z) - { - Z_commanded_movement_in_progress = false; - mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Y_commanded_movement_in_progress; + // keep checking position process at suitable frequence + if(us_since_last_check_position > interval_check_position) { + us_since_last_check_position = 0; + + // check if commanded position has been reached + if (X_commanded_movement_in_progress && tmc4361A_currentPosition(&tmc4361[x]) == X_commanded_target_position && !is_homing_X && !tmc4361A_isRunning(&tmc4361[x], stage_PID_enabled[x])) // homing is handled separately + { + X_commanded_movement_in_progress = false; + mcu_cmd_execution_in_progress = false || Y_commanded_movement_in_progress || Z_commanded_movement_in_progress; + } + if (Y_commanded_movement_in_progress && tmc4361A_currentPosition(&tmc4361[y]) == Y_commanded_target_position && !is_homing_Y && !tmc4361A_isRunning(&tmc4361[y], stage_PID_enabled[y])) + { + Y_commanded_movement_in_progress = false; + mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Z_commanded_movement_in_progress; + } + if (Z_commanded_movement_in_progress && tmc4361A_currentPosition(&tmc4361[z]) == Z_commanded_target_position && !is_homing_Z && !tmc4361A_isRunning(&tmc4361[z], stage_PID_enabled[z])) + { + Z_commanded_movement_in_progress = false; + mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Y_commanded_movement_in_progress; + } } - // at limit - if(X_commanded_movement_in_progress && !is_homing_X) // homing is handled separately - { - // if( tmc4361A_readLimitSwitches(&tmc4361[x])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[x])==RGHT_SW ) - if( ( X_direction == LEFT_DIR && tmc4361A_readSwitchEvent(&tmc4361[x]) == LEFT_SW ) || ( X_direction == RGHT_DIR && 
tmc4361A_readSwitchEvent(&tmc4361[x]) == RGHT_SW ) ) + if (us_since_last_check_limit > interval_check_limit) { + us_since_last_check_limit = 0; + + // at limit + if (X_commanded_movement_in_progress && !is_homing_X) // homing is handled separately { - X_commanded_movement_in_progress = false; - mcu_cmd_execution_in_progress = false || Y_commanded_movement_in_progress || Z_commanded_movement_in_progress; + uint8_t event = tmc4361A_readSwitchEvent(&tmc4361[x]); + // if( tmc4361A_readLimitSwitches(&tmc4361[x])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[x])==RGHT_SW ) + if ( ( X_direction == LEFT_DIR && event == LEFT_SW ) || ( X_direction == RGHT_DIR && event == RGHT_SW ) ) + { + X_commanded_movement_in_progress = false; + mcu_cmd_execution_in_progress = false || Y_commanded_movement_in_progress || Z_commanded_movement_in_progress; + } } - } - if(Y_commanded_movement_in_progress && !is_homing_Y) // homing is handled separately - { - //if( tmc4361A_readLimitSwitches(&tmc4361[y])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[y])==RGHT_SW ) - if( ( Y_direction == LEFT_DIR && tmc4361A_readSwitchEvent(&tmc4361[y]) == LEFT_SW ) || ( Y_direction == RGHT_DIR && tmc4361A_readSwitchEvent(&tmc4361[y]) == RGHT_SW ) ) + if (Y_commanded_movement_in_progress && !is_homing_Y) // homing is handled separately { - Y_commanded_movement_in_progress = false; - mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Z_commanded_movement_in_progress; + uint8_t event = tmc4361A_readSwitchEvent(&tmc4361[y]); + //if( tmc4361A_readLimitSwitches(&tmc4361[y])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[y])==RGHT_SW ) + if ( ( Y_direction == LEFT_DIR && event == LEFT_SW ) || ( Y_direction == RGHT_DIR && event == RGHT_SW ) ) + { + Y_commanded_movement_in_progress = false; + mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Z_commanded_movement_in_progress; + } } - } - if(Z_commanded_movement_in_progress && !is_homing_Z) // homing is handled 
separately - { - // if( tmc4361A_readLimitSwitches(&tmc4361[z])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[z])==RGHT_SW ) - if( ( Z_direction == LEFT_DIR && tmc4361A_readSwitchEvent(&tmc4361[z]) == LEFT_SW ) || ( Z_direction == RGHT_DIR && tmc4361A_readSwitchEvent(&tmc4361[z]) == RGHT_SW ) ) + if (Z_commanded_movement_in_progress && !is_homing_Z) // homing is handled separately { - Z_commanded_movement_in_progress = false; - mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Y_commanded_movement_in_progress; + uint8_t event = tmc4361A_readSwitchEvent(&tmc4361[z]); + // if( tmc4361A_readLimitSwitches(&tmc4361[z])==LEFT_SW || tmc4361A_readLimitSwitches(&tmc4361[z])==RGHT_SW ) + if ( ( Z_direction == LEFT_DIR && event == LEFT_SW ) || ( Z_direction == RGHT_DIR && event == RGHT_SW ) ) + { + Z_commanded_movement_in_progress = false; + mcu_cmd_execution_in_progress = false || X_commanded_movement_in_progress || Y_commanded_movement_in_progress; + } } } - } /*************************************************** - * - * timer interrupt - * + + timer interrupt + ***************************************************/ - + // timer interrupt /* -// IntervalTimer stops working after SPI.begin() -void timer_interruptHandler() -{ + // IntervalTimer stops working after SPI.begin() + void timer_interruptHandler() + { SerialUSB.println("timer event"); counter_send_pos_update = counter_send_pos_update + 1; if(counter_send_pos_update==interval_send_pos_update/TIMER_PERIOD) @@ -1908,23 +2052,23 @@ void timer_interruptHandler() counter_send_pos_update = 0; SerialUSB.println("send pos update"); } -} + } */ /***************************************************************************************************/ /********************************************* utils *********************************************/ /***************************************************************************************************/ -long signed2NBytesUnsigned(long signedLong,int N) +long 
signed2NBytesUnsigned(long signedLong, int N) { - long NBytesUnsigned = signedLong + pow(256L,N)/2; + long NBytesUnsigned = signedLong + pow(256L, N) / 2; //long NBytesUnsigned = signedLong + 8388608L; return NBytesUnsigned; } static inline int sgn(int val) { - if (val < 0) return -1; - if (val==0) return 0; - return 1; + if (val < 0) return -1; + if (val == 0) return 0; + return 1; } /***************************************************************************************************/ @@ -1933,19 +2077,19 @@ static inline int sgn(int val) { void set_all(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) { for (int i = 0; i < DOTSTAR_NUM_LEDS; i++) - matrix[i].setRGB(r,g,b); + matrix[i].setRGB(r, g, b); } void set_left(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) { - for (int i = 0; i < DOTSTAR_NUM_LEDS/2; i++) - matrix[i].setRGB(r,g,b); + for (int i = 0; i < DOTSTAR_NUM_LEDS / 2; i++) + matrix[i].setRGB(r, g, b); } void set_right(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) { - for (int i = DOTSTAR_NUM_LEDS/2; i < DOTSTAR_NUM_LEDS; i++) - matrix[i].setRGB(r,g,b); + for (int i = DOTSTAR_NUM_LEDS / 2; i < DOTSTAR_NUM_LEDS; i++) + matrix[i].setRGB(r, g, b); } void set_top(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) @@ -1989,79 +2133,79 @@ void set_bottom(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) void set_low_na(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) { // matrix[44].setRGB(r,g,b); - matrix[45].setRGB(r,g,b); - matrix[46].setRGB(r,g,b); + matrix[45].setRGB(r, g, b); + matrix[46].setRGB(r, g, b); // matrix[47].setRGB(r,g,b); - matrix[56].setRGB(r,g,b); - matrix[57].setRGB(r,g,b); - matrix[58].setRGB(r,g,b); - matrix[59].setRGB(r,g,b); - matrix[68].setRGB(r,g,b); - matrix[69].setRGB(r,g,b); - matrix[70].setRGB(r,g,b); - matrix[71].setRGB(r,g,b); + matrix[56].setRGB(r, g, b); + matrix[57].setRGB(r, g, b); + matrix[58].setRGB(r, g, b); + matrix[59].setRGB(r, g, b); + matrix[68].setRGB(r, g, b); + matrix[69].setRGB(r, g, b); + matrix[70].setRGB(r, g, 
b); + matrix[71].setRGB(r, g, b); // matrix[80].setRGB(r,g,b); - matrix[81].setRGB(r,g,b); - matrix[82].setRGB(r,g,b); + matrix[81].setRGB(r, g, b); + matrix[82].setRGB(r, g, b); // matrix[83].setRGB(r,g,b); } void set_left_dot(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) { - matrix[3].setRGB(r,g,b); - matrix[4].setRGB(r,g,b); - matrix[11].setRGB(r,g,b); - matrix[12].setRGB(r,g,b); + matrix[3].setRGB(r, g, b); + matrix[4].setRGB(r, g, b); + matrix[11].setRGB(r, g, b); + matrix[12].setRGB(r, g, b); } void set_right_dot(CRGB * matrix, uint8_t r, uint8_t g, uint8_t b) { - matrix[115].setRGB(r,g,b); - matrix[116].setRGB(r,g,b); - matrix[123].setRGB(r,g,b); - matrix[124].setRGB(r,g,b); + matrix[115].setRGB(r, g, b); + matrix[116].setRGB(r, g, b); + matrix[123].setRGB(r, g, b); + matrix[124].setRGB(r, g, b); } void clear_matrix(CRGB * matrix) { for (int i = 0; i < DOTSTAR_NUM_LEDS; i++) - matrix[i].setRGB(0,0,0); + matrix[i].setRGB(0, 0, 0); FastLED.show(); } void turn_on_LED_matrix_pattern(CRGB * matrix, int pattern, uint8_t led_matrix_r, uint8_t led_matrix_g, uint8_t led_matrix_b) { - led_matrix_r = (float(led_matrix_r)/255)*LED_MATRIX_MAX_INTENSITY; - led_matrix_g = (float(led_matrix_g)/255)*LED_MATRIX_MAX_INTENSITY; - led_matrix_b = (float(led_matrix_b)/255)*LED_MATRIX_MAX_INTENSITY; + led_matrix_r = (float(led_matrix_r) / 255) * LED_MATRIX_MAX_INTENSITY; + led_matrix_g = (float(led_matrix_g) / 255) * LED_MATRIX_MAX_INTENSITY; + led_matrix_b = (float(led_matrix_b) / 255) * LED_MATRIX_MAX_INTENSITY; // clear matrix set_all(matrix, 0, 0, 0); - - switch(pattern) + + switch (pattern) { case ILLUMINATION_SOURCE_LED_ARRAY_FULL: - set_all(matrix, led_matrix_g*GREEN_ADJUSTMENT_FACTOR, led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); + set_all(matrix, led_matrix_g * GREEN_ADJUSTMENT_FACTOR, led_matrix_r * RED_ADJUSTMENT_FACTOR, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); break; case ILLUMINATION_SOURCE_LED_ARRAY_LEFT_HALF: - set_left(matrix, 
led_matrix_g*GREEN_ADJUSTMENT_FACTOR, led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); + set_left(matrix, led_matrix_g * GREEN_ADJUSTMENT_FACTOR, led_matrix_r * RED_ADJUSTMENT_FACTOR, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); break; case ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_HALF: - set_right(matrix, led_matrix_g*GREEN_ADJUSTMENT_FACTOR, led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); + set_right(matrix, led_matrix_g * GREEN_ADJUSTMENT_FACTOR, led_matrix_r * RED_ADJUSTMENT_FACTOR, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); break; case ILLUMINATION_SOURCE_LED_ARRAY_LEFTB_RIGHTR: - set_left(matrix,0,0,led_matrix_b*BLUE_ADJUSTMENT_FACTOR); - set_right(matrix,0,led_matrix_r*RED_ADJUSTMENT_FACTOR,0); + set_left(matrix, 0, 0, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); + set_right(matrix, 0, led_matrix_r * RED_ADJUSTMENT_FACTOR, 0); break; case ILLUMINATION_SOURCE_LED_ARRAY_LOW_NA: - set_low_na(matrix, led_matrix_g*GREEN_ADJUSTMENT_FACTOR, led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); + set_low_na(matrix, led_matrix_g * GREEN_ADJUSTMENT_FACTOR, led_matrix_r * RED_ADJUSTMENT_FACTOR, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); break; case ILLUMINATION_SOURCE_LED_ARRAY_LEFT_DOT: - set_left_dot(matrix, led_matrix_g*GREEN_ADJUSTMENT_FACTOR, led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); + set_left_dot(matrix, led_matrix_g * GREEN_ADJUSTMENT_FACTOR, led_matrix_r * RED_ADJUSTMENT_FACTOR, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); break; case ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_DOT: - set_right_dot(matrix, led_matrix_g*GREEN_ADJUSTMENT_FACTOR, led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); + set_right_dot(matrix, led_matrix_g * GREEN_ADJUSTMENT_FACTOR, led_matrix_r * RED_ADJUSTMENT_FACTOR, led_matrix_b * BLUE_ADJUSTMENT_FACTOR); break; case ILLUMINATION_SOURCE_LED_ARRAY_TOP_HALF: set_top(matrix, led_matrix_g*GREEN_ADJUSTMENT_FACTOR, 
led_matrix_r*RED_ADJUSTMENT_FACTOR, led_matrix_b*BLUE_ADJUSTMENT_FACTOR); diff --git a/software/.gitignore b/software/.gitignore index 3921c2af0..02383095b 100644 --- a/software/.gitignore +++ b/software/.gitignore @@ -2,3 +2,4 @@ *.txt *_configurations.xml cache/ +drivers and libraries/daheng camera/ diff --git a/software/README.md b/software/README.md index 7b1013d30..cb2b08f9f 100644 --- a/software/README.md +++ b/software/README.md @@ -1,10 +1,18 @@ ## Setting up the environments Run the following script in terminal to clone the repo and set up the environment ``` -wget https://raw.githubusercontent.com/hongquanli/octopi-research/master/software/setup_22.04.sh +wget https://raw.githubusercontent.com/Cephla-Lab/Squid/master/software/setup_22.04.sh chmod +x setup_22.04.sh ./setup_22.04.sh ``` + +Then run this script to set up cuda +``` +wget https://raw.githubusercontent.com/Cephla-Lab/Squid/master/software/setup_cuda_22.04.sh +chmod +x setup_cuda_22.04.sh +./setup_cuda_22.04.sh +``` + Reboot the computer to finish the installation. ## Optional or Hardware-specific dependencies @@ -12,6 +20,7 @@ Reboot the computer to finish the installation.
image stitching dependencies (optional) For optional image stitching using ImageJ, additionally run the following: + ``` sudo apt-get update sudo apt-get install openjdk-11-jdk @@ -38,7 +47,7 @@ export PATH=$JAVA_HOME/bin:$PATH Installing drivers and libraries for FLIR camera support Go to FLIR's page for downloading their Spinnaker SDK (https://www.flir.com/support/products/spinnaker-sdk/) and register. -Open the software/drivers and libraries/flir folder in terminal and run the following +Open the `software/drivers and libraries/flir` folder in terminal and run the following ``` sh ./install_spinnaker.sh sh ./install_PySpin.sh @@ -53,6 +62,69 @@ sudo cp drivers\ and\ libraries/toupcam/linux/udev/99-toupcam.rules /etc/udev/ru ```
+
+Installing drivers and libraries for Hamamatsu camera support + +Open the `software/drivers and libraries/hamamatsu` folder in terminal and run the following +``` +sh ./install_hamamatsu.sh +``` +
+ +
+Installing drivers and libraries for iDS camera support +- Software: + +Go to iDS's page for downloading their software (https://en.ids-imaging.com/download-details/1009877.html?os=linux&version=&bus=64&floatcalc=). Register and log in. + +Open the `software/drivers and libraries/ids` folder in terminal and run the following +``` +sh ./install_ids.sh +``` + +You will be asked to enter sudo password. + +- Firmware (optional): + +If you would like to update the firmware of the camera (optional), download the Vision firmware update (GUF file) on the same page. + +Open the `software/drivers and libraries/ids/ids-peak_2.11.0.0-178_amd64/bin` folder in terminal and run the following +``` +./ids_peak_cockpit +``` + +This will start the iDS peak Cockpit software. Then: +1. Open the camera manager by clicking on the camera manager icon in the main menu. +2. Select the camera in the camera manager. +3. Click on the firmware update icon in the menu to open the dialog for selecting the update file for the Vision firmware (*.guf). +4. Select the update file. +5. Click on "Open". + +The update is started and the camera is updated. Note: If you select an incorrect update file by mistake, you will see the message "The update file is incompatible". +After the update is complete, you can close the iDS peak Cockpit software. (Reference: https://en.ids-imaging.com/tl_files/downloads/usb3-vision/firmware/ReadMe.html) + +
+ +
+Installing drivers and libraries for Tucsen camera support + +Open the `software/drivers and libraries/tucsen` folder in terminal and run the following to log in as a root user +``` +sudo -s +``` + +The following steps should be run as root user +``` +sh ./install_tucsen.sh +``` + +After installation, run the following to log out +``` +exit +``` + +
+ ## Configuring the software Copy the .ini file associated with the microscope configuration to the software folder. Make modifications as needed (e.g. `camera_type`, `support_laser_autofocus`,`focus_camera_exposure_time_ms`) diff --git a/software/add_desktop_launcher_malaria.sh b/software/add_desktop_launcher_malaria.sh deleted file mode 100755 index 6b32f4f40..000000000 --- a/software/add_desktop_launcher_malaria.sh +++ /dev/null @@ -1,3 +0,0 @@ -cp octopi-research-malaria.desktop ~/.local/share/applications/ -chmod u+x ~/.local/share/applications/octopi-research-malaria.desktop -cp ~/.local/share/applications/octopi-research-malaria.desktop ~/Desktop/ \ No newline at end of file diff --git a/software/configurations/configuration_HCS_v2.ini b/software/configurations/configuration_HCS_v2.ini index 0de93bcef..0ac51cde4 100644 --- a/software/configurations/configuration_HCS_v2.ini +++ b/software/configurations/configuration_HCS_v2.ini @@ -1,15 +1,34 @@ [GENERAL] -camera_type=Default -_camera_type_options=[Default, FLIR, Toupcam] +camera_type = Default +_camera_type_options = [Default, FLIR, Toupcam] rotate_image_angle = None -flip_image = Vertical +flip_image = Both _flip_image_options = [Vertical, Horizontal, Both, None] camera_reverse_x = False -_camera_reverse_x_options = [True,False] +_camera_reverse_x_options = [True, False] camera_reverse_y = False _camera_reverse_y_options = [True, False] default_pixel_format = MONO8 -_default_pixel_format_options = [MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12] +_default_pixel_format_options = [MONO8, MONO12, MONO14, MONO16, BAYER_RG8, BAYER_RG12] +controller_sn = None +support_scimicroscopy_led_array = False +scimicroscopy_led_array_sn = None +scimicroscopy_led_array_distance = 50 +scimicroscopy_led_array_default_na = 0.8 +scimicroscopy_led_array_default_color = [1, 1, 1] +scimicroscopy_led_array_turn_on_delay = 0.03 +enable_spinning_disk_confocal = False +use_ldi_serial_control = False +xlight_emission_filter_mapping = 
{405: 1, 470: 2, 555: 3, 640: 4, 730: 5} +xlight_serial_number = "" +xlight_sleep_time_for_wheel = 0.25 +xlight_validate_wheel_pos = False +use_napari_for_live_view = False +use_napari_for_multipoint = True +use_napari_for_tiled_display = False +is_hcs = True +enable_stitcher = False +stitch_complete_acquisition = False stage_movement_sign_x = 1 stage_movement_sign_y = 1 stage_movement_sign_z = -1 @@ -23,13 +42,13 @@ tracking_movement_sign_y = 1 tracking_movement_sign_z = 1 tracking_movement_sign_theta = 1 use_encoder_x = False -_use_encoder_x_options = [True,False] +_use_encoder_x_options = [True, False] use_encoder_y = False -_use_encoder_y_options = [True,False] +_use_encoder_y_options = [True, False] use_encoder_z = False -_use_encoder_z_options = [True,False] +_use_encoder_z_options = [True, False] use_encoder_theta = False -_use_encoder_theta_options = [True,False] +_use_encoder_theta_options = [True, False] encoder_pos_sign_x = 1 encoder_pos_sign_y = 1 encoder_pos_sign_z = 1 @@ -45,9 +64,9 @@ fullsteps_per_rev_theta = 200 screw_pitch_x_mm = 2.54 screw_pitch_y_mm = 2.54 screw_pitch_z_mm = 0.3 -microstepping_default_x = 256 -microstepping_default_y = 256 -microstepping_default_z = 256 +microstepping_default_x = 32 +microstepping_default_y = 32 +microstepping_default_z = 32 microstepping_default_theta = 256 x_motor_rms_current_ma = 1000 y_motor_rms_current_ma = 1000 @@ -57,82 +76,109 @@ y_motor_i_hold = 0.25 z_motor_i_hold = 0.5 max_velocity_x_mm = 30 max_velocity_y_mm = 30 -max_velocity_z_mm = 4 +max_velocity_z_mm = 3.8 max_acceleration_x_mm = 500 max_acceleration_y_mm = 500 max_acceleration_z_mm = 100 +enable_pid_x = False +_enable_pid_x_options = [True, False] +enable_pid_y = False +_enable_pid_y_options = [True, False] +enable_pid_z = False +_enable_pid_z_options = [True, False] +has_encoder_x = False +_has_encoder_x_options = [True, False] +has_encoder_y = False +_has_encoder_y_options = [True, False] +has_encoder_z = False +_has_encoder_z_options = [True, 
False] +encoder_flip_dir_x = True +_encoder_flip_dir_x_options = [True, False] +encoder_flip_dir_y = True +_encoder_flip_dir_y_options = [True, False] +encoder_flip_dir_z = True +_encoder_flip_dir_z_options = [True, False] +encoder_resolution_um_x = 0.05 +encoder_resolution_um_y = 0.05 +encoder_resolution_um_z = 0.1 scan_stabilization_time_ms_x = 25 scan_stabilization_time_ms_y = 25 scan_stabilization_time_ms_z = 20 homing_enabled_x = True -_homing_enabled_x_options = [True,False] +_homing_enabled_x_options = [True, False] homing_enabled_y = True -_homing_enabled_y_options = [True,False] +_homing_enabled_y_options = [True, False] homing_enabled_z = True -_homing_enabled_z_options = [True,False] +_homing_enabled_z_options = [True, False] sleep_time_s = 0.005 led_matrix_r_factor = 1.0 led_matrix_g_factor = 1.0 led_matrix_b_factor = 1.0 default_saving_path = /Downloads multipoint_autofocus_channel = BF LED matrix full -multipoint_autofocus_enable_by_default = True -_multipoint_autofocus_enable_by_default_options=[True,False] -multipoint_bf_saving_option = Green Channel Only +_multipoint_autofocus_enable_by_default_options = [True, False] +multipoint_bf_saving_option = Raw run_custom_multipoint = False default_display_crop = 85 -camera_pixel_size_um = {"IMX226":1.85,"IMX250":3.45,"IMX252":3.45,"PYTHON300":4.8} -objectives = {"2x":{"magnification":2, "NA":0.10, "tube_lens_f_mm":180},"4x":{"magnification":4, "NA":0.13, "tube_lens_f_mm":180}, "10x":{"magnification":10, "NA":0.25, "tube_lens_f_mm":180}, "10x (Mitutoyo)":{"magnification":10, "NA":0.25, "tube_lens_f_mm":200}, "20x (Boli)":{"magnification":20, "NA":0.4, "tube_lens_f_mm":180}, "20x (Nikon)":{"magnification":20, "NA":0.45, "tube_lens_f_mm":200}, "20x":{"magnification":20, "NA":0.4, "tube_lens_f_mm":180}, "40x":{"magnification":40, "NA":0.6, "tube_lens_f_mm":180}} +camera_pixel_size_um = {"IMX226": 1.85, "IMX250": 3.45, "IMX252": 3.45, "PYTHON300": 4.8} +objectives = {"2x": {"magnification": 2, "NA": 0.10, 
"tube_lens_f_mm": 180}, "4x": {"magnification": 4, "NA": 0.13, "tube_lens_f_mm": 180}, "10x": {"magnification": 10, "NA": 0.25, "tube_lens_f_mm": 180}, "10x (Mitutoyo)": {"magnification": 10, "NA": 0.25, "tube_lens_f_mm": 200}, "20x": {"magnification": 20, "NA": 0.4, "tube_lens_f_mm": 180}, "20x (Boli)": {"magnification": 20, "NA": 0.4, "tube_lens_f_mm": 180}, "20x (Nikon)": {"magnification": 20, "NA": 0.45, "tube_lens_f_mm": 200}, "40x": {"magnification": 40, "NA": 0.6, "tube_lens_f_mm": 180}} tube_lens_mm = 50 camera_sensor = IMX226 -_camera_sensor_options = [IMX226,IMX250,IMX252,PYTHON300] +_camera_sensor_options = [IMX226, IMX250, IMX252, PYTHON300] default_objective = 20x -_default_objective_options = [2x,4x,10x,10x (Mitutoyo), 20x (Boli), 20x (Nikon), 40x] +_default_objective_options = [2x, 4x, 10x, 10x (Mitutoyo), 20x (Boli), 20x (Nikon), 40x] do_fluorescence_rtp = False -_do_fluorescence_rtp_options = [True,False] +_do_fluorescence_rtp_options = [True, False] sort_during_multipoint = False -_sort_during_multipoint_options = [True,False] +_sort_during_multipoint_options = [True, False] default_z_pos_mm = 2.287 enable_tracking = False -_enable_tracking_options = [True,False] -trackers = ["csrt", "kcf", "mil", "tld", "medianflow","mosse","daSiamRPN"] +_enable_tracking_options = [True, False] +trackers = ["csrt", "kcf", "mil", "tld", "medianflow", "mosse", "daSiamRPN"] tracking_show_microscope_configurations = False -_tracking_show_microscope_configurations_options = [True,False] +_tracking_show_microscope_configurations_options = [True, False] -wellplate_format=384 -_wellplate_format_options=[384,96,24,12,6] -x_mm_384_wellplate_upperleft=12.41 -y_mm_384_wellplate_upperleft=11.18 +wellplate_format = 384 +_wellplate_format_options = [1536, 384, 96, 24, 12, 6] +x_mm_384_wellplate_upperleft = 12.41 +y_mm_384_wellplate_upperleft = 11.18 -wellplate_offset_x_mm=0 -wellplate_offset_y_mm=0 +wellplate_offset_x_mm = 0 +wellplate_offset_y_mm = 0 
-focus_measure_operator=GLVA -controller_version=Teensy -support_laser_autofocus=True -focus_camera_type=Default -_focus_camera_type_options=[Default, FLIR, Toupcam] -_support_laser_autofocus_options=[True,False] -main_camera_model=MER2-1220-32U3M -focus_camera_model=MER2-630-60U3M -focus_camera_exposure_time_ms=0.8 +focus_measure_operator = GLVA +controller_version = Teensy +support_laser_autofocus = True +focus_camera_type = Default +_focus_camera_type_options = [Default, FLIR, Toupcam] +_support_laser_autofocus_options = [True, False] +main_camera_model = MER2-1220-32U3M +focus_camera_model = MER2-630-60U3M +focus_camera_exposure_time_ms = 0.8 -use_glass_top=True -_use_glass_top_options=[True,False] +use_glass_top = True +_use_glass_top_options = [True, False] -has_two_interfaces=True -_has_two_interfaces_options=[True,False] +has_two_interfaces = True +_has_two_interfaces_options = [True, False] -enable_flexible_multipoint=True -_enable_flexible_multipoint_options=[True,False] +enable_flexible_multipoint = True +_enable_flexible_multipoint_options = [True, False] -default_multipoint_nx=1 -default_multipoint_ny=1 -enable_spinning_disk_confocal=False -_enable_spinning_disk_confocal_options=[True,False] +default_multipoint_nx = 1 +default_multipoint_ny = 1 -inverted_objective=True -_inverted_objective_options=[True,False] +inverted_objective = True +_inverted_objective_options = [True, False] + +filter_controller_enable = False +_filter_controller_enable_options = [True, False] + +illumination_intensity_factor = 0.6 + +awb_ratios_r = 1.375 +awb_ratios_g = 1 +awb_ratios_b = 1.4141 [LIMIT_SWITCH_POLARITY] x_home = 1 @@ -148,6 +194,8 @@ offset_column_1_mm = 20 offset_row_a_mm = 20 [AF] +multipoint_reflection_autofocus_enable_by_default = True +multipoint_autofocus_enable_by_default = False stop_threshold = 0.85 crop_width = 800 crop_height = 800 @@ -177,10 +225,31 @@ x_positive = 112.5 x_negative = 10 y_positive = 76 y_negative = 6 -z_positive = 6 +z_positive = 7 
+z_negative = 0.05 [SLIDE_POSITION] loading_x_mm = 0.5 loading_y_mm = 0.5 scanning_x_mm = 20 scanning_y_mm = 20 + +[OUTPUT_GAINS] +refdiv = False +_refdiv_options = [True, False] +channel0_gain = False +_channel0_gain_options = [True, False] +channel1_gain = False +_channel1_gain_options = [True, False] +channel2_gain = False +_channel2_gain_options = [True, False] +channel3_gain = False +_channel3_gain_options = [True, False] +channel4_gain = False +_channel4_gain_options = [True, False] +channel5_gain = False +_channel5_gain_options = [True, False] +channel6_gain = False +_channel6_gain_options = [True, False] +channel7_gain = True +_channel7_gain_options = [True, False] diff --git a/software/configurations/configuration_Squid+.ini b/software/configurations/configuration_Squid+.ini new file mode 100644 index 000000000..d63846ae8 --- /dev/null +++ b/software/configurations/configuration_Squid+.ini @@ -0,0 +1,252 @@ +[GENERAL] +camera_type = Toupcam +_camera_type_options = [Default, FLIR, Toupcam] +rotate_image_angle = None +flip_image = None +_flip_image_options = [Vertical, Horizontal, Both, None] +camera_reverse_x = False +_camera_reverse_x_options = [True, False] +camera_reverse_y = False +_camera_reverse_y_options = [True, False] +default_pixel_format = MONO16 +_default_pixel_format_options = [MONO8, MONO12, MONO14, MONO16, BAYER_RG8, BAYER_RG12] +controller_sn = None +support_scimicroscopy_led_array = False +scimicroscopy_led_array_sn = None +scimicroscopy_led_array_distance = 50 +scimicroscopy_led_array_default_na = 0.8 +scimicroscopy_led_array_default_color = [1, 1, 1] +scimicroscopy_led_array_turn_on_delay = 0.03 +enable_spinning_disk_confocal = False +use_ldi_serial_control = False +xlight_emission_filter_mapping = {405: 1, 470: 2, 555: 3, 640: 4, 730: 5} +xlight_serial_number = "" +xlight_sleep_time_for_wheel = 0.25 +xlight_validate_wheel_pos = False +use_napari_for_live_view = False +use_napari_for_multipoint = True +use_napari_for_tiled_display = False 
+is_hcs = True +stage_movement_sign_x = 1 +stage_movement_sign_y = 1 +stage_movement_sign_z = -1 +stage_movement_sign_theta = 1 +stage_pos_sign_x = 1 +stage_pos_sign_y = 1 +stage_pos_sign_z = -1 +stage_pos_sign_theta = 1 +tracking_movement_sign_x = 1 +tracking_movement_sign_y = 1 +tracking_movement_sign_z = 1 +tracking_movement_sign_theta = 1 +use_encoder_x = False +_use_encoder_x_options = [True, False] +use_encoder_y = False +_use_encoder_y_options = [True, False] +use_encoder_z = False +_use_encoder_z_options = [True, False] +use_encoder_theta = False +_use_encoder_theta_options = [True, False] +encoder_pos_sign_x = 1 +encoder_pos_sign_y = 1 +encoder_pos_sign_z = 1 +encoder_pos_sign_theta = 1 +encoder_step_size_x_mm = 100e-6 +encoder_step_size_y_mm = 100e-6 +encoder_step_size_z_mm = 100e-6 +encoder_step_size_theta = 1 +fullsteps_per_rev_x = 200 +fullsteps_per_rev_y = 200 +fullsteps_per_rev_z = 200 +fullsteps_per_rev_theta = 200 +screw_pitch_x_mm = 2.54 +screw_pitch_y_mm = 2.54 +screw_pitch_z_mm = 0.3 +microstepping_default_x = 16 +microstepping_default_y = 16 +microstepping_default_z = 16 +microstepping_default_theta = 256 +x_motor_rms_current_ma = 1000 +y_motor_rms_current_ma = 1000 +z_motor_rms_current_ma = 500 +x_motor_i_hold = 0.25 +y_motor_i_hold = 0.25 +z_motor_i_hold = 0.5 +max_velocity_x_mm = 30 +max_velocity_y_mm = 30 +max_velocity_z_mm = 3.8 +max_acceleration_x_mm = 500 +max_acceleration_y_mm = 500 +max_acceleration_z_mm = 100 +enable_pid_x = False +_enable_pid_x_options = [True, False] +enable_pid_y = False +_enable_pid_y_options = [True, False] +enable_pid_z = False +_enable_pid_z_options = [True, False] +has_encoder_x = False +_has_encoder_x_options = [True, False] +has_encoder_y = False +_has_encoder_y_options = [True, False] +has_encoder_z = False +_has_encoder_z_options = [True, False] +encoder_flip_dir_x = True +_encoder_flip_dir_x_options = [True, False] +encoder_flip_dir_y = True +_encoder_flip_dir_y_options = [True, False] +encoder_flip_dir_z 
= True +_encoder_flip_dir_z_options = [True, False] +encoder_resolution_um_x = 0.05 +encoder_resolution_um_y = 0.05 +encoder_resolution_um_z = 0.1 +scan_stabilization_time_ms_x = 25 +scan_stabilization_time_ms_y = 25 +scan_stabilization_time_ms_z = 20 +homing_enabled_x = True +_homing_enabled_x_options = [True, False] +homing_enabled_y = True +_homing_enabled_y_options = [True, False] +homing_enabled_z = True +_homing_enabled_z_options = [True, False] +sleep_time_s = 0.005 +led_matrix_r_factor = 1.0 +led_matrix_g_factor = 1.0 +led_matrix_b_factor = 1.0 +default_saving_path = /Downloads +multipoint_autofocus_channel = BF LED matrix full +multipoint_autofocus_enable_by_default = False +_multipoint_autofocus_enable_by_default_options = [True, False] +multipoint_bf_saving_option = Green Channel Only +run_custom_multipoint = False +default_display_crop = 85 +camera_pixel_size_um = {"IMX226": 1.85, "IMX250": 3.45, "IMX252": 3.45, "PYTHON300": 4.8, "IMX571": 7.52} +tube_lens_mm = 180 +camera_sensor = IMX571 +_camera_sensor_options = [IMX226, IMX250, IMX252, PYTHON300, IMX571] +default_objective = 20x +_default_objective_options = [2x, 4x, 10x, 10x (Mitutoyo), 20x, 20x (Boli), 20x (Nikon), 40x, 50x, 60x] +do_fluorescence_rtp = False +_do_fluorescence_rtp_options = [True, False] +sort_during_multipoint = False +_sort_during_multipoint_options = [True, False] +default_z_pos_mm = 2.287 +enable_tracking = False +_enable_tracking_options = [True, False] +trackers = ["csrt", "kcf", "mil", "tld", "medianflow", "mosse", "daSiamRPN"] +tracking_show_microscope_configurations = False +_tracking_show_microscope_configurations_options = [True, False] + +wellplate_format = 384 +_wellplate_format_options = [1536, 384, 96, 24, 12, 6] +x_mm_384_wellplate_upperleft = 12.41 +y_mm_384_wellplate_upperleft = 11.18 + +wellplate_offset_x_mm = 0 +wellplate_offset_y_mm = 0 + +focus_measure_operator = GLVA +controller_version = Teensy +support_laser_autofocus = True +focus_camera_type = Default 
+_focus_camera_type_options = [Default, FLIR, Toupcam] +_support_laser_autofocus_options = [True, False] +main_camera_model = MER2-1220-32U3M +focus_camera_model = MER2-630-60U3M +focus_camera_exposure_time_ms = 0.8 + +use_glass_top = True +_use_glass_top_options = [True, False] + +has_two_interfaces = True +_has_two_interfaces_options = [True, False] + +enable_flexible_multipoint = True +_enable_flexible_multipoint_options = [True, False] + +default_multipoint_nx = 1 +default_multipoint_ny = 1 + +inverted_objective = True +_inverted_objective_options = [True, False] + +filter_controller_enable = False +_filter_controller_enable_options = [True, False] + +illumination_intensity_factor = 0.6 + +awb_ratios_r = 1.375 +awb_ratios_g = 1 +awb_ratios_b = 1.4141 + +[LIMIT_SWITCH_POLARITY] +x_home = 1 +y_home = 1 +z_home = 0 + +[PLATE_READER] +number_of_rows = 8 +number_of_columns = 12 +row_spacing_mm = 9 +column_spacing_mm = 9 +offset_column_1_mm = 20 +offset_row_a_mm = 20 + +[AF] +stop_threshold = 0.85 +crop_width = 800 +crop_height = 800 + +[TRACKING] +search_area_ratio = 10 +cropped_img_ratio = 10 +bbox_scale_factor = 1.2 +default_tracker = csrt +init_methods = ["roi"] +default_init_method = roi +_default_init_method_options = [roi] +default_display_crop = 100 + +[ACQUISITION] +crop_width = 3000 +crop_height = 3000 +number_of_fovs_per_af = 3 +image_format = bmp +image_display_scaling_factor = 0.85 +dx = 0.9 +dy = 0.9 +dz = 1.5 + +[SOFTWARE_POS_LIMIT] +x_positive = 115 +x_negative = 5 +y_positive = 76 +y_negative = 4 +z_positive = 6 +z_negative = 0.05 + +[SLIDE_POSITION] +loading_x_mm = 0.5 +loading_y_mm = 0.5 +scanning_x_mm = 20 +scanning_y_mm = 20 + +[OUTPUT_GAINS] +refdiv = False +_refdiv_options = [True, False] +channel0_gain = False +_channel0_gain_options = [True, False] +channel1_gain = False +_channel1_gain_options = [True, False] +channel2_gain = False +_channel2_gain_options = [True, False] +channel3_gain = False +_channel3_gain_options = [True, False] 
+channel4_gain = False +_channel4_gain_options = [True, False] +channel5_gain = False +_channel5_gain_options = [True, False] +channel6_gain = False +_channel6_gain_options = [True, False] +channel7_gain = True +_channel7_gain_options = [True, False] + diff --git a/software/configurations/configuration_Squid+_H117_ORCA.ini b/software/configurations/configuration_Squid+_H117_ORCA.ini new file mode 100644 index 000000000..b136bbbcb --- /dev/null +++ b/software/configurations/configuration_Squid+_H117_ORCA.ini @@ -0,0 +1,264 @@ +[GENERAL] +# Prior stage +USE_PRIOR_STAGE = True +PRIOR_STAGE_SN = "AQ0427ZH" +# Piezo +ENABLE_OBJECTIVE_PIEZO = True +# the value of OBJECTIVE_PIEZO_CONTROL_VOLTAGE_RANGE is 2.5 or 5 +OBJECTIVE_PIEZO_CONTROL_VOLTAGE_RANGE = 5 +OBJECTIVE_PIEZO_RANGE_UM = 600 +OBJECTIVE_PIEZO_HOME_UM = 20 +OBJECTIVE_PIEZO_FLIP_DIR = True + +camera_type = Hamamatsu +_camera_type_options = [Default, FLIR, Toupcam, Hamamatsu] +rotate_image_angle = None +flip_image = None +_flip_image_options = [Vertical, Horizontal, Both, None] +camera_reverse_x = False +_camera_reverse_x_options = [True, False] +camera_reverse_y = False +_camera_reverse_y_options = [True, False] +default_pixel_format = MONO16 +_default_pixel_format_options = [MONO8, MONO16] +controller_sn = None +support_scimicroscopy_led_array = False +scimicroscopy_led_array_sn = None +scimicroscopy_led_array_distance = 50 +scimicroscopy_led_array_default_na = 0.8 +scimicroscopy_led_array_default_color = [1, 1, 1] +scimicroscopy_led_array_turn_on_delay = 0.03 +enable_spinning_disk_confocal = False +use_ldi_serial_control = False +xlight_emission_filter_mapping = {405: 1, 470: 2, 555: 3, 640: 4, 730: 5} +xlight_serial_number = "" +xlight_sleep_time_for_wheel = 0.25 +xlight_validate_wheel_pos = False +use_napari_for_live_view = False +use_napari_for_multipoint = True +use_napari_for_tiled_display = False +is_hcs = True +stage_movement_sign_x = 1 +stage_movement_sign_y = 1 +stage_movement_sign_z = -1 
+stage_movement_sign_theta = 1 +stage_pos_sign_x = 1 +stage_pos_sign_y = 1 +stage_pos_sign_z = -1 +stage_pos_sign_theta = 1 +tracking_movement_sign_x = 1 +tracking_movement_sign_y = 1 +tracking_movement_sign_z = 1 +tracking_movement_sign_theta = 1 +use_encoder_x = False +_use_encoder_x_options = [True, False] +use_encoder_y = False +_use_encoder_y_options = [True, False] +use_encoder_z = False +_use_encoder_z_options = [True, False] +use_encoder_theta = False +_use_encoder_theta_options = [True, False] +encoder_pos_sign_x = 1 +encoder_pos_sign_y = 1 +encoder_pos_sign_z = 1 +encoder_pos_sign_theta = 1 +encoder_step_size_x_mm = 100e-6 +encoder_step_size_y_mm = 100e-6 +encoder_step_size_z_mm = 100e-6 +encoder_step_size_theta = 1 +fullsteps_per_rev_x = 200 +fullsteps_per_rev_y = 200 +fullsteps_per_rev_z = 200 +fullsteps_per_rev_theta = 200 +screw_pitch_x_mm = 2.54 +screw_pitch_y_mm = 2.54 +screw_pitch_z_mm = 0.3 +microstepping_default_x = 256 +microstepping_default_y = 256 +microstepping_default_z = 256 +microstepping_default_theta = 256 +x_motor_rms_current_ma = 1000 +y_motor_rms_current_ma = 1000 +z_motor_rms_current_ma = 500 +x_motor_i_hold = 0.25 +y_motor_i_hold = 0.25 +z_motor_i_hold = 0.5 +max_velocity_x_mm = 30 +max_velocity_y_mm = 30 +max_velocity_z_mm = 3.8 +max_acceleration_x_mm = 500 +max_acceleration_y_mm = 500 +max_acceleration_z_mm = 100 +enable_pid_x = False +_enable_pid_x_options = [True, False] +enable_pid_y = False +_enable_pid_y_options = [True, False] +enable_pid_z = False +_enable_pid_z_options = [True, False] +has_encoder_x = False +_has_encoder_x_options = [True, False] +has_encoder_y = False +_has_encoder_y_options = [True, False] +has_encoder_z = False +_has_encoder_z_options = [True, False] +encoder_flip_dir_x = True +_encoder_flip_dir_x_options = [True, False] +encoder_flip_dir_y = True +_encoder_flip_dir_y_options = [True, False] +encoder_flip_dir_z = True +_encoder_flip_dir_z_options = [True, False] +encoder_resolution_um_x = 0.05 
+encoder_resolution_um_y = 0.05 +encoder_resolution_um_z = 0.1 +scan_stabilization_time_ms_x = 25 +scan_stabilization_time_ms_y = 25 +scan_stabilization_time_ms_z = 20 +homing_enabled_x = False +_homing_enabled_x_options = [True, False] +homing_enabled_y = False +_homing_enabled_y_options = [True, False] +homing_enabled_z = True +_homing_enabled_z_options = [True, False] +sleep_time_s = 0.005 +led_matrix_r_factor = 1.0 +led_matrix_g_factor = 1.0 +led_matrix_b_factor = 1.0 +default_saving_path = /Downloads +multipoint_autofocus_channel = BF LED matrix full +multipoint_autofocus_enable_by_default = True +_multipoint_autofocus_enable_by_default_options = [True, False] +multipoint_bf_saving_option = Green Channel Only +run_custom_multipoint = False +default_display_crop = 85 +camera_pixel_size_um = {"IMX226": 1.85, "IMX250": 3.45, "IMX252": 3.45, "PYTHON300": 4.8, "Hamamatsu": 6.5} +objectives = {"2x": {"magnification": 2, "NA": 0.10, "tube_lens_f_mm": 180}, "4x": {"magnification": 4, "NA": 0.13, "tube_lens_f_mm": 180}, "10x": {"magnification": 10, "NA": 0.25, "tube_lens_f_mm": 180}, "10x (Mitutoyo)": {"magnification": 10, "NA": 0.25, "tube_lens_f_mm": 200}, "20x (Boli)": {"magnification": 20, "NA": 0.4, "tube_lens_f_mm": 180}, "20x (Nikon)": {"magnification": 20, "NA": 0.45, "tube_lens_f_mm": 200}, "20x": {"magnification": 20, "NA": 0.4, "tube_lens_f_mm": 180}, "40x": {"magnification": 40, "NA": 0.6, "tube_lens_f_mm": 180}} +tube_lens_mm = 50 +camera_sensor = Hamamatsu +_camera_sensor_options = [IMX226, IMX250, IMX252, PYTHON300, Hamamatsu] +default_objective = 20x +_default_objective_options = [2x, 4x, 10x, 10x (Mitutoyo), 20x (Boli), 20x (Nikon), 40x] +do_fluorescence_rtp = False +_do_fluorescence_rtp_options = [True, False] +sort_during_multipoint = False +_sort_during_multipoint_options = [True, False] +default_z_pos_mm = 2.287 +enable_tracking = False +_enable_tracking_options = [True, False] +trackers = ["csrt", "kcf", "mil", "tld", "medianflow", "mosse", 
"daSiamRPN"] +tracking_show_microscope_configurations = False +_tracking_show_microscope_configurations_options = [True, False] + +wellplate_format = 384 +_wellplate_format_options = [1536, 384, 96, 24, 12, 6] +x_mm_384_wellplate_upperleft = 12.41 +y_mm_384_wellplate_upperleft = 11.18 + +wellplate_offset_x_mm = 0 +wellplate_offset_y_mm = 0 + +focus_measure_operator = GLVA +controller_version = Teensy +support_laser_autofocus = True +focus_camera_type = Default +_focus_camera_type_options = [Default, FLIR, Toupcam] +_support_laser_autofocus_options = [True, False] +main_camera_model = C15440-20UP +focus_camera_model = MER2-630-60U3M +focus_camera_exposure_time_ms = 0.8 + +use_glass_top = True +_use_glass_top_options = [True, False] + +has_two_interfaces = True +_has_two_interfaces_options = [True, False] + +enable_flexible_multipoint = True +_enable_flexible_multipoint_options = [True, False] + +default_multipoint_nx = 1 +default_multipoint_ny = 1 + +inverted_objective = True +_inverted_objective_options = [True, False] + +filter_controller_enable = False +_filter_controller_enable_options = [True, False] + +illumination_intensity_factor = 0.6 + +awb_ratios_r = 1.375 +awb_ratios_g = 1 +awb_ratios_b = 1.4141 + +[LIMIT_SWITCH_POLARITY] +x_home = 1 +y_home = 1 +z_home = 0 + +[PLATE_READER] +number_of_rows = 8 +number_of_columns = 12 +row_spacing_mm = 9 +column_spacing_mm = 9 +offset_column_1_mm = 20 +offset_row_a_mm = 20 + +[AF] +stop_threshold = 0.85 +crop_width = 800 +crop_height = 800 + +[TRACKING] +search_area_ratio = 10 +cropped_img_ratio = 10 +bbox_scale_factor = 1.2 +default_tracker = csrt +init_methods = ["roi"] +default_init_method = roi +_default_init_method_options = [roi] +default_display_crop = 100 + +[ACQUISITION] +crop_width = 3000 +crop_height = 3000 +number_of_fovs_per_af = 3 +image_format = bmp +image_display_scaling_factor = 0.85 +dx = 0.9 +dy = 0.9 +dz = 1.5 + +[SOFTWARE_POS_LIMIT] +x_positive = 112.5 +x_negative = 10 +y_positive = 76 +y_negative = 
6 +z_positive = 6 +z_negative = 0.05 + +[SLIDE_POSITION] +loading_x_mm = 0.5 +loading_y_mm = 0.5 +scanning_x_mm = 20 +scanning_y_mm = 20 + +[OUTPUT_GAINS] +refdiv = False +_refdiv_options = [True, False] +channel0_gain = False +_channel0_gain_options = [True, False] +channel1_gain = False +_channel1_gain_options = [True, False] +channel2_gain = False +_channel2_gain_options = [True, False] +channel3_gain = False +_channel3_gain_options = [True, False] +channel4_gain = False +_channel4_gain_options = [True, False] +channel5_gain = False +_channel5_gain_options = [True, False] +channel6_gain = False +_channel6_gain_options = [True, False] +channel7_gain = True +_channel7_gain_options = [True, False] + diff --git a/software/configurations/configuration_octopi_v2.ini b/software/configurations/configuration_octopi_v2.ini index e6cb2a821..343e426cc 100644 --- a/software/configurations/configuration_octopi_v2.ini +++ b/software/configurations/configuration_octopi_v2.ini @@ -1,11 +1,17 @@ [GENERAL] rotate_image_angle = None -flip_image =Vertical +flip_image = None _flip_image_options=[Vertical, Horizontal, Both] camera_reverse_x = False -_camera_reverse_x_options = [True,False] +_camera_reverse_x_options = [True, False] camera_reverse_y = False _camera_reverse_y_options = [True, False] +use_napari_for_live_view = False +use_napari_for_multipoint = False +use_napari_for_tiled_display = False +is_hcs = False +enable_stitcher = False +do_fluorescence_rtp = False stage_movement_sign_x = -1 stage_movement_sign_y = 1 stage_movement_sign_z = -1 @@ -67,15 +73,18 @@ _homing_enabled_y_options=[True,False] homing_enabled_z = True _homing_enabled_z_options=[True,False] sleep_time_s = 0.005 -led_matrix_r_factor = 1.8 +led_matrix_r_factor = 1.0 led_matrix_g_factor = 0.8 led_matrix_b_factor = 0.7 -default_saving_path = /Desktop/Data -multipoint_autofocus_channel = BF LED matrix left half +default_saving_path = /Downloads +multipoint_autofocus_channel = BF LED matrix full 
multipoint_autofocus_enable_by_default = True -multipoint_bf_saving_option = Green Channel Only +multipoint_bf_saving_option = RGB2GRAY +multipoint_use_piezo_for_zstacks = False +z_stacking_config = FROM CENTER + run_custom_multipoint = False -default_display_crop = 60 +default_display_crop = 90 camera_pixel_size_um = {"IMX226":1.85,"IMX250":3.45,"IMX252":3.45,"PYTHON300":4.8} objectives = {"2x":{"magnification":2, "NA":0.10, "tube_lens_f_mm":180},"4x":{"magnification":4, "NA":0.13, "tube_lens_f_mm":180}, "10x":{"magnification":10, "NA":0.25, "tube_lens_f_mm":180}, "10x (Mitutoyo)":{"magnification":10, "NA":0.25, "tube_lens_f_mm":200}, "20x (Boli)":{"magnification":20, "NA":0.4, "tube_lens_f_mm":180}, "20x (Nikon)":{"magnification":20, "NA":0.45, "tube_lens_f_mm":200}, "40x":{"magnification":40, "NA":0.6, "tube_lens_f_mm":180}} tube_lens_mm = 50 @@ -85,25 +94,28 @@ default_objective = 10x (Mitutoyo) _default_objective_options=[2x,4x,10x,10x (Mitutoyo), 20x (Boli), 20x (Nikon), 40x] controller_version = Teensy -sort_during_multipoint=False -_sort_during_multipoint_options=[True,False] +sort_during_multipoint = True +_sort_during_multipoint_options = [True,False] -disp_th_during_multipoint=0.9 +classification_th = 0.3 +disp_th_during_multipoint = 0.3 -classification_model_path=/home/cephla/Documents/tmp/model_perf_r34_b32.pt -segmentation_model_path=models/m2unet_model_flat_erode1_wdecay5_smallbatch/model_4000_11.pth -classification_test_mode=False -_classification_test_mode_options=[True,False] +two_classification_models = True +classification_model_path = models/resnet18_en/version1/best.pt +classification_model_path2 = models/resnet18_en/version2/best.pt +segmentation_model_path = models/m2unet_model_flat_erode1_wdecay5_smallbatch/model_4000_11.pth +classification_test_mode = True +_classification_test_mode_options = [True,False] -segmentation_crop=1500 +segmentation_crop = 1500 -default_z_pos_mm =6.5 +default_z_pos_mm = 6.5 -use_trt_segmentation=False 
"""Driver for the NL5 confocal scan head (via the vendor RCM API DLL).

User-adjustable settings (scan amplitude, offsets, FOV width, exposure
delay, line speed) are persisted to ``NL5_settings.json`` in the working
directory so they survive application restarts.  ``NL5_Simulation``
mirrors the real driver's interface so the rest of the application can
run without hardware attached.
"""

import json

# File used to persist scan-head settings between runs.
SETTINGS_FILE = 'NL5_settings.json'

# Defaults applied when the settings file is missing or unreadable.
_DEFAULT_SETTINGS = {
    "scan_amplitude": 70.0,
    "offset_x": 0.0,
    "bypass_offset": 0.0,
    "fov_x": 2048,
    "exposure_delay_ms": 30,
    "line_speed": 3000,
}


class NL5:
    """Hardware driver: forwards each setting to the RCM API DLL."""

    def __init__(self):
        # Imported lazily so that this module (and NL5_Simulation) can be
        # used on machines where the RCM API is not installed.
        import control.RCM_API as RCM_API
        self.rcm = RCM_API.RCM_API()
        self.rcm.initialize_device(simulated=False)
        self.load_settings()

    def set_scan_amplitude(self, amplitude):
        """Set the X scan amplitude (remembered for save_settings())."""
        self.scan_amplitude = amplitude
        self.rcm.set_float_parameter(self.rcm.AMPLITUDE_X, amplitude)

    def set_offset_x(self, offset_x):
        self.offset_x = offset_x
        self.rcm.set_float_parameter(self.rcm.OFFSET_SCAN_X, offset_x)

    def start_acquisition(self):
        self.rcm.start_acquisition()

    def start_continuous_acquisition(self):
        self.rcm.start_acquisition()

    def stop_continuous_acquisition(self):
        self.rcm.stop_continuous_acquisition()

    def set_bypass(self, enabled):
        # The RCM API expects an integer flag rather than a bool.
        self.rcm.set_bypass(1 if enabled else 0)

    def set_active_channel(self, channel):
        """Select laser `channel` (1-4) and deselect the other three."""
        self.active_channel = channel
        for i in range(1, 5):
            self.rcm.set_integer_parameter(getattr(self.rcm, f'LASER_{i}_SELECTED'), 1 if i == channel else 0)

    def set_laser_power(self, channel, power):
        self.rcm.set_integer_parameter(getattr(self.rcm, f'LASER_{channel}_POWER'), power)

    def set_bypass_offset(self, offset):
        self.bypass_offset = offset
        self.rcm.set_float_parameter(self.rcm.BYPASS_OFFSET, offset)

    def set_line_speed(self, speed, save_setting=False):
        self.line_speed = speed
        self.rcm.set_integer_parameter(self.rcm.LINE_FREQUENCY, speed)  # speed in mrad/s
        if save_setting:
            self.save_settings()

    def set_fov_x(self, fov_x):
        self.fov_x = fov_x
        self.rcm.set_integer_parameter(self.rcm.FIELD_OF_VIEW_X, fov_x)
        self.save_settings()

    def set_exposure_delay(self, exposure_delay_ms):
        self.exposure_delay_ms = exposure_delay_ms
        self.rcm.set_integer_parameter(self.rcm.EXPOSURE_DELAY, exposure_delay_ms)

    def load_settings(self):
        """Populate settings from SETTINGS_FILE, falling back to defaults.

        A missing or corrupt settings file yields the defaults instead of
        raising, so a bad file can never prevent startup.
        """
        try:
            with open(SETTINGS_FILE, 'r') as file:
                settings = json.load(file)
        except (FileNotFoundError, json.JSONDecodeError):
            settings = {}
        for key, default in _DEFAULT_SETTINGS.items():
            setattr(self, key, settings.get(key, default))

    def save_settings(self):
        """Write the current settings to SETTINGS_FILE."""
        settings = {key: getattr(self, key) for key in _DEFAULT_SETTINGS}
        with open(SETTINGS_FILE, 'w') as file:
            json.dump(settings, file)


class NL5_Simulation:
    """Drop-in stand-in for NL5 that records state but drives no hardware."""

    def __init__(self):
        self.load_settings()

    def set_scan_amplitude(self, amplitude):
        self.scan_amplitude = amplitude

    def set_offset_x(self, offset_x):
        self.offset_x = offset_x

    def start_acquisition(self):
        pass

    def start_continuous_acquisition(self):
        pass

    def stop_continuous_acquisition(self):
        pass

    def set_bypass(self, enabled):
        pass

    def set_active_channel(self, channel):
        pass

    def set_laser_power(self, channel, power):
        pass

    def set_bypass_offset(self, offset):
        self.bypass_offset = offset

    def set_line_speed(self, speed, save_setting=False):
        self.line_speed = speed
        if save_setting:
            self.save_settings()

    def set_fov_x(self, fov_x):
        self.fov_x = fov_x
        self.save_settings()

    def set_exposure_delay(self, exposure_delay_ms):
        self.exposure_delay_ms = exposure_delay_ms

    def load_settings(self):
        """Same persistence behavior as NL5.load_settings."""
        try:
            with open(SETTINGS_FILE, 'r') as file:
                settings = json.load(file)
        except (FileNotFoundError, json.JSONDecodeError):
            settings = {}
        for key, default in _DEFAULT_SETTINGS.items():
            setattr(self, key, settings.get(key, default))

    def save_settings(self):
        """Same persistence behavior as NL5.save_settings."""
        settings = {key: getattr(self, key) for key in _DEFAULT_SETTINGS}
        with open(SETTINGS_FILE, 'w') as file:
            json.dump(settings, file)
from PyQt5.QtWidgets import (
    QDialog,
    QDialogButtonBox,
    QDoubleSpinBox,
    QGridLayout,
    QHBoxLayout,
    QLabel,
    QPushButton,
    QSpinBox,
    QVBoxLayout,
    QWidget,
)


class NL5SettingsDialog(QDialog):
    """Modal editor for the persisted NL5 alignment settings.

    Values are only pushed to the device (and saved) when the dialog is
    accepted; cancelling discards the edits.
    """

    def __init__(self, nl5):
        super().__init__()
        self.nl5 = nl5
        self.setWindowTitle("NL5 Settings")

        grid = QGridLayout()

        # Spin boxes pre-loaded with the current device settings.
        self.scan_amplitude_input = QDoubleSpinBox()
        self.scan_amplitude_input.setValue(self.nl5.scan_amplitude)

        self.offset_x_input = QDoubleSpinBox()
        self.offset_x_input.setMinimum(-30)
        self.offset_x_input.setValue(self.nl5.offset_x)

        self.bypass_offset_input = QDoubleSpinBox()
        self.bypass_offset_input.setMinimum(-30)
        self.bypass_offset_input.setValue(self.nl5.bypass_offset)

        rows = [
            ("Scan Amplitude", self.scan_amplitude_input),
            ("Offset X", self.offset_x_input),
            ("Bypass Offset", self.bypass_offset_input),
        ]
        for row, (caption, box) in enumerate(rows):
            grid.addWidget(QLabel(caption), row, 0)
            grid.addWidget(box, row, 1)

        # Standard OK/Cancel pair wired to accept/reject.
        buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        grid.addWidget(buttons, 3, 0, 1, 2)

        self.setLayout(grid)

    def accept(self):
        """Apply the edited values to the device, persist them, then close."""
        self.nl5.set_scan_amplitude(self.scan_amplitude_input.value())
        self.nl5.set_offset_x(self.offset_x_input.value())
        self.nl5.set_bypass_offset(self.bypass_offset_input.value())
        self.nl5.save_settings()
        super().accept()


class NL5Widget(QWidget):
    """Control panel exposing the commonly used NL5 scan-head parameters."""

    def __init__(self, nl5):
        super().__init__()
        self.nl5 = nl5

        outer = QVBoxLayout()
        outer.addLayout(self._build_parameter_row())
        outer.addLayout(self._build_action_row())
        self.setLayout(outer)

    def _build_parameter_row(self):
        # Row of spin boxes: exposure delay, line speed, FOV width.
        row = QHBoxLayout()

        row.addWidget(QLabel("Exposure Delay"))
        self.exposure_delay_input = QSpinBox()
        self.exposure_delay_input.setValue(self.nl5.exposure_delay_ms)
        self.exposure_delay_input.setSuffix(' ms')
        self.exposure_delay_input.valueChanged.connect(self.update_exposure_delay)
        row.addWidget(self.exposure_delay_input)
        row.addStretch()

        row.addWidget(QLabel("Line Speed"))
        self.line_speed_input = QSpinBox()
        self.line_speed_input.setMaximum(20000)
        self.line_speed_input.setValue(self.nl5.line_speed)
        self.line_speed_input.setSuffix(' mrad/s')
        self.line_speed_input.valueChanged.connect(self.update_line_speed)
        row.addWidget(self.line_speed_input)
        row.addStretch()

        row.addWidget(QLabel("FOV"))
        self.fov_x_input = QSpinBox()
        self.fov_x_input.setMaximum(4000)
        self.fov_x_input.setValue(self.nl5.fov_x)
        self.fov_x_input.setSuffix(' px')
        self.fov_x_input.valueChanged.connect(self.update_fov_x)
        row.addWidget(self.fov_x_input)

        return row

    def _build_action_row(self):
        # Row of buttons: bypass toggle and the settings dialog launcher.
        row = QHBoxLayout()

        self.bypass_button = QPushButton("Enable Bypass")
        self.bypass_button.setCheckable(True)
        self.bypass_button.toggled.connect(self.update_bypass)
        row.addWidget(self.bypass_button)

        # Created but deliberately not added to any layout; update_bypass
        # still toggles its enabled state.
        self.start_acquisition_button = QPushButton("Start Acquisition")
        self.start_acquisition_button.clicked.connect(self.nl5.start_acquisition)

        self.settings_button = QPushButton("Edit NL5 Settings")
        self.settings_button.clicked.connect(self.show_settings_dialog)
        row.addWidget(self.settings_button)

        return row

    def show_settings_dialog(self):
        NL5SettingsDialog(self.nl5).exec_()

    def update_bypass(self, checked):
        # While bypass is active, starting an acquisition is disallowed.
        self.nl5.set_bypass(checked)
        self.start_acquisition_button.setEnabled(not checked)

    def update_exposure_delay(self, value):
        self.nl5.set_exposure_delay(value)

    def update_line_speed(self, value):
        self.nl5.set_line_speed(value)

    def update_fov_x(self, value):
        self.nl5.set_fov_x(value)
"""Thin ctypes wrapper around the vendor ``RCM_API.dll``.

The header file ``RCM_API.h`` is parsed at runtime: integer ``#define``
macros become instance attributes, and every function prototype found in
the header is bound on the instance as a callable with argtypes derived
from ``C_TO_CTYPES``.
"""

import ctypes
import re
from ctypes import c_int, create_string_buffer

# Mapping from C type names (as they appear in the header) to ctypes types.
C_TO_CTYPES = {
    'void': None,
    'int': ctypes.c_int,
    'char': ctypes.c_char,
    'char*': ctypes.c_char_p,
    'bool': ctypes.c_bool,
    'double': ctypes.c_double,
    # Add more mappings as needed
}

# Compiled once at import time rather than on every call.
_MACRO_RE = re.compile(r'#define\s+(\w+)\s+(\d+)')
_PROTO_RE = re.compile(r'\b(\w[\w\s\*]+)\s+(\w+)\s*\(([^)]*)\)\s*;')

# Size of the scratch buffers used for string out-parameters.
_STRING_BUFFER_SIZE = 100


def extract_macros_from_header(file_path):
    """Return {name: int_value} for each simple ``#define NAME <int>``.

    Lines containing DLL-export boilerplate ("_declspec", "_BUILD_DLL_")
    are skipped; non-integer defines are ignored.
    """
    macros = {}
    with open(file_path, 'r') as file:
        for line in file:
            if "_declspec" in line or "_BUILD_DLL_" in line:
                continue
            match = _MACRO_RE.match(line)
            if match:
                name, value = match.groups()
                macros[name] = int(value)
    return macros


def extract_functions_from_header(file_path):
    """Return a list of {'name', 'return_type', 'arg_types'} dicts.

    One entry per function prototype found in the header.  Unrecognized
    parameter types fall back to ``c_void_p``; the return type is forced
    to ``c_int`` because every RCM API call returns a status code.
    """
    functions = []
    with open(file_path, 'r') as file:
        content = file.read()
    for ret_type, func_name, params in _PROTO_RE.findall(content):
        param_list = []
        if params.strip():  # skip the parameter loop for "f()" prototypes
            for param in params.split(','):
                param = param.strip()
                param_type = ' '.join(param.split()[:-1])
                param_list.append(C_TO_CTYPES.get(param_type.strip(), ctypes.c_void_p))
        functions.append({
            'name': func_name,
            'return_type': c_int,
            'arg_types': param_list,
        })
    return functions


class RCM_API:
    """Dynamic binding of the RCM DLL driven by its header file.

    Paths default to the historical values (header and DLL next to the
    current working directory, Windows-style) but can be overridden.
    """

    def __init__(self, header_path='.\\RCM_API.h', dll_path='.\\RCM_API.dll'):
        # Parse constants and prototypes out of the header.
        macros = extract_macros_from_header(header_path)
        functions = extract_functions_from_header(header_path)

        # Load the DLL.
        self.rcm_api = ctypes.CDLL(dll_path)

        # Expose every integer macro as an instance attribute.
        for name, value in macros.items():
            setattr(self, name, int(value))
        # NOTE(review): this overrides any ERR_OK coming from the header --
        # confirm that -1 really is the library's success status.
        self.ERR_OK = -1

        # Bind each prototype found in the header onto this instance.
        for func in functions:
            function = getattr(self.rcm_api, func['name'])
            function.restype = func['return_type']
            function.argtypes = func['arg_types']
            setattr(self, func['name'], function)

    def get_string_parameter(self, param: int):
        """Return the string value of `param`, or None on failure."""
        buffer = create_string_buffer(_STRING_BUFFER_SIZE)
        result = self.getStringParameter(param, buffer)
        if result == self.ERR_OK:
            return buffer.value.decode()
        else:
            return None

    def set_integer_parameter(self, param: int, value: int):
        return self.setIntegerParameter(param, value)

    def set_float_parameter(self, param: int, value: float):
        return self.setFloatParameter(param, value)

    def initialize_device(self, simulated: bool):
        return self.initializeDevice(simulated)

    def get_device_type(self):
        return self.getDeviceType()

    def start_acquisition(self):
        return self.startAcquisition()

    def set_bypass(self, mode: int):
        return self.setBypass(mode)

    def start_continuous_acquisition(self):
        return self.startContinuousAcquisition()

    def stop_continuous_acquisition(self):
        return self.stopContinuousAcquisition()

    def get_full_error(self):
        """Return (error_code, message) for the last error, or None."""
        err_code = c_int()
        buffer = create_string_buffer(_STRING_BUFFER_SIZE)
        result = self.getFullError(ctypes.byref(err_code), buffer)
        if result == self.ERR_OK:
            return err_code.value, buffer.value.decode()
        else:
            return None
OleDLL("./lib/x86/TUCam.dll") +# 64bit +TUSDKdll = cdll.LoadLibrary("libTUCam.so.1") + +# class typedef enum TUCAM status: +class TUCAMRET(Enum): + TUCAMRET_SUCCESS = 0x00000001 + TUCAMRET_FAILURE = 0x80000000 + + # initialization error + TUCAMRET_NO_MEMORY = 0x80000101 + TUCAMRET_NO_RESOURCE = 0x80000102 + TUCAMRET_NO_MODULE = 0x80000103 + TUCAMRET_NO_DRIVER = 0x80000104 + TUCAMRET_NO_CAMERA = 0x80000105 + TUCAMRET_NO_GRABBER = 0x80000106 + TUCAMRET_NO_PROPERTY = 0x80000107 + + TUCAMRET_FAILOPEN_CAMERA = 0x80000110 + TUCAMRET_FAILOPEN_BULKIN = 0x80000111 + TUCAMRET_FAILOPEN_BULKOUT = 0x80000112 + TUCAMRET_FAILOPEN_CONTROL = 0x80000113 + TUCAMRET_FAILCLOSE_CAMERA = 0x80000114 + + TUCAMRET_FAILOPEN_FILE = 0x80000115 + TUCAMRET_FAILOPEN_CODEC = 0x80000116 + TUCAMRET_FAILOPEN_CONTEXT = 0x80000117 + + # status error + TUCAMRET_INIT = 0x80000201 + TUCAMRET_BUSY = 0x80000202 + TUCAMRET_NOT_INIT = 0x80000203 + TUCAMRET_EXCLUDED = 0x80000204 + TUCAMRET_NOT_BUSY = 0x80000205 + TUCAMRET_NOT_READY = 0x80000206 + # wait error + TUCAMRET_ABORT = 0x80000207 + TUCAMRET_TIMEOUT = 0x80000208 + TUCAMRET_LOSTFRAME = 0x80000209 + TUCAMRET_MISSFRAME = 0x8000020A + TUCAMRET_USB_STATUS_ERROR = 0x8000020B + + # calling error + TUCAMRET_INVALID_CAMERA = 0x80000301 + TUCAMRET_INVALID_HANDLE = 0x80000302 + TUCAMRET_INVALID_OPTION = 0x80000303 + TUCAMRET_INVALID_IDPROP = 0x80000304 + TUCAMRET_INVALID_IDCAPA = 0x80000305 + TUCAMRET_INVALID_IDPARAM = 0x80000306 + TUCAMRET_INVALID_PARAM = 0x80000307 + TUCAMRET_INVALID_FRAMEIDX = 0x80000308 + TUCAMRET_INVALID_VALUE = 0x80000309 + TUCAMRET_INVALID_EQUAL = 0x8000030A + TUCAMRET_INVALID_CHANNEL = 0x8000030B + TUCAMRET_INVALID_SUBARRAY = 0x8000030C + TUCAMRET_INVALID_VIEW = 0x8000030D + TUCAMRET_INVALID_PATH = 0x8000030E + TUCAMRET_INVALID_IDVPROP = 0x8000030F + + TUCAMRET_NO_VALUETEXT = 0x80000310 + TUCAMRET_OUT_OF_RANGE = 0x80000311 + + TUCAMRET_NOT_SUPPORT = 0x80000312 + TUCAMRET_NOT_WRITABLE = 0x80000313 + TUCAMRET_NOT_READABLE = 0x80000314 + + 
TUCAMRET_WRONG_HANDSHAKE = 0x80000410 + TUCAMRET_NEWAPI_REQUIRED = 0x80000411 + + TUCAMRET_ACCESSDENY = 0x80000412 + + TUCAMRET_NO_CORRECTIONDATA = 0x80000501 + + TUCAMRET_INVALID_PRFSETS = 0x80000601 + TUCAMRET_INVALID_IDPPROP = 0x80000602 + + TUCAMRET_DECODE_FAILURE = 0x80000701 + TUCAMRET_COPYDATA_FAILURE = 0x80000702 + TUCAMRET_ENCODE_FAILURE = 0x80000703 + TUCAMRET_WRITE_FAILURE = 0x80000704 + + # camera or bus trouble + TUCAMRET_FAIL_READ_CAMERA = 0x83001001 + TUCAMRET_FAIL_WRITE_CAMERA = 0x83001002 + TUCAMRET_OPTICS_UNPLUGGED = 0x83001003 + + TUCAMRET_RECEIVE_FINISH = 0x00000002 + TUCAMRET_EXTERNAL_TRIGGER = 0x00000003 + +# typedef enum information id +class TUCAM_IDINFO(Enum): + TUIDI_BUS = 0x01 + TUIDI_VENDOR = 0x02 + TUIDI_PRODUCT = 0x03 + TUIDI_VERSION_API = 0x04 + TUIDI_VERSION_FRMW = 0x05 + TUIDI_VERSION_FPGA = 0x06 + TUIDI_VERSION_DRIVER = 0x07 + TUIDI_TRANSFER_RATE = 0x08 + TUIDI_CAMERA_MODEL = 0x09 + TUIDI_CURRENT_WIDTH = 0x0A + TUIDI_CURRENT_HEIGHT = 0x0B + TUIDI_CAMERA_CHANNELS = 0x0C + TUIDI_BCDDEVICE = 0x0D + TUIDI_TEMPALARMFLAG = 0x0E + TUIDI_UTCTIME = 0x0F + TUIDI_LONGITUDE_LATITUDE = 0x10 + TUIDI_WORKING_TIME = 0x11 + TUIDI_FAN_SPEED = 0x12 + TUIDI_FPGA_TEMPERATURE = 0x13 + TUIDI_PCBA_TEMPERATURE = 0x14 + TUIDI_ENV_TEMPERATURE = 0x15 + TUIDI_DEVICE_ADDRESS = 0x16 + TUIDI_USB_PORT_ID = 0x17 + TUIDI_CONNECTSTATUS = 0x18 + TUIDI_TOTALBUFFRAMES = 0x19 + TUIDI_CURRENTBUFFRAMES = 0x1A + TUIDI_HDRRATIO = 0x1B + TUIDI_HDRKHVALUE = 0x1C + TUIDI_ZEROTEMPERATURE_VALUE = 0x1D + TUIDI_ENDINFO = 0x1F + +# typedef enum capability id +class TUCAM_IDCAPA(Enum): + TUIDC_RESOLUTION = 0x00 + TUIDC_PIXELCLOCK = 0x01 + TUIDC_BITOFDEPTH = 0x02 + TUIDC_ATEXPOSURE = 0x03 + TUIDC_HORIZONTAL = 0x04 + TUIDC_VERTICAL = 0x05 + TUIDC_ATWBALANCE = 0x06 + TUIDC_FAN_GEAR = 0x07 + TUIDC_ATLEVELS = 0x08 + TUIDC_SHIFT = 0x09 + TUIDC_HISTC = 0x0A + TUIDC_CHANNELS = 0x0B + TUIDC_ENHANCE = 0x0C + TUIDC_DFTCORRECTION = 0x0D + TUIDC_ENABLEDENOISE = 0x0E + TUIDC_FLTCORRECTION = 0x0F + 
TUIDC_RESTARTLONGTM = 0x10 + TUIDC_DATAFORMAT = 0x11 + TUIDC_DRCORRECTION = 0x12 + TUIDC_VERCORRECTION = 0x13 + TUIDC_MONOCHROME = 0x14 + TUIDC_BLACKBALANCE = 0x15 + TUIDC_IMGMODESELECT = 0x16 + TUIDC_CAM_MULTIPLE = 0x17 + TUIDC_ENABLEPOWEEFREQUENCY = 0x18 + TUIDC_ROTATE_R90 = 0x19 + TUIDC_ROTATE_L90 = 0x1A + TUIDC_NEGATIVE = 0x1B + TUIDC_HDR = 0x1C + TUIDC_ENABLEIMGPRO = 0x1D + TUIDC_ENABLELED = 0x1E + TUIDC_ENABLETIMESTAMP = 0x1F + TUIDC_ENABLEBLACKLEVEL = 0x20 + TUIDC_ATFOCUS = 0x21 + TUIDC_ATFOCUS_STATUS = 0x22 + TUIDC_PGAGAIN = 0x23 + TUIDC_ATEXPOSURE_MODE = 0x24 + TUIDC_BINNING_SUM = 0x25 + TUIDC_BINNING_AVG = 0x26 + TUIDC_FOCUS_C_MOUNT = 0x27 + TUIDC_ENABLEPI = 0x28 + TUIDC_ATEXPOSURE_STATUS = 0x29 + TUIDC_ATWBALANCE_STATUS = 0x2A + TUIDC_TESTIMGMODE = 0x2B + TUIDC_SENSORRESET = 0x2C + TUIDC_PGAHIGH = 0x2D + TUIDC_PGALOW = 0x2E + TUIDC_PIXCLK1_EN = 0x2F + TUIDC_PIXCLK2_EN = 0x30 + TUIDC_ATLEVELGEAR = 0x31 + TUIDC_ENABLEDSNU = 0x32 + TUIDC_ENABLEOVERLAP = 0x33 + TUIDC_CAMSTATE = 0x34 + TUIDC_ENABLETRIOUT = 0x35 + TUIDC_ROLLINGSCANMODE = 0x36 + TUIDC_ROLLINGSCANLTD = 0x37 + TUIDC_ROLLINGSCANSLIT = 0x38 + TUIDC_ROLLINGSCANDIR = 0x39 + TUIDC_ROLLINGSCANRESET = 0x3A + TUIDC_ENABLETEC = 0x3B + TUIDC_ENABLEBLC = 0x3C + TUIDC_ENABLETHROUGHFOG = 0x3D + TUIDC_ENABLEGAMMA = 0x3E + TUIDC_ENABLEFILTER = 0x3F + TUIDC_ENABLEHLC = 0x40 + TUIDC_CAMPARASAVE = 0x41 + TUIDC_CAMPARALOAD = 0x42 + TUIDC_ENABLEISP = 0x43 + TUIDC_BUFFERHEIGHT = 0x44 + TUIDC_VISIBILITY = 0x45 + TUIDC_SHUTTER = 0x46 + TUIDC_SIGNALFILTER = 0x47 + TUIDC_ENDCAPABILITY = 0x48 + + +# typedef enum property id +class TUCAM_IDPROP(Enum): + TUIDP_GLOBALGAIN = 0x00 + TUIDP_EXPOSURETM = 0x01 + TUIDP_BRIGHTNESS = 0x02 + TUIDP_BLACKLEVEL = 0x03 + TUIDP_TEMPERATURE = 0x04 + TUIDP_SHARPNESS = 0x05 + TUIDP_NOISELEVEL = 0x06 + TUIDP_HDR_KVALUE = 0x07 + + # image process property + TUIDP_GAMMA = 0x08 + TUIDP_CONTRAST = 0x09 + TUIDP_LFTLEVELS = 0x0A + TUIDP_RGTLEVELS = 0x0B + TUIDP_CHNLGAIN = 0x0C + TUIDP_SATURATION = 
0x0D + TUIDP_CLRTEMPERATURE = 0x0E + TUIDP_CLRMATRIX = 0x0F + TUIDP_DPCLEVEL = 0x10 + TUIDP_BLACKLEVELHG = 0x11 + TUIDP_BLACKLEVELLG = 0x12 + TUIDP_POWEEFREQUENCY = 0x13 + TUIDP_HUE = 0x14 + TUIDP_LIGHT = 0x15 + TUIDP_ENHANCE_STRENGTH = 0x16 + TUIDP_NOISELEVEL_3D = 0x17 + TUIDP_FOCUS_POSITION = 0x18 + + TUIDP_FRAME_RATE = 0x19 + TUIDP_START_TIME = 0x1A + TUIDP_FRAME_NUMBER = 0x1B + TUIDP_INTERVAL_TIME = 0x1C + TUIDP_GPS_APPLY = 0x1D + TUIDP_AMB_TEMPERATURE = 0x1E + TUIDP_AMB_HUMIDITY = 0x1F + TUIDP_AUTO_CTRLTEMP = 0x20 + + TUIDP_AVERAGEGRAY = 0x21 + TUIDP_AVERAGEGRAYTHD = 0x22 + TUIDP_ENHANCETHD = 0x23 + TUIDP_ENHANCEPARA = 0x24 + TUIDP_EXPOSUREMAX = 0x25 + TUIDP_EXPOSUREMIN = 0x26 + TUIDP_GAINMAX = 0x27 + TUIDP_GAINMIN = 0x28 + TUIDP_THROUGHFOGPARA = 0x29 + TUIDP_ATLEVEL_PERCENTAGE = 0x2A + TUIDP_TEMPERATURE_TARGET = 0x2B + + TUIDP_PIXELRATIO = 0x2C + + TUIDP_ENDPROPERTY = 0x2D + +# typedef enum calculate roi id +class TUCAM_IDCROI(Enum): + TUIDCR_WBALANCE = 0x00 + TUIDCR_BBALANCE = 0x01 + TUIDCR_BLOFFSET = 0x02 + TUIDCR_FOCUS = 0x03 + TUIDCR_EXPOSURETM = 0x04 + TUIDCR_END = 0x05 + +# typedef enum the capture mode +class TUCAM_CAPTURE_MODES(Enum): + TUCCM_SEQUENCE = 0x00 + TUCCM_TRIGGER_STANDARD = 0x01 + TUCCM_TRIGGER_SYNCHRONOUS = 0x02 + TUCCM_TRIGGER_GLOBAL = 0x03 + TUCCM_TRIGGER_SOFTWARE = 0x04 + TUCCM_TRIGGER_GPS = 0x05 + TUCCM_TRIGGER_STANDARD_NONOVERLAP = 0x11 + +# typedef enum the image formats +class TUIMG_FORMATS(Enum): + TUFMT_RAW = 0x01 + TUFMT_TIF = 0x02 + TUFMT_PNG = 0x04 + TUFMT_JPG = 0x08 + TUFMT_BMP = 0x10 + +# typedef enum the register formats +class TUREG_FORMATS(Enum): + TUREG_SN = 0x01 + TUREG_DATA = 0x02 + +# trigger mode +# typedef enum the trigger exposure time mode +class TUCAM_TRIGGER_EXP(Enum): + TUCTE_EXPTM = 0x00 + TUCTE_WIDTH = 0x01 + +# typedef enum the trigger edge mode +class TUCAM_TRIGGER_EDGE(Enum): + TUCTD_RISING = 0x01 + TUCTD_FAILING = 0x00 + +# outputtrigger mode +# typedef enum the output trigger port mode +class 
TUCAM_OUTPUTTRG_PORT(Enum): + TUPORT_ONE = 0x00 + TUPORT_TWO = 0x01 + TUPORT_THREE = 0x02 + +# typedef enum the output trigger kind mode +class TUCAM_OUTPUTTRG_KIND(Enum): + TUOPT_GND = 0x00 + TUOPT_VCC = 0x01 + TUOPT_IN = 0x02 + TUOPT_EXPSTART = 0x03 + TUOPT_EXPGLOBAL = 0x04 + TUOPT_READEND = 0x05 + +# typedef enum the output trigger edge mode +class TUCAM_OUTPUTTRG_EDGE(Enum): + TUOPT_RISING = 0x00 + TUOPT_FAILING = 0x01 + +# typedef enum the frame formats +class TUFRM_FORMATS(Enum): + TUFRM_FMT_RAW = 0x10 + TUFRM_FMT_USUAl = 0x11 + TUFRM_FMT_RGB888 = 0x12 + +# element type +class TUELEM_TYPE(Enum): + TU_ElemValue = 0x00 + TU_ElemBase = 0x01 + TU_ElemInteger = 0x02 + TU_ElemBoolean = 0x03 + TU_ElemCommand = 0x04 + TU_ElemFloat = 0x05 + TU_ElemString = 0x06 + TU_ElemRegister = 0x07 + TU_ElemCategory = 0x08 + TU_ElemEnumeration = 0x09 + TU_ElemEnumEntry = 0x0A + TU_ElemPort = 0x0B + +# access mode of a node +class TUACCESS_MODE(Enum): + TU_AM_NI = 0x00 + TU_AM_NA = 0x01 + TU_AM_WO = 0x02 + TU_AM_RO = 0x03 + TU_AM_RW = 0x04 + +class TU_VISIBILITY(Enum): + TU_VS_Beginner = 0x00 + TU_VS_Expert = 0x01 + TU_VS_Guru = 0x02 + TU_VS_Invisible = 0x03 + TU_VS_UndefinedVisibility = 0x10 + +class TU_REPRESENTATION(Enum): + TU_REPRESENTATION_LINEAR = 0x00 + TU_REPRESENTATION_LOGARITHMIC = 0x01 + TU_REPRESENTATION_BOOLEAN = 0x02 + TU_REPRESENTATION_PURE_NUMBER = 0x03 + TU_REPRESENTATION_HEX_NUMBER = 0x04 + TU_REPRESENTATION_UNDEFINDED = 0x05 + TU_REPRESENTATION_IPV4ADDRESS = 0x06 + TU_REPRESENTATION_MACADDRESS = 0x07 + TU_REPRESENTATION_TIMESTAMP = 0x08 + TU_REPRESENTATION_PTPFRAMECNT = 0x09 + +class TUXML_DEVICE(Enum): + TU_CAMERA_XML = 0x00 + TU_CAMERALINK_XML = 0x01 + +# struct defines +# the camera initialize struct +class TUCAM_INIT(Structure): + _fields_ = [ + ("uiCamCount", c_uint32), + ("pstrConfigPath", c_char_p) # c_char * 8 c_char_p + ] +# the camera open struct +class TUCAM_OPEN(Structure): + _fields_ = [ + ("uiIdxOpen", c_uint32), + ("hIdxTUCam", c_void_p) # 
("hIdxTUCam", c_void_p) + ] + +# the image open struct +class TUIMG_OPEN(Structure): + _fields_ = [ + ("pszfileName", c_void_p), + ("hIdxTUImg", c_void_p) + ] + +# the camera value text struct +class TUCAM_VALUE_INFO(Structure): + _fields_ = [ + ("nID", c_int32), + ("nValue", c_int32), + ("pText", c_char_p), + ("nTextSize", c_int32) + ] + +# the camera value text struct +class TUCAM_VALUE_TEXT(Structure): + _fields_ = [ + ("nID", c_int32), + ("dbValue", c_double), + ("pText", c_char_p), + ("nTextSize", c_int32) + ] + +# the camera capability attribute +class TUCAM_CAPA_ATTR(Structure): + _fields_ = [ + ("idCapa", c_int32), + ("nValMin", c_int32), + ("nValMax", c_int32), + ("nValDft", c_int32), + ("nValStep", c_int32) + ] + +# the camera property attribute +class TUCAM_PROP_ATTR(Structure): + _fields_ = [ + ("idProp", c_int32), + ("nIdxChn", c_int32), + ("dbValMin", c_double), + ("dbValMax", c_double), + ("dbValDft", c_double), + ("dbValStep", c_double) + ] + +# the camera roi attribute +class TUCAM_ROI_ATTR(Structure): + _fields_ = [ + ("bEnable", c_int32), + ("nHOffset", c_int32), + ("nVOffset", c_int32), + ("nWidth", c_int32), + ("nHeight", c_int32) + ] + +# the camera multi roi attribute +# the camera size attribute +class TUCAM_SIZE_ATTR(Structure): + _fields_ = [ + ("nHOffset", c_int32), + ("nVOffset", c_int32), + ("nWidth", c_int32), + ("nHeight", c_int32) + ] + +class TUCAM_MULTIROI_ATTR(Structure): + _fields_ = [ + ("bLimit", c_int32), + ("nROIStatus", c_int32), + ("sizeAttr", TUCAM_SIZE_ATTR) + ] + +# the camera roi calculate attribute +class TUCAM_CALC_ROI_ATTR(Structure): + _fields_ = [ + ("bEnable", c_int32), + ("idCalc", c_int32), + ("nHOffset", c_int32), + ("nVOffset", c_int32), + ("nWidth", c_int32), + ("nHeight", c_int32) + ] + +# the camera trigger attribute +class TUCAM_TRIGGER_ATTR(Structure): + _fields_ = [ + ("nTgrMode", c_int32), + ("nExpMode", c_int32), + ("nEdgeMode", c_int32), + ("nDelayTm", c_int32), + ("nFrames", c_int32), + 
("nBufFrames", c_int32) + ] + +# the camera trigger out attribute +class TUCAM_TRGOUT_ATTR(Structure): + _fields_ = [ + ("nTgrOutPort", c_int32), + ("nTgrOutMode", c_int32), + ("nEdgeMode", c_int32), + ("nDelayTm", c_int32), + ("nWidth", c_int32) + ] + +# the camera any bin attribute +class TUCAM_BIN_ATTR(Structure): + _fields_ = [ + ("bEnable", c_int32), + ("nMode", c_int32), + ("nWidth", c_int32), + ("nHeight", c_int32) + ] + +# Define the struct of image header +class TUCAM_IMG_HEADER(Structure): + _fields_ = [ + ("szSignature", c_char * 8), + ("usHeader", c_ushort), + ("usOffset", c_ushort), + ("usWidth", c_ushort), + ("usHeight", c_ushort), + ("uiWidthStep", c_uint), + ("ucDepth", c_ubyte), + ("ucFormat", c_ubyte), + ("ucChannels", c_ubyte), + ("ucElemBytes", c_ubyte), + ("ucFormatGet", c_ubyte), + ("uiIndex", c_uint), + ("uiImgSize", c_uint), + ("uiRsdSize", c_uint), + ("uiHstSize", c_uint), + ("pImgData", c_void_p), + ("pImgHist", c_void_p), + ("usLLevels", c_ushort), + ("usRLevels", c_ushort), + ("ucRsd1", c_char * 64), + ("dblExposure", c_double), + ("ucRsd2", c_char * 170), + ("dblTimeStamp", c_double), + ("dblTimeLast", c_double), + ("ucRsd3", c_char * 32), + ("ucGPSTimeStampYear", c_ubyte), + ("ucGPSTimeStampMonth", c_ubyte), + ("ucGPSTimeStampDay", c_ubyte), + ("ucGPSTimeStampHour", c_ubyte), + ("ucGPSTimeStampMin", c_ubyte), + ("ucGPSTimeStampSec", c_ubyte), + ("nGPSTimeStampNs", c_int), + ("ucRsd4", c_char * 639) + ] + +# the camera frame struct +class TUCAM_FRAME(Structure): + _fields_ = [ + ("szSignature", c_char * 8), + ("usHeader", c_ushort), + ("usOffset", c_ushort), + ("usWidth", c_ushort), + ("usHeight", c_ushort), + ("uiWidthStep", c_uint), + ("ucDepth", c_ubyte), + ("ucFormat", c_ubyte), + ("ucChannels", c_ubyte), + ("ucElemBytes", c_ubyte), + ("ucFormatGet", c_ubyte), + ("uiIndex", c_uint), + ("uiImgSize", c_uint), + ("uiRsdSize", c_uint), + ("uiHstSize", c_uint), + ("pBuffer", c_void_p) + ] + +# the camera frame struct +class 
TUCAM_RAWIMG_HEADER(Structure): + _fields_ = [ + ("usWidth", c_ushort), + ("usHeight", c_ushort), + ("usXOffset", c_ushort), + ("usYOffset", c_ushort), + ("usXPadding", c_ushort), + ("usYPadding", c_ushort), + ("usOffset", c_ushort), + ("ucDepth", c_ubyte), + ("ucChannels", c_ubyte), + ("ucElemBytes", c_ubyte), + ("uiIndex", c_uint), + ("uiImgSize", c_uint), + ("uiPixelFormat", c_uint), + ("dblExposure", c_double), + ("pImgData", c_void_p), + ("dblTimeStamp", c_double), + ("dblTimeLast", c_double) + ] + +# the file save struct +class TUCAM_FILE_SAVE(Structure): + _fields_ = [ + ("nSaveFmt", c_int32), + ("pstrSavePath", c_char_p), + ("pFrame", POINTER(TUCAM_FRAME)) + ] + +# the record save struct +class TUCAM_REC_SAVE(Structure): + _fields_ = [ + ("nCodec", c_int32), + ("pstrSavePath", c_char_p), + ("fFps", c_float) + ] + +# the register read/write struct +class TUCAM_REG_RW(Structure): + _fields_ = [ + ("nRegType", c_int32), + ("pBuf", c_char_p), + ("nBufSize", c_int32) + ] + +# the subtract background struct +class TUCAM_IMG_BACKGROUND(Structure): + _fields_ = [ + ("bEnable", c_int32), + ("ImgHeader", TUCAM_RAWIMG_HEADER) + ] + +# the math struct +class TUCAM_IMG_MATH(Structure): + _fields_ = [ + ("bEnable", c_int32), + ("nMode", c_int32), + ("usGray", c_ushort) + ] + +# the genicam node element +class TUCAM_VALUEINT(Structure): + _fields_ = [ + ("nVal", c_int64), + ("nMin", c_int64), + ("nMax", c_int64), + ("nStep", c_int64), + ("nDefault", c_int64) + ] + +class TUCAM_VALUEDOUBLE(Structure): + _fields_ = [ + ("dbVal", c_double), + ("dbMin", c_double), + ("dbMax", c_double), + ("dbStep", c_double), + ("dbDefault", c_double) + ] + +class TUCAM_UNION(Union): + _fields_ = [ + ("Int64", TUCAM_VALUEINT), + ("Double", TUCAM_VALUEDOUBLE) + ] + +class TUCAM_ELEMENT(Structure): + _fields_ = [ + ("IsLocked", c_uint8), + ("Level", c_uint8), + ("Representation", c_ushort), + ("Type", c_int32), #TUELEM_TYPE + ("Access", c_int32), #TUACCESS_MODE + ("Visibility", c_int32), 
#TU_VISIBILITY + ("nReserve", c_int32), + ("uValue", TUCAM_UNION), + ("pName", c_char_p), + ("pDisplayName", c_char_p), + ("pTransfer", c_char_p), + ("pDesc", c_char_p), + ("pUnit", c_char_p), + ("pEntries", ctypes.POINTER(ctypes.c_char_p)), + ("PollingTime", c_int64), + ("DisplayPrecision", c_int64) + ] + +BUFFER_CALLBACK = CFUNCTYPE(c_void_p) +CONTEXT_CALLBACK = CFUNCTYPE(c_void_p) + +# the API function +# Initialize uninitialize and misc +TUCAM_Api_Init = TUSDKdll.TUCAM_Api_Init +TUCAM_Api_Uninit = TUSDKdll.TUCAM_Api_Uninit +TUCAM_Dev_Open = TUSDKdll.TUCAM_Dev_Open +TUCAM_Dev_Open.argtypes = [POINTER(TUCAM_OPEN)] +TUCAM_Dev_Open.restype = TUCAMRET +TUCAM_Dev_Close = TUSDKdll.TUCAM_Dev_Close +TUCAM_Dev_Close.argtypes = [c_void_p] +TUCAM_Dev_Close.restype = TUCAMRET + +# Get some device information (VID/PID/Version) +TUCAM_Dev_GetInfo = TUSDKdll.TUCAM_Dev_GetInfo +TUCAM_Dev_GetInfo.argtypes = [c_void_p, POINTER(TUCAM_VALUE_INFO)] +TUCAM_Dev_GetInfo.restype = TUCAMRET +TUCAM_Dev_GetInfoEx = TUSDKdll.TUCAM_Dev_GetInfoEx +TUCAM_Dev_GetInfoEx.argtypes = [c_uint, POINTER(TUCAM_VALUE_INFO)] +TUCAM_Dev_GetInfoEx.restype = TUCAMRET + +# Capability control +TUCAM_Capa_GetAttr = TUSDKdll.TUCAM_Capa_GetAttr +TUCAM_Capa_GetAttr.argtypes = [c_void_p, POINTER(TUCAM_CAPA_ATTR)] +TUCAM_Capa_GetAttr.restype = TUCAMRET +TUCAM_Capa_GetValue = TUSDKdll.TUCAM_Capa_GetValue +TUCAM_Capa_GetValue.argtypes = [c_void_p, c_int32, c_void_p] +TUCAM_Capa_GetValue.restype = TUCAMRET +TUCAM_Capa_SetValue = TUSDKdll.TUCAM_Capa_SetValue +TUCAM_Capa_SetValue.argtypes = [c_void_p, c_int32, c_int32] +TUCAM_Capa_SetValue.restype = TUCAMRET +TUCAM_Capa_GetValueText = TUSDKdll.TUCAM_Capa_GetValueText +TUCAM_Capa_GetValueText.argtypes = [c_void_p, POINTER(TUCAM_VALUE_TEXT)] +TUCAM_Capa_GetValueText.restype = TUCAMRET + +# Property control +TUCAM_Prop_GetAttr = TUSDKdll.TUCAM_Prop_GetAttr +TUCAM_Prop_GetAttr.argtypes = [c_void_p, POINTER(TUCAM_PROP_ATTR)] +TUCAM_Prop_GetAttr.restype = 
TUCAMRET +TUCAM_Prop_GetValue = TUSDKdll.TUCAM_Prop_GetValue +TUCAM_Prop_GetValue.argtypes = [c_void_p, c_int32, c_void_p, c_int32] +TUCAM_Prop_GetValue.restype = TUCAMRET +TUCAM_Prop_SetValue = TUSDKdll.TUCAM_Prop_SetValue +TUCAM_Prop_SetValue.argtypes = [c_void_p, c_int32, c_double, c_int32] +TUCAM_Prop_SetValue.restype = TUCAMRET +TUCAM_Prop_GetValueText = TUSDKdll.TUCAM_Prop_GetValueText +TUCAM_Prop_GetValueText.argtypes = [c_void_p, POINTER(TUCAM_VALUE_TEXT), c_int32] +TUCAM_Prop_GetValueText.restype = TUCAMRET + +# Buffer control +TUCAM_Buf_Alloc = TUSDKdll.TUCAM_Buf_Alloc +TUCAM_Buf_Alloc.argtypes = [c_void_p, POINTER(TUCAM_FRAME)] +TUCAM_Buf_Alloc.restype = TUCAMRET +TUCAM_Buf_Release = TUSDKdll.TUCAM_Buf_Release +TUCAM_Buf_Release.argtypes = [c_void_p] +TUCAM_Buf_Release.restype = TUCAMRET +TUCAM_Buf_AbortWait = TUSDKdll.TUCAM_Buf_AbortWait +TUCAM_Buf_AbortWait.argtypes = [c_void_p] +TUCAM_Buf_AbortWait.restype = TUCAMRET +TUCAM_Buf_WaitForFrame = TUSDKdll.TUCAM_Buf_WaitForFrame +TUCAM_Buf_WaitForFrame.argtypes = [c_void_p, POINTER(TUCAM_FRAME), c_int32] +TUCAM_Buf_WaitForFrame.restype = TUCAMRET +TUCAM_Buf_CopyFrame = TUSDKdll.TUCAM_Buf_CopyFrame +TUCAM_Buf_CopyFrame.argtypes = [c_void_p, POINTER(TUCAM_FRAME)] +TUCAM_Buf_CopyFrame.restype = TUCAMRET + +# Buffer CallBack Function +TUCAM_Buf_DataCallBack = TUSDKdll.TUCAM_Buf_DataCallBack +TUCAM_Buf_DataCallBack.argtypes = [c_void_p, BUFFER_CALLBACK, c_void_p] +TUCAM_Buf_DataCallBack.restype = TUCAMRET +# Get Buffer Data +TUCAM_Buf_GetData = TUSDKdll.TUCAM_Buf_GetData +TUCAM_Buf_GetData.argtypes = [c_void_p, POINTER(TUCAM_RAWIMG_HEADER)] +TUCAM_Buf_GetData.restype = TUCAMRET + +# Capturing control +TUCAM_Cap_SetROI = TUSDKdll.TUCAM_Cap_SetROI +TUCAM_Cap_SetROI.argtypes = [c_void_p, TUCAM_ROI_ATTR] +TUCAM_Cap_SetROI.restype = TUCAMRET +TUCAM_Cap_GetROI = TUSDKdll.TUCAM_Cap_GetROI +TUCAM_Cap_GetROI.argtypes = [c_void_p, POINTER(TUCAM_ROI_ATTR)] +TUCAM_Cap_GetROI.restype = TUCAMRET + +# MultiROI 
+TUCAM_Cap_SetMultiROI = TUSDKdll.TUCAM_Cap_SetMultiROI +TUCAM_Cap_SetMultiROI.argtypes = [c_void_p, TUCAM_MULTIROI_ATTR] +TUCAM_Cap_SetMultiROI.restype = TUCAMRET +TUCAM_Cap_GetMultiROI = TUSDKdll.TUCAM_Cap_GetMultiROI +TUCAM_Cap_GetMultiROI.argtypes = [c_void_p, POINTER(TUCAM_MULTIROI_ATTR)] +TUCAM_Cap_GetMultiROI.restype = TUCAMRET + +# Trigger +TUCAM_Cap_SetTrigger = TUSDKdll.TUCAM_Cap_SetTrigger +TUCAM_Cap_SetTrigger.argtypes = [c_void_p, TUCAM_TRIGGER_ATTR] +TUCAM_Cap_SetTrigger.restype = TUCAMRET +TUCAM_Cap_GetTrigger = TUSDKdll.TUCAM_Cap_GetTrigger +TUCAM_Cap_GetTrigger.argtypes = [c_void_p, POINTER(TUCAM_TRIGGER_ATTR)] +TUCAM_Cap_GetTrigger.restype = TUCAMRET +TUCAM_Cap_DoSoftwareTrigger = TUSDKdll.TUCAM_Cap_DoSoftwareTrigger +TUCAM_Cap_DoSoftwareTrigger.argtypes = [c_void_p] +TUCAM_Cap_DoSoftwareTrigger.restype = TUCAMRET + +# Trigger Out +TUCAM_Cap_SetTriggerOut = TUSDKdll.TUCAM_Cap_SetTriggerOut +TUCAM_Cap_SetTriggerOut.argtypes = [c_void_p, TUCAM_TRGOUT_ATTR] +TUCAM_Cap_SetTriggerOut.restype = TUCAMRET +TUCAM_Cap_GetTriggerOut = TUSDKdll.TUCAM_Cap_GetTriggerOut +TUCAM_Cap_GetTriggerOut.argtypes = [c_void_p, POINTER(TUCAM_TRGOUT_ATTR)] +TUCAM_Cap_GetTriggerOut.restype = TUCAMRET + +# Capturing +TUCAM_Cap_Start = TUSDKdll.TUCAM_Cap_Start +TUCAM_Cap_Start.argtypes = [c_void_p, c_uint] +TUCAM_Cap_Start.restype = TUCAMRET +TUCAM_Cap_Stop = TUSDKdll.TUCAM_Cap_Stop +TUCAM_Cap_Stop.argtypes = [c_void_p] +TUCAM_Cap_Stop.restype = TUCAMRET + +# File control +# Image +TUCAM_File_SaveImage = TUSDKdll.TUCAM_File_SaveImage +TUCAM_File_SaveImage.argtypes = [c_void_p, TUCAM_FILE_SAVE] +TUCAM_File_SaveImage.restype = TUCAMRET + +# Profiles +TUCAM_File_LoadProfiles = TUSDKdll.TUCAM_File_LoadProfiles +TUCAM_File_LoadProfiles.argtypes = [c_void_p, c_void_p] +TUCAM_File_LoadProfiles.restype = TUCAMRET +TUCAM_File_SaveProfiles = TUSDKdll.TUCAM_File_SaveProfiles +TUCAM_File_SaveProfiles.argtypes = [c_void_p, c_void_p] +TUCAM_File_SaveProfiles.restype = TUCAMRET + +# Video 
+TUCAM_Rec_Start = TUSDKdll.TUCAM_Rec_Start +TUCAM_Rec_Start.argtypes = [c_void_p, TUCAM_REC_SAVE] +TUCAM_Rec_Start.restype = TUCAMRET +TUCAM_Rec_AppendFrame = TUSDKdll.TUCAM_Rec_AppendFrame +TUCAM_Rec_AppendFrame.argtypes = [c_void_p, POINTER(TUCAM_FRAME)] +TUCAM_Rec_AppendFrame.restype = TUCAMRET +TUCAM_Rec_Stop = TUSDKdll.TUCAM_Rec_Stop +TUCAM_Rec_Stop.argtypes = [c_void_p] +TUCAM_Rec_Stop.restype = TUCAMRET + +TUIMG_File_Open = TUSDKdll.TUIMG_File_Open +TUIMG_File_Open.argtypes = [POINTER(TUIMG_OPEN), POINTER(POINTER(TUCAM_FRAME))] +TUIMG_File_Open.restype = TUCAMRET +TUIMG_File_Close = TUSDKdll.TUIMG_File_Close +TUIMG_File_Close.argtypes = [c_void_p] +TUIMG_File_Close.restype = TUCAMRET + +# Calculatr roi +TUCAM_Calc_SetROI = TUSDKdll.TUCAM_Calc_SetROI +TUCAM_Calc_SetROI.argtypes = [c_void_p, TUCAM_CALC_ROI_ATTR] +TUCAM_Calc_SetROI.restype = TUCAMRET +TUCAM_Calc_GetROI = TUSDKdll.TUCAM_Calc_GetROI +TUCAM_Calc_GetROI.argtypes = [c_void_p, POINTER(TUCAM_CALC_ROI_ATTR)] +TUCAM_Calc_GetROI.restype = TUCAMRET + +# Extened control +TUCAM_Reg_Read = TUSDKdll.TUCAM_Reg_Read +TUCAM_Reg_Read.argtypes = [c_void_p, TUCAM_REG_RW] +TUCAM_Reg_Read.restype = TUCAMRET +TUCAM_Reg_Write = TUSDKdll.TUCAM_Reg_Write +TUCAM_Reg_Write.argtypes = [c_void_p, TUCAM_REG_RW] +TUCAM_Reg_Write.restype = TUCAMRET + +# Get GrayValue +TUCAM_Get_GrayValue = TUSDKdll.TUCAM_Get_GrayValue +TUCAM_Get_GrayValue.argtypes = [c_void_p, c_int32, c_int32, c_void_p] +TUCAM_Get_GrayValue.restype = TUCAMRET + +# Find color temperature index value according to RGB +TUCAM_Index_GetColorTemperature = TUSDKdll.TUCAM_Index_GetColorTemperature +TUCAM_Index_GetColorTemperature.argtypes = [c_void_p, c_int32, c_int32, c_int32, c_void_p] +TUCAM_Index_GetColorTemperature.restype = TUCAMRET + +# Set record save mode +TUCAM_Rec_SetAppendMode = TUSDKdll.TUCAM_Rec_SetAppendMode +TUCAM_Rec_SetAppendMode.argtypes = [c_void_p, c_uint] +TUCAM_Rec_SetAppendMode.restype = TUCAMRET + +# Any-BIN +TUCAM_Cap_SetBIN = 
TUSDKdll.TUCAM_Cap_SetBIN +TUCAM_Cap_SetBIN.argtypes = [c_void_p, TUCAM_BIN_ATTR] +TUCAM_Cap_SetBIN.restype = TUCAMRET +TUCAM_Cap_GetBIN = TUSDKdll.TUCAM_Cap_GetBIN +TUCAM_Cap_GetBIN.argtypes = [c_void_p, POINTER(TUCAM_BIN_ATTR)] +TUCAM_Cap_GetBIN.restype = TUCAMRET + +# Subtract background +TUCAM_Cap_SetBackGround = TUSDKdll.TUCAM_Cap_SetMath  # NOTE(review): bound to the DLL's SetMath symbol but prototyped with TUCAM_IMG_BACKGROUND — confirm whether the DLL exports TUCAM_Cap_SetBackGround +TUCAM_Cap_SetBackGround.argtypes = [c_void_p, TUCAM_IMG_BACKGROUND] +TUCAM_Cap_SetBackGround.restype = TUCAMRET +TUCAM_Cap_GetBackGround = TUSDKdll.TUCAM_Cap_GetMath  # NOTE(review): same — bound to GetMath; verify intended DLL symbol +TUCAM_Cap_GetBackGround.argtypes = [c_void_p, POINTER(TUCAM_IMG_BACKGROUND)] +TUCAM_Cap_GetBackGround.restype = TUCAMRET + +# Math +TUCAM_Cap_SetMath = TUSDKdll.TUCAM_Cap_SetMath +TUCAM_Cap_SetMath.argtypes = [c_void_p, TUCAM_IMG_MATH] +TUCAM_Cap_SetMath.restype = TUCAMRET +TUCAM_Cap_GetMath = TUSDKdll.TUCAM_Cap_GetMath +TUCAM_Cap_GetMath.argtypes = [c_void_p, POINTER(TUCAM_IMG_MATH)] +TUCAM_Cap_GetMath.restype = TUCAMRET + +# GenICam Element Attribute pName +TUCAM_GenICam_ElementAttr = TUSDKdll.TUCAM_GenICam_ElementAttr +TUCAM_GenICam_ElementAttr.argtypes = [c_void_p, POINTER(TUCAM_ELEMENT), c_void_p, c_int32] +TUCAM_GenICam_ElementAttr.restype = TUCAMRET + +# GenICam Element Attribute Next +TUCAM_GenICam_ElementAttrNext = TUSDKdll.TUCAM_GenICam_ElementAttrNext +TUCAM_GenICam_ElementAttrNext.argtypes = [c_void_p, POINTER(TUCAM_ELEMENT), c_void_p, c_int32] +TUCAM_GenICam_ElementAttrNext.restype = TUCAMRET + +# GenICam Set Element Value +TUCAM_GenICam_SetElementValue = TUSDKdll.TUCAM_GenICam_SetElementValue +TUCAM_GenICam_SetElementValue.argtypes = [c_void_p, POINTER(TUCAM_ELEMENT), c_int32] +TUCAM_GenICam_SetElementValue.restype = TUCAMRET + +# GenICam Get Element Value +TUCAM_GenICam_GetElementValue = TUSDKdll.TUCAM_GenICam_GetElementValue +TUCAM_GenICam_GetElementValue.argtypes = [c_void_p, POINTER(TUCAM_ELEMENT), c_int32] +TUCAM_GenICam_GetElementValue.restype = TUCAMRET + +# GenICam Set Register Value +TUCAM_GenICam_SetRegisterValue = 
TUSDKdll.TUCAM_GenICam_SetRegisterValue +TUCAM_GenICam_SetRegisterValue.argtypes = [c_void_p, c_void_p, c_int64, c_int64] +TUCAM_GenICam_SetRegisterValue.restype = TUCAMRET + +# GenICam Get Register Value +TUCAM_GenICam_GetRegisterValue = TUSDKdll.TUCAM_GenICam_GetRegisterValue +TUCAM_GenICam_GetRegisterValue.argtypes = [c_void_p, c_void_p, c_int64, c_int64] +TUCAM_GenICam_GetRegisterValue.restype = TUCAMRET + +# Only CXP Support +TUCAM_Cap_AnnounceBuffer = TUSDKdll.TUCAM_Cap_AnnounceBuffer +TUCAM_Cap_AnnounceBuffer.argtypes = [c_void_p, c_uint, c_void_p] +TUCAM_Cap_AnnounceBuffer.restype = TUCAMRET +TUCAM_Cap_ClearBuffer = TUSDKdll.TUCAM_Cap_ClearBuffer +TUCAM_Cap_ClearBuffer.argtypes = [c_void_p] +TUCAM_Cap_ClearBuffer.restype = TUCAMRET + +# FFC Coefficient Load or Save +#TUCAM_File_LoadFFCCoefficient = TUSDKdll.TUCAM_File_LoadFFCCoefficient +#TUCAM_File_LoadFFCCoefficient.argtypes = [c_void_p, c_void_p] +#TUCAM_File_LoadFFCCoefficient.restype = TUCAMRET +#TUCAM_File_SaveFFCCoefficient = TUSDKdll.TUCAM_File_SaveFFCCoefficient +#TUCAM_File_SaveFFCCoefficient.argtypes = [c_void_p, c_void_p] +#TUCAM_File_SaveFFCCoefficient.restype = TUCAMRET diff --git a/software/control/_def.py b/software/control/_def.py index 62d4a4be0..fe3c5ca0c 100644 --- a/software/control/_def.py +++ b/software/control/_def.py @@ -1,9 +1,15 @@ import os +import sys import glob -import numpy as np from pathlib import Path from configparser import ConfigParser import json +import csv + +import squid.logging + +log = squid.logging.get_logger(__name__) + def conf_attribute_reader(string_value): """ @@ -58,7 +64,7 @@ def populate_class_from_dict(myclass, options): class TriggerMode: SOFTWARE = 'Software Trigger' HARDWARE = 'Hardware Trigger' - CONTINUOUS = 'Continuous Acqusition' + CONTINUOUS = 'Continuous Acquisition' class Acquisition: CROP_WIDTH = 3000 @@ -66,6 +72,15 @@ class Acquisition: NUMBER_OF_FOVS_PER_AF = 3 IMAGE_FORMAT = 'bmp' IMAGE_DISPLAY_SCALING_FACTOR = 0.3 + PSEUDO_COLOR = False + 
MERGE_CHANNELS = False + PSEUDO_COLOR_MAP = { + "405": {"hex": 0x0000FF}, # blue + "488": {"hex": 0x00FF00}, # green + "561": {"hex": 0xFFCF00}, # yellow + "638": {"hex": 0xFF0000}, # red + "730": {"hex": 0x770000} # dark red + } DX = 0.9 DY = 0.9 DZ = 1.5 @@ -80,11 +95,6 @@ class MicrocontrollerDef: CMD_LENGTH = 8 N_BYTES_POS = 4 -class Microcontroller2Def: - MSG_LENGTH = 4 - CMD_LENGTH = 8 - N_BYTES_POS = 4 - USE_SEPARATE_MCU_FOR_DAC = False class MCU_PINS: @@ -117,6 +127,8 @@ class CMD_SET: SET_ILLUMINATION_LED_MATRIX = 13 ACK_JOYSTICK_BUTTON_PRESSED = 14 ANALOG_WRITE_ONBOARD_DAC = 15 + SET_DAC80508_REFDIV_GAIN = 16 + SET_ILLUMINATION_INTENSITY_FACTOR = 17 MOVETO_X = 6 MOVETO_Y = 7 MOVETO_Z = 8 @@ -129,8 +141,11 @@ class CMD_SET: CONFIGURE_STAGE_PID = 25 ENABLE_STAGE_PID = 26 DISABLE_STAGE_PID = 27 + SET_HOME_SAFETY_MERGIN = 28 + SET_PID_ARGUMENTS = 29 SEND_HARDWARE_TRIGGER = 30 SET_STROBE_DELAY = 31 + SET_AXIS_DISABLE_ENABLE = 32 SET_PIN_LEVEL = 41 INITIALIZE = 254 RESET = 255 @@ -174,13 +189,13 @@ class LIMIT_SWITCH_POLARITY: class ILLUMINATION_CODE: - ILLUMINATION_SOURCE_LED_ARRAY_FULL = 0; + ILLUMINATION_SOURCE_LED_ARRAY_FULL = 0 ILLUMINATION_SOURCE_LED_ARRAY_LEFT_HALF = 1 ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_HALF = 2 ILLUMINATION_SOURCE_LED_ARRAY_LEFTB_RIGHTR = 3 - ILLUMINATION_SOURCE_LED_ARRAY_LOW_NA = 4; - ILLUMINATION_SOURCE_LED_ARRAY_LEFT_DOT = 5; - ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_DOT = 6; + ILLUMINATION_SOURCE_LED_ARRAY_LOW_NA = 4 + ILLUMINATION_SOURCE_LED_ARRAY_LEFT_DOT = 5 + ILLUMINATION_SOURCE_LED_ARRAY_RIGHT_DOT = 6 ILLUMINATION_SOURCE_LED_EXTERNAL_FET = 20 ILLUMINATION_SOURCE_405NM = 11 ILLUMINATION_SOURCE_488NM = 12 @@ -205,6 +220,8 @@ class CAMERA_CONFIG: ROI_WIDTH_DEFAULT = 3104 ROI_HEIGHT_DEFAULT = 2084 +PRINT_CAMERA_FPS = True + ########################################################### #### machine specific configurations - to be overridden ### ########################################################### @@ -281,6 +298,39 @@ class 
CAMERA_CONFIG: MAX_ACCELERATION_Y_mm = 500 MAX_ACCELERATION_Z_mm = 20 +# config encoder arguments +HAS_ENCODER_X = False +HAS_ENCODER_Y = False +HAS_ENCODER_Z = False + +# enable PID control +ENABLE_PID_X = False +ENABLE_PID_Y = False +ENABLE_PID_Z = False + +# PID arguments +PID_P_X = int(1<<12) +PID_I_X = int(0) +PID_D_X = int(0) + +PID_P_Y = int(1<<12) +PID_I_Y = int(0) +PID_D_Y = int(0) + +PID_P_Z = int(1<<12) +PID_I_Z = int(0) +PID_D_Z = int(1) + +# flip direction True or False +ENCODER_FLIP_DIR_X = True +ENCODER_FLIP_DIR_Y = True +ENCODER_FLIP_DIR_Z = True + +# distance for each count (um) +ENCODER_RESOLUTION_UM_X = 0.05 +ENCODER_RESOLUTION_UM_Y = 0.05 +ENCODER_RESOLUTION_UM_Z = 0.1 + # end of actuator specific configurations SCAN_STABILIZATION_TIME_MS_X = 160 @@ -311,17 +361,9 @@ class PLATE_READER: DEFAULT_DISPLAY_CROP = 100 # value ranges from 1 to 100 - image display crop size CAMERA_PIXEL_SIZE_UM = {'IMX290':2.9,'IMX178':2.4,'IMX226':1.85,'IMX250':3.45,'IMX252':3.45,'IMX273':3.45,'IMX264':3.45,'IMX265':3.45,'IMX571':3.76,'PYTHON300':4.8} -OBJECTIVES = {'2x':{'magnification':2, 'NA':0.10, 'tube_lens_f_mm':180}, - '4x':{'magnification':4, 'NA':0.13, 'tube_lens_f_mm':180}, - '10x':{'magnification':10, 'NA':0.25, 'tube_lens_f_mm':180}, - '10x (Mitutoyo)':{'magnification':10, 'NA':0.25, 'tube_lens_f_mm':200}, - '20x (Boli)':{'magnification':20, 'NA':0.4, 'tube_lens_f_mm':180}, - '20x (Nikon)':{'magnification':20, 'NA':0.45, 'tube_lens_f_mm':200}, - '20x':{'magnification':20, 'NA':0.4, 'tube_lens_f_mm':180}, - '40x':{'magnification':40, 'NA':0.6, 'tube_lens_f_mm':180}} + TUBE_LENS_MM = 50 CAMERA_SENSOR = 'IMX226' -DEFAULT_OBJECTIVE = '10x (Mitutoyo)' TRACKERS = ['csrt', 'kcf', 'mil', 'tld', 'medianflow','mosse','daSiamRPN'] DEFAULT_TRACKER = 'csrt' @@ -351,6 +393,17 @@ class SLIDE_POSITION: SCANNING_X_MM = 3 SCANNING_Y_MM = 3 +class OUTPUT_GAINS: + REFDIV = False + CHANNEL0_GAIN = False + CHANNEL1_GAIN = False + CHANNEL2_GAIN = False + CHANNEL3_GAIN = False + 
CHANNEL4_GAIN = False + CHANNEL5_GAIN = False + CHANNEL6_GAIN = False + CHANNEL7_GAIN = True + SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S = 10 SLIDE_POTISION_SWITCHING_HOME_EVERYTIME = False @@ -359,14 +412,15 @@ class SOFTWARE_POS_LIMIT: X_NEGATIVE = -0.5 Y_POSITIVE = 56 Y_NEGATIVE = -0.5 - Z_POSITIVE = 6 + Z_POSITIVE = 7 + Z_NEGATIVE = 0.05 SHOW_AUTOLEVEL_BTN = False AUTOLEVEL_DEFAULT_SETTING = False MULTIPOINT_AUTOFOCUS_CHANNEL = 'BF LED matrix full' # MULTIPOINT_AUTOFOCUS_CHANNEL = 'BF LED matrix left half' -MULTIPOINT_AUTOFOCUS_ENABLE_BY_DEFAULT = True +MULTIPOINT_AUTOFOCUS_ENABLE_BY_DEFAULT = False MULTIPOINT_BF_SAVING_OPTION = 'Raw' # MULTIPOINT_BF_SAVING_OPTION = 'RGB2GRAY' # MULTIPOINT_BF_SAVING_OPTION = 'Green Channel Only' @@ -374,31 +428,26 @@ class SOFTWARE_POS_LIMIT: DEFAULT_MULTIPOINT_NX=1 DEFAULT_MULTIPOINT_NY=1 -ENABLE_FLEXIBLE_MULTIPOINT = False +ENABLE_FLEXIBLE_MULTIPOINT = True +USE_OVERLAP_FOR_FLEXIBLE = False +ENABLE_WELLPLATE_MULTIPOINT = True +ENABLE_RECORDING = False CAMERA_SN = {'ch 1':'SN1','ch 2': 'SN2'} # for multiple cameras, to be overwritten in the configuration file ENABLE_STROBE_OUTPUT = False -Z_STACKING_CONFIG = 'FROM CENTER' # 'FROM BOTTOM', 'FROM TOP' +ACQUISITION_PATTERN = 'S-Pattern' # 'S-Pattern', 'Unidirectional' +FOV_PATTERN = 'Unidirectional' # 'S-Pattern', 'Unidirectional' -# plate format -WELLPLATE_FORMAT = 384 +Z_STACKING_CONFIG = 'FROM BOTTOM' # 'FROM BOTTOM', 'FROM TOP' +Z_STACKING_CONFIG_MAP = { + 0: 'FROM BOTTOM', + 1: 'FROM CENTER', + 2: 'FROM TOP' +} -# for 384 well plate -X_MM_384_WELLPLATE_UPPERLEFT = 0 -Y_MM_384_WELLPLATE_UPPERLEFT = 0 DEFAULT_Z_POS_MM = 2 -X_ORIGIN_384_WELLPLATE_PIXEL = 177 # upper left of B2 -Y_ORIGIN_384_WELLPLATE_PIXEL = 141 # upper left of B2 -NUMBER_OF_SKIP_384 = 1 -A1_X_MM_384_WELLPLATE = 12.05 -A1_Y_MM_384_WELLPLATE = 9.05 -WELL_SPACING_MM_384_WELLPLATE = 4.5 -WELL_SIZE_MM_384_WELLPLATE = 3.3 -# B1 upper left corner in piexel: x = 124, y = 141 -# B1 upper left corner in mm: x = 12.13 mm 
- 3.3 mm/2, y = 8.99 mm + 4.5 mm - 3.3 mm/2 -# B2 upper left corner in pixel: x = 177, y = 141 WELLPLATE_OFFSET_X_mm = 0 # x offset adjustment for using different plates WELLPLATE_OFFSET_Y_mm = 0 # y offset adjustment for using different plates @@ -416,7 +465,7 @@ class SOFTWARE_POS_LIMIT: CHOSEN_READ = 'INDIVIDUAL' # laser autofocus -SUPPORT_LASER_AUTOFOCUS = False +SUPPORT_LASER_AUTOFOCUS = True MAIN_CAMERA_MODEL = 'MER2-1220-32U3M' FOCUS_CAMERA_MODEL = 'MER2-630-60U3M' FOCUS_CAMERA_EXPOSURE_TIME_MS = 2 @@ -426,42 +475,214 @@ class SOFTWARE_POS_LIMIT: LASER_AF_CROP_WIDTH = 1536 LASER_AF_CROP_HEIGHT = 256 HAS_TWO_INTERFACES = True +LASER_AF_RANGE = 200 USE_GLASS_TOP = True SHOW_LEGACY_DISPLACEMENT_MEASUREMENT_WINDOWS = False MULTIPOINT_REFLECTION_AUTOFOCUS_ENABLE_BY_DEFAULT = False +MULTIPOINT_CONTRAST_AUTOFOCUS_ENABLE_BY_DEFAULT = False RUN_CUSTOM_MULTIPOINT = False RETRACT_OBJECTIVE_BEFORE_MOVING_TO_LOADING_POSITION = True OBJECTIVE_RETRACTED_POS_MM = 0.1 -CLASSIFICATION_MODEL_PATH ="/home/cephla/Documents/tmp/model_perf_r34_b32.pt" -SEGMENTATION_MODEL_PATH = "/home/cephla/Documents/tmp/model_segmentation_1073_9.pth" -CLASSIFICATION_TEST_MODE=False +TWO_CLASSIFICATION_MODELS = False +CLASSIFICATION_MODEL_PATH = "models/resnet18_en/version1/best.pt" +CLASSIFICATION_MODEL_PATH2 = "models/resnet18_en/version2/best.pt" +CLASSIFICATION_TEST_MODE = False +CLASSIFICATION_TH = 0.3 -USE_TRT_SEGMENTATION=False -SEGMENTATION_CROP=1500 +SEGMENTATION_MODEL_PATH = "models/m2unet_model_flat_erode1_wdecay5_smallbatch/model_4000_11.pth" +ENABLE_SEGMENTATION = True +USE_TRT_SEGMENTATION = False +SEGMENTATION_CROP = 1500 -DISP_TH_DURING_MULTIPOINT=0.95 +DISP_TH_DURING_MULTIPOINT = 0.95 SORT_DURING_MULTIPOINT = False DO_FLUORESCENCE_RTP = False -ENABLE_SPINNING_DISK_CONFOCAL=False - INVERTED_OBJECTIVE = False -CAMERA_TYPE="Default" +ILLUMINATION_INTENSITY_FACTOR = 0.6 + +CAMERA_TYPE = "Default" +FOCUS_CAMERA_TYPE = "Default" + +# Spinning disk confocal integration 
+ENABLE_SPINNING_DISK_CONFOCAL = False +USE_LDI_SERIAL_CONTROL = False +LDI_INTENSITY_MODE = 'PC' +LDI_SHUTTER_MODE = 'PC' + +XLIGHT_EMISSION_FILTER_MAPPING = {405:1,470:2,555:3,640:4,730:5} +XLIGHT_SERIAL_NUMBER = "B00031BE" +XLIGHT_SLEEP_TIME_FOR_WHEEL = 0.25 +XLIGHT_VALIDATE_WHEEL_POS = False + +# Confocal.nl NL5 integration +ENABLE_NL5 = False +ENABLE_CELLX = False +CELLX_SN = None +CELLX_MODULATION = 'EXT Digital' +NL5_USE_AOUT = False +NL5_USE_DOUT = True +NL5_TRIGGER_PIN = 2 +NL5_WAVENLENGTH_MAP = { + 405: 1, + 470: 2, 488: 2, + 545: 3, 555: 3, 561: 3, + 637: 4, 638: 4, 640: 4 +} + +# Laser AF characterization mode +LASER_AF_CHARACTERIZATION_MODE = False + +# Napari integration +USE_NAPARI_FOR_LIVE_VIEW = False +USE_NAPARI_FOR_MULTIPOINT = True +USE_NAPARI_FOR_TILED_DISPLAY = False +USE_NAPARI_FOR_MOSAIC_DISPLAY = True +USE_NAPARI_WELL_SELECTION = False +USE_NAPARI_FOR_LIVE_CONTROL = False +LIVE_ONLY_MODE = False + +# Controller SN (needed when using multiple teensy-based connections) +CONTROLLER_SN = None + +# Sci microscopy +SUPPORT_SCIMICROSCOPY_LED_ARRAY = False +SCIMICROSCOPY_LED_ARRAY_SN = None +SCIMICROSCOPY_LED_ARRAY_DISTANCE = 50 +SCIMICROSCOPY_LED_ARRAY_DEFAULT_NA = 0.8 +SCIMICROSCOPY_LED_ARRAY_DEFAULT_COLOR = [1,1,1] +SCIMICROSCOPY_LED_ARRAY_TURN_ON_DELAY = 0.03 # time to wait before trigger the camera (in seconds) + +# Tiled preview +SHOW_TILED_PREVIEW = False +PRVIEW_DOWNSAMPLE_FACTOR = 5 + +# Navigation Bar (Stages) +SHOW_NAVIGATION_BAR = False +ENABLE_CLICK_TO_MOVE_BY_DEFAULT = True + +# Stitcher +ENABLE_STITCHER = False +IS_HCS = False +DYNAMIC_REGISTRATION = False +STITCH_COMPLETE_ACQUISITION = False +CHANNEL_COLORS_MAP = { + "405": {"hex": 0x3300FF, "name": "blue"}, + "488": {"hex": 0x1FFF00, "name": "green"}, + "561": {"hex": 0xFFCF00, "name": "yellow"}, + "638": {"hex": 0xFF0000, "name": "red"}, + "730": {"hex": 0x770000, "name": "dark red"}, + "R": {"hex": 0xFF0000, "name": "red"}, + "G": {"hex": 0x1FFF00, "name": "green"}, + "B": 
{"hex": 0x3300FF, "name": "blue"} +} + +# Emission filter wheel +USE_ZABER_EMISSION_FILTER_WHEEL = False +ZABER_EMISSION_FILTER_WHEEL_DELAY_MS = 70 +ZABER_EMISSION_FILTER_WHEEL_BLOCKING_CALL = False +USE_OPTOSPIN_EMISSION_FILTER_WHEEL = False +FILTER_CONTROLLER_SERIAL_NUMBER = 'A10NG007' +OPTOSPIN_EMISSION_FILTER_WHEEL_SPEED_HZ = 50 +OPTOSPIN_EMISSION_FILTER_WHEEL_DELAY_MS = 70 +OPTOSPIN_EMISSION_FILTER_WHEEL_TTL_TRIGGER = False + +# Stage +USE_PRIOR_STAGE = False +PRIOR_STAGE_SN = "" + +# camera blacklevel settings +DISPLAY_TOUPCAMER_BLACKLEVEL_SETTINGS = False +DEFAULT_BLACKLEVEL_VALUE = 3 + +def read_objectives_csv(file_path): + objectives = {} + with open(file_path, 'r') as csvfile: + reader = csv.DictReader(csvfile) + for row in reader: + objectives[row['name']] = { + 'magnification': float(row['magnification']), + 'NA': float(row['NA']), + 'tube_lens_f_mm': float(row['tube_lens_f_mm']) + } + return objectives + +def read_sample_formats_csv(file_path): + sample_formats = {} + with open(file_path, 'r') as csvfile: + reader = csv.DictReader(csvfile) + for row in reader: + format_ = str(row['format']) + format_key = f"{format_} well plate" if format_.isdigit() else format_ + sample_formats[format_key] = { + 'a1_x_mm': float(row['a1_x_mm']), + 'a1_y_mm': float(row['a1_y_mm']), + 'a1_x_pixel': int(row['a1_x_pixel']), + 'a1_y_pixel': int(row['a1_y_pixel']), + 'well_size_mm': float(row['well_size_mm']), + 'well_spacing_mm': float(row['well_spacing_mm']), + 'number_of_skip': int(row['number_of_skip']), + 'rows': int(row['rows']), + 'cols': int(row['cols']) + } + return sample_formats + +def load_formats(): + """Load formats, prioritizing cache for sample formats.""" + cache_path = 'cache' + default_path = 'objective_and_sample_formats' + + # Load objectives (from default location) + objectives = read_objectives_csv(os.path.join(default_path, 'objectives.csv')) + + # Try cache first for sample formats, fall back to default if not found + cached_formats_path = 
os.path.join(cache_path, 'sample_formats.csv') + default_formats_path = os.path.join(default_path, 'sample_formats.csv') + + if os.path.exists(cached_formats_path): + print('Using cached sample formats') + sample_formats = read_sample_formats_csv(cached_formats_path) + else: + print('Using default sample formats') + sample_formats = read_sample_formats_csv(default_formats_path) -FOCUS_CAMERA_TYPE="Default" + return objectives, sample_formats -INVERTED_OBJECTIVE = False + +OBJECTIVES_CSV_PATH = 'objectives.csv' +SAMPLE_FORMATS_CSV_PATH = 'sample_formats.csv' + +OBJECTIVES, WELLPLATE_FORMAT_SETTINGS = load_formats() ########################################################## #### start of loading machine specific configurations #### ########################################################## CACHED_CONFIG_FILE_PATH = None + +# Piezo configuration items +Z_MOTOR_CONFIG = "STEPPER" # "STEPPER", "STEPPER + PIEZO", "PIEZO", "LINEAR" +ENABLE_OBJECTIVE_PIEZO = "PIEZO" in Z_MOTOR_CONFIG + +# the value of OBJECTIVE_PIEZO_CONTROL_VOLTAGE_RANGE is 2.5 or 5 +OBJECTIVE_PIEZO_CONTROL_VOLTAGE_RANGE = 5 +OBJECTIVE_PIEZO_RANGE_UM = 300 +OBJECTIVE_PIEZO_HOME_UM = 20 +OBJECTIVE_PIEZO_FLIP_DIR = False + +MULTIPOINT_USE_PIEZO_FOR_ZSTACKS = ENABLE_OBJECTIVE_PIEZO +MULTIPOINT_PIEZO_DELAY_MS = 20 +MULTIPOINT_PIEZO_UPDATE_DISPLAY = True + +AWB_RATIOS_R = 1.375 +AWB_RATIOS_G = 1 +AWB_RATIOS_B = 1.4141 + try: with open("cache/config_file_path.txt", 'r') as file: for line in file: @@ -474,12 +695,12 @@ class SOFTWARE_POS_LIMIT: if config_files: if len(config_files) > 1: if CACHED_CONFIG_FILE_PATH in config_files: - print('defaulting to last cached config file at '+CACHED_CONFIG_FILE_PATH) + log.info(f'defaulting to last cached config file at \'{CACHED_CONFIG_FILE_PATH}\'') config_files = [CACHED_CONFIG_FILE_PATH] else: - print('multiple machine configuration files found, the program will exit') - exit() - print('load machine-specific configuration') + log.error('multiple machine configuration 
files found, the program will exit') + sys.exit(1) + log.info('load machine-specific configuration') #exec(open(config_files[0]).read()) cfp = ConfigParser() cfp.read(config_files[0]) @@ -505,24 +726,51 @@ class SOFTWARE_POS_LIMIT: continue myclass = locals()[classkey] populate_class_from_dict(myclass,pop_items) + with open("cache/config_file_path.txt", 'w') as file: file.write(config_files[0]) CACHED_CONFIG_FILE_PATH = config_files[0] else: - print('configuration*.ini file not found, defaulting to legacy configuration') + log.warning('configuration*.ini file not found, defaulting to legacy configuration') config_files = glob.glob('.' + '/' + 'configuration*.txt') if config_files: if len(config_files) > 1: - print('multiple machine configuration files found, the program will exit') - exit() - print('load machine-specific configuration') + log.error('multiple machine configuration files found, the program will exit') + sys.exit(1) + log.info('load machine-specific configuration') exec(open(config_files[0]).read()) else: - print('machine-specific configuration not present, the program will exit') - exit() + log.error('machine-specific configuration not present, the program will exit') + sys.exit(1) + +try: + with open("cache/objective_and_sample_format.txt", 'r') as f: + cached_settings = json.load(f) + DEFAULT_OBJECTIVE = cached_settings.get('objective') if cached_settings.get('objective') in OBJECTIVES else '20x' + WELLPLATE_FORMAT = str(cached_settings.get('wellplate_format')) + WELLPLATE_FORMAT = WELLPLATE_FORMAT + ' well plate' if WELLPLATE_FORMAT.isdigit() else WELLPLATE_FORMAT + if WELLPLATE_FORMAT not in WELLPLATE_FORMAT_SETTINGS: + WELLPLATE_FORMAT = '96 well plate' +except (FileNotFoundError, json.JSONDecodeError): + DEFAULT_OBJECTIVE = '20x' + WELLPLATE_FORMAT = '96 well plate' + +NUMBER_OF_SKIP = WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['number_of_skip'] # num rows/cols to skip on wellplate edge +WELL_SIZE_MM = 
WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['well_size_mm'] +WELL_SPACING_MM = WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['well_spacing_mm'] +A1_X_MM = WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['a1_x_mm'] # measured stage position - to update +A1_Y_MM = WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['a1_y_mm'] # measured stage position - to update +A1_X_PIXEL = WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['a1_x_pixel'] # coordinate on the png +A1_Y_PIXEL = WELLPLATE_FORMAT_SETTINGS[WELLPLATE_FORMAT]['a1_y_pixel'] # coordinate on the png + ########################################################## ##### end of loading machine specific configurations ##### ########################################################## + +# objective piezo +if ENABLE_OBJECTIVE_PIEZO == False: + MULTIPOINT_USE_PIEZO_FOR_ZSTACKS = False + # saving path if not (DEFAULT_SAVING_PATH.startswith(str(Path.home()))): DEFAULT_SAVING_PATH = str(Path.home())+"/"+DEFAULT_SAVING_PATH.strip("/") @@ -532,36 +780,10 @@ class SOFTWARE_POS_LIMIT: Y_HOME_SWITCH_POLARITY = LIMIT_SWITCH_POLARITY.Y_HOME Z_HOME_SWITCH_POLARITY = LIMIT_SWITCH_POLARITY.Z_HOME +# home safety margin with (um) unit +X_HOME_SAFETY_MARGIN_UM = 50 +Y_HOME_SAFETY_MARGIN_UM = 50 +Z_HOME_SAFETY_MARGIN_UM = 600 + if ENABLE_TRACKING: DEFAULT_DISPLAY_CROP = Tracking.DEFAULT_DISPLAY_CROP - -if WELLPLATE_FORMAT == 384: - WELL_SIZE_MM = 3.3 - WELL_SPACING_MM = 4.5 - NUMBER_OF_SKIP = 1 - A1_X_MM = 12.05 - A1_Y_MM = 9.05 -elif WELLPLATE_FORMAT == 96: - NUMBER_OF_SKIP = 0 - WELL_SIZE_MM = 6.21 - WELL_SPACING_MM = 9 - A1_X_MM = 14.3 - A1_Y_MM = 11.36 -elif WELLPLATE_FORMAT == 24: - NUMBER_OF_SKIP = 0 - WELL_SIZE_MM = 15.54 - WELL_SPACING_MM = 19.3 - A1_X_MM = 17.05 - A1_Y_MM = 13.67 -elif WELLPLATE_FORMAT == 12: - NUMBER_OF_SKIP = 0 - WELL_SIZE_MM = 22.05 - WELL_SPACING_MM = 26 - A1_X_MM = 24.75 - A1_Y_MM = 16.86 -elif WELLPLATE_FORMAT == 6: - NUMBER_OF_SKIP = 0 - WELL_SIZE_MM = 34.94 - WELL_SPACING_MM = 39.2 - A1_X_MM = 24.55 - A1_Y_MM = 23.01 diff 
--git a/software/control/_multipoint_custom_script_entry.py b/software/control/_multipoint_custom_script_entry.py index 91561d6ac..5f6ae95bb 100644 --- a/software/control/_multipoint_custom_script_entry.py +++ b/software/control/_multipoint_custom_script_entry.py @@ -195,11 +195,15 @@ def multipoint_custom_script_entry(multiPointWorker,time_point,current_path,coor multiPointWorker.wait_till_operation_is_completed() multiPointWorker.navigationController.move_y_usteps(-multiPointWorker.dy_usteps) multiPointWorker.wait_till_operation_is_completed() - _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) - multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.dz_usteps-_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() - multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() + if multiPointWorker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.dz_usteps-_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + else: + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.dz_usteps) + multiPointWorker.wait_till_operation_is_completed() multiPointWorker.coordinates_pd.to_csv(os.path.join(current_path,'coordinates.csv'),index=False,header=True) multiPointWorker.navigationController.enable_joystick_button_action = True return @@ -215,17 +219,28 @@ def multipoint_custom_script_entry(multiPointWorker,time_point,current_path,coor if multiPointWorker.NZ > 1: # move z back if Z_STACKING_CONFIG == 'FROM CENTER': - _usteps_to_clear_backlash = 
max(160,20*multiPointWorker.navigationController.z_microstepping) - multiPointWorker.navigationController.move_z_usteps( -multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) - _usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() - multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() + if multiPointWorker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) + multiPointWorker.navigationController.move_z_usteps( -multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) - _usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + else: + multiPointWorker.navigationController.move_z_usteps( -multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2)) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.dz_usteps = multiPointWorker.dz_usteps - multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) else: - multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) - _usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() - multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() + if multiPointWorker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) + 
multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) - _usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + else: + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1)) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.dz_usteps = multiPointWorker.dz_usteps - multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) # update FOV counter diff --git a/software/control/camera.py b/software/control/camera.py index ae3208fad..99f3071df 100644 --- a/software/control/camera.py +++ b/software/control/camera.py @@ -90,7 +90,8 @@ def open(self,index=0): # self.set_wb_ratios(self.get_awb_ratios()) print(self.get_awb_ratios()) # self.set_wb_ratios(1.28125,1.0,2.9453125) - self.set_wb_ratios(2,1,2) + # self.set_wb_ratios(2,1,2) + self.set_wb_ratios(AWB_RATIOS_R, AWB_RATIOS_G, AWB_RATIOS_B) # temporary self.camera.AcquisitionFrameRate.set(1000) @@ -219,6 +220,20 @@ def set_wb_ratios(self, wb_r=None, wb_g=None, wb_b=None): self.camera.BalanceRatioSelector.set(2) awb_b = self.camera.BalanceRatio.set(wb_b) + def set_balance_white_auto(self, value): + if value in [0, 1, 2]: + if self.camera.BalanceWhiteAuto.is_implemented(): + if self.camera.BalanceWhiteAuto.is_writable(): + self.camera.BalanceWhiteAuto.set(value) + + def get_balance_white_auto(self): + if self.camera.BalanceWhiteAuto.is_implemented(): + if self.camera.BalanceWhiteAuto.is_readable(): + return self.camera.BalanceWhiteAuto.get() + + def get_is_color(self): + return self.is_color + def set_reverse_x(self,value): self.camera.ReverseX.set(value) @@ -415,7 +430,7 @@ def set_line3_to_exposure_active(self): self.camera.LineSource.set(gx.GxLineSourceEntry.EXPOSURE_ACTIVE) class Camera_Simulation(object): - + def 
__init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_image=None): # many to be purged self.sn = sn @@ -464,13 +479,16 @@ def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_i self.is_live = False - self.Width = 3000 - self.Height = 3000 + self.Width = Acquisition.CROP_WIDTH + self.Height = Acquisition.CROP_HEIGHT + # self.resolution=(self.Width,self.Height) + # self.res_list = [(1000,1000), (2000,2000), (3000,3000), (4000,3000)] self.WidthMax = 4000 self.HeightMax = 3000 self.OffsetX = 0 self.OffsetY = 0 + self.new_image_callback_external = None def open(self,index=0): pass @@ -505,6 +523,15 @@ def get_awb_ratios(self): def set_wb_ratios(self, wb_r=None, wb_g=None, wb_b=None): pass + def set_balance_white_auto(self, value): + pass + + def get_balance_white_auto(self): + return 0 + + def get_is_color(self): + return False + def start_streaming(self): self.frame_ID_software = 0 @@ -530,15 +557,15 @@ def send_trigger(self): self.timestamp = time.time() if self.frame_ID == 1: if self.pixel_format == 'MONO8': - self.current_frame = np.random.randint(255,size=(2000,2000),dtype=np.uint8) - self.current_frame[901:1100,901:1100] = 200 + self.current_frame = np.random.randint(255,size=(self.Height,self.Width),dtype=np.uint8) + self.current_frame[self.Height//2-99:self.Height//2+100,self.Width//2-99:self.Width//2+100] = 200 elif self.pixel_format == 'MONO12': - self.current_frame = np.random.randint(4095,size=(2000,2000),dtype=np.uint16) - self.current_frame[901:1100,901:1100] = 200*16 + self.current_frame = np.random.randint(4095,size=(self.Height,self.Width),dtype=np.uint16) + self.current_frame[self.Height//2-99:self.Height//2+100,self.Width//2-99:self.Width//2+100] = 200*16 self.current_frame = self.current_frame << 4 elif self.pixel_format == 'MONO16': - self.current_frame = np.random.randint(65535,size=(2000,2000),dtype=np.uint16) - self.current_frame[901:1100,901:1100] = 200*256 + self.current_frame = 
np.random.randint(65535,size=(self.Height,self.Width),dtype=np.uint16) + self.current_frame[self.Height//2-99:self.Height//2+100,self.Width//2-99:self.Width//2+100] = 200*256 else: self.current_frame = np.roll(self.current_frame,10,axis=0) pass @@ -562,4 +589,4 @@ def set_line3_to_strobe(self): pass def set_line3_to_exposure_active(self): - pass \ No newline at end of file + pass diff --git a/software/control/camera_TIS.py b/software/control/camera_TIS.py index 7c0b0c0a2..94df33ca6 100644 --- a/software/control/camera_TIS.py +++ b/software/control/camera_TIS.py @@ -7,13 +7,17 @@ from scipy import misc import cv2 +import squid.logging +log = squid.logging.get_logger(__name__) + try: import gi gi.require_version("Gst", "1.0") gi.require_version("Tcam", "0.1") from gi.repository import Tcam, Gst, GLib, GObject except ImportError: - print('gi import error') + log.error('gi import error') + # TODO(imo): Propagate error in some way and handle DeviceInfo = namedtuple("DeviceInfo", "status name identifier connection_type") CameraProperty = namedtuple("CameraProperty", "status value min max default step type flags category group") @@ -21,6 +25,7 @@ class Camera(object): def __init__(self,sn=None,width=1920,height=1080,framerate=30,color=False): + self.log = squid.logging.get_logger(self.__class__.__name__) Gst.init(sys.argv) self.height = height self.width = width @@ -55,11 +60,11 @@ def __init__(self,sn=None,width=1920,height=1080,framerate=30,color=False): p += ' ! videoconvert ! appsink name=sink' - print(p) + self.log.info(p) try: self.pipeline = Gst.parse_launch(p) except GLib.Error as error: - print("Error creating pipeline: {0}".format(err)) + self.log.error(f"Error creating pipeline", error) raise self.pipeline.set_state(Gst.State.READY) @@ -111,13 +116,13 @@ def start_streaming(self): self.pipeline.get_state(Gst.CLOCK_TIME_NONE) self.is_streaming = True except GLib.Error as error: - print("Error starting pipeline: {0}".format(err))#error? 
+ self.log.error("Error starting pipeline", error) raise self.frame_ID = 0 def stop_streaming(self): self.pipeline.set_state(Gst.State.NULL) - print("pipeline stopped") + self.log.info("pipeline stopped") self.pipeline.set_state(Gst.State.READY) self.is_streaming = False @@ -141,16 +146,15 @@ def read_frame(self): def _on_new_buffer(self, appsink): # Function that is called when a new sample from camera is available self.newsample = True - # print('new buffer received: ' + str(time.time())) #@@@ if self.image_locked: - print('last image is still being processed, a frame is dropped') + self.log.error('last image is still being processed, a frame is dropped') + # TODO(imo): Propagate error in some way and handle return if self.samplelocked is False: self.samplelocked = True try: self.sample = self.appsink.get_property('last-sample') self._gstbuffer_to_opencv() - # print('new buffer read into RAM: ' + str(time.time())) #@@@ self.samplelocked = False self.newsample = False # gotimage reflects if a new image was triggered @@ -160,23 +164,25 @@ def _on_new_buffer(self, appsink): if self.new_image_callback_external is not None: self.new_image_callback_external(self) except GLib.Error as error: - print("Error on_new_buffer pipeline: {0}".format(err)) #error + self.log.error("Error on_new_buffer pipeline", error) self.img_mat = None + # TODO(imo): Propagate error in some way and handle + return Gst.FlowReturn.OK - def _get_property(self, PropertyName): + def _get_property(self, property_name): try: - return CameraProperty(*self.source.get_tcam_property(PropertyName)) + return CameraProperty(*self.source.get_tcam_property(property_name)) except GLib.Error as error: - print("Error get Property {0}: {1}",PropertyName, format(err)) + self.log.error(f"Error get Property {property_name}", error) raise - def _set_property(self, PropertyName, value): + def _set_property(self, property_name, value): try: - print('setting ' + PropertyName + 'to ' + str(value)) - 
self.source.set_tcam_property(PropertyName,GObject.Value(type(value),value)) + self.log.info('setting ' + property_name + 'to ' + str(value)) + self.source.set_tcam_property(property_name, GObject.Value(type(value), value)) except GLib.Error as error: - print("Error set Property {0}: {1}",PropertyName, format(err)) + self.log.error(f"Error set Property {property_name}", error) raise def _gstbuffer_to_opencv(self): diff --git a/software/control/camera_flir.py b/software/control/camera_flir.py index 01b27e2e5..2cf1531d8 100644 --- a/software/control/camera_flir.py +++ b/software/control/camera_flir.py @@ -564,6 +564,9 @@ def open(self,index=0, is_color=None): self.OffsetX = PySpin.CIntegerPtr(self.nodemap.GetNode('OffsetX')).GetValue() self.OffsetY = PySpin.CIntegerPtr(self.nodemap.GetNode('OffsetY')).GetValue() + # disable gamma + PySpin.CBooleanPtr(self.nodemap.GetNode('GammaEnable')).SetValue(False) + def set_callback(self,function): self.new_image_callback_external = function @@ -826,7 +829,6 @@ def start_streaming(self): self.is_streaming = True def stop_streaming(self): - self.camera.Init() if self.is_streaming: try: self.camera.EndAcquisition() diff --git a/software/control/camera_hamamatsu.py b/software/control/camera_hamamatsu.py new file mode 100644 index 000000000..5ccf74697 --- /dev/null +++ b/software/control/camera_hamamatsu.py @@ -0,0 +1,449 @@ +import argparse +import cv2 +import time +import numpy as np +import threading + +from control.dcam import Dcam, Dcamapi +from control.dcamapi4 import * +from control._def import * + +def get_sn_by_model(model_name): + try: + _, count = Dcamapi.init() + except TypeError: + print('Cannot init Hamamatsu Camera.') + sys.exit(1) + + for i in range(count): + d = Dcam(i) + sn = d.dev_getstring(DCAM_IDSTR.CAMERAID) + if sn is not False: + Dcamapi.uninit() + print('Hamamatsu Camera ' + sn) + return sn + + Dcamapi.uninit() + return None + + +class Camera(object): + def __init__(self,sn=None, resolution=(2304,2304), 
is_global_shutter=False, rotate_image_angle=None, flip_image=None): + self.dcam = None + self.exposure_time = 1 # ms + self.analog_gain = 0 + self.is_streaming = False + self.pixel_format = None + self.is_color = False + + self.frame_ID = -1 + self.frame_ID_software = -1 + self.frame_ID_offset_hardware_trigger = 0 + self.timestamp = 0 + self.trigger_mode = None + + self.strobe_delay_us = None + + self.image_locked = False + self.current_frame = None + self.callback_is_enabled = False + self.new_image_callback_external = None + self.stop_waiting = False + + self.GAIN_MAX = 0 + self.GAIN_MIN = 0 + self.GAIN_STEP = 0 + self.EXPOSURE_TIME_MS_MIN = 0.017633 + self.EXPOSURE_TIME_MS_MAX = 10000.0046 + + self.rotate_image_angle = rotate_image_angle + self.flip_image = flip_image + self.is_global_shutter = is_global_shutter + self.sn = sn + + self.ROI_offset_x = 0 + self.ROI_offset_y = 0 + self.ROI_width = 2304 + self.ROI_height = 2304 + + self.OffsetX = 0 + self.OffsetY = 0 + self.Width = 2304 + self.Height = 2304 + + self.WidthMax = 2304 + self.HeightMax = 2304 + + def open(self, index=0): + result = Dcamapi.init() + self.dcam = Dcam(index) + result = self.dcam.dev_open(index) and result + if result: + self.calculate_strobe_delay() + print('Hamamatsu Camera opened: ' + str(result)) + + def open_by_sn(self, sn): + unopened = 0 + success, count = Dcamapi.init() + if success: + for i in count: + d = Dcam(i) + if sn == d.dev_getstring(DCAM_IDSTR.CAMERAID): + self.dcam = d + self.calculate_strobe_delay() + print(self.dcam.dev_open(index)) + else: + unopened += 1 + if unopened == count or not success: + print('Hamamatsu Camera open_by_sn: No camera is opened.') + + def close(self): + if self.is_streaming: + self.stop_streaming() + self.disable_callback() + result = self.dcam.dev_close() and Dcamapi.uninit() + print('Hamamatsu Camera closed: ' + str(result)) + + def set_callback(self, function): + self.new_image_callback_external = function + + def enable_callback(self): + if 
self.callback_is_enabled: + return + + if not self.is_streaming: + self.start_streaming() + + self.stop_waiting = False + self.callback_thread = threading.Thread(target=self._wait_and_callback) + self.callback_thread.start() + + self.callback_is_enabled = True + + def _wait_and_callback(self): + # Note: DCAM API doesn't provide a direct callback mechanism + # This implementation uses the wait_event method to simulate a callback + while True: + if self.stop_waiting: + break + event = self.dcam.wait_event(DCAMWAIT_CAPEVENT.FRAMEREADY, 1000) + if event is not False: + self._on_new_frame() + + def _on_new_frame(self): + image = self.read_frame(no_wait=True) + if image is False: + print('Cannot get new frame from buffer.') + return + if self.image_locked: + print('Last image is still being processed; a frame is dropped') + return + + self.current_frame = image + + self.frame_ID_software += 1 + self.frame_ID += 1 + + # frame ID for hardware triggered acquisition + if self.trigger_mode == TriggerMode.HARDWARE: + if self.frame_ID_offset_hardware_trigger == None: + self.frame_ID_offset_hardware_trigger = self.frame_ID + self.frame_ID = self.frame_ID - self.frame_ID_offset_hardware_trigger + + self.timestamp = time.time() + self.new_image_callback_external(self) + + def disable_callback(self): + if not self.callback_is_enabled: + return + + was_streaming = self.is_streaming + if self.is_streaming: + self.stop_streaming() + + self.stop_waiting = True + time.sleep(0.2) + self.callback_thread.join() + del self.callback_thread + self.callback_is_enabled = False + + if was_streaming: + self.start_streaming() + + def set_analog_gain(self, gain): + pass + + def set_exposure_time(self, exposure_time): + if self.trigger_mode == TriggerMode.SOFTWARE: + adjusted = exposure_time / 1000 + elif self.trigger_mode == TriggerMode.HARDWARE: + adjusted = self.strobe_delay_us / 1000000 + exposure_time / 1000 + if self.dcam.prop_setvalue(DCAM_IDPROP.EXPOSURETIME, adjusted): + self.exposure_time 
= exposure_time + + def set_continuous_acquisition(self): + was_streaming = False + if self.is_streaming: + was_streaming = True + self.stop_streaming() + + if self.dcam.prop_setvalue(DCAM_IDPROP.TRIGGERSOURCE, DCAMPROP.TRIGGERSOURCE.INTERNAL): + self.trigger_mode = TriggerMode.CONTINUOUS + + if was_streaming: + self.start_streaming() + + def set_software_triggered_acquisition(self): + was_streaming = False + if self.is_streaming: + was_streaming = True + self.stop_streaming() + + if self.dcam.prop_setvalue(DCAM_IDPROP.TRIGGERSOURCE, DCAMPROP.TRIGGERSOURCE.SOFTWARE): + self.trigger_mode = TriggerMode.SOFTWARE + + if was_streaming: + self.start_streaming() + + def set_hardware_triggered_acquisition(self): + was_streaming = False + if self.is_streaming: + was_streaming = True + self.stop_streaming() + + self.dcam.prop_setvalue(DCAM_IDPROP.TRIGGERPOLARITY, DCAMPROP.TRIGGERPOLARITY.POSITIVE) + if self.dcam.prop_setvalue(DCAM_IDPROP.TRIGGERSOURCE, DCAMPROP.TRIGGERSOURCE.EXTERNAL): + self.frame_ID_offset_hardware_trigger = None + self.trigger_mode = TriggerMode.HARDWARE + + if was_streaming: + self.start_streaming() + + def set_pixel_format(self, pixel_format): + was_streaming = False + if self.is_streaming: + was_streaming = True + self.stop_streaming() + + self.pixel_format = pixel_format + + if pixel_format == 'MONO8': + result = self.dcam.prop_setvalue(DCAM_IDPROP.IMAGE_PIXELTYPE, DCAM_PIXELTYPE.MONO8) + elif pixel_format == 'MONO16': + result = self.dcam.prop_setvalue(DCAM_IDPROP.IMAGE_PIXELTYPE, DCAM_PIXELTYPE.MONO16) + + if was_streaming: + self.start_streaming() + + print('Set pixel format: ' + str(result)) + + def send_trigger(self): + if self.is_streaming: + if not self.dcam.cap_firetrigger(): + print('trigger not sent - firetrigger failed') + else: + print('trigger not sent - camera is not streaming') + + def read_frame(self, no_wait=False): + if no_wait: + return self.dcam.buf_getlastframedata() + else: + if self.dcam.wait_capevent_frameready(5000) is not 
False: + data = self.dcam.buf_getlastframedata() + return data + + dcamerr = self.dcam.lasterr() + if dcamerr.is_timeout(): + print('===: timeout') + + print('-NG: Dcam.wait_event() fails with error {}'.format(dcamerr)) + + def start_streaming(self, buffer_frame_num=5): + if self.is_streaming: + return + if self.dcam.buf_alloc(buffer_frame_num): + if self.dcam.cap_start(True): + self.is_streaming = True + print('Hamamatsu Camera starts streaming') + return + else: + self.dcam.buf_release() + print('Hamamatsu Camera cannot start streaming') + + def stop_streaming(self): + if self.dcam.cap_stop() and self.dcam.buf_release(): + self.is_streaming = False + print('Hamamatsu Camera streaming stopped') + else: + print('Hamamatsu Camera cannot stop streaming') + + def set_ROI(self,offset_x=None,offset_y=None,width=None,height=None): + if offset_x is not None: + ROI_offset_x = 2*(offset_x//2) + else: + ROI_offset_x = self.ROI_offset_x + + if offset_y is not None: + ROI_offset_y = 2*(offset_y//2) + else: + ROI_offset_y = self.ROI_offset_y + + if width is not None: + ROI_width = max(16,2*(width//2)) + else: + ROI_width = self.ROI_width + + if height is not None: + ROI_height = max(16,2*(height//2)) + else: + ROI_height = self.ROI_height + + was_streaming = False + if self.is_streaming: + was_streaming = True + self.stop_streaming() + + result = self.dcam.prop_setvalue(DCAM_IDPROP.SUBARRAYMODE, DCAMPROP.MODE.ON) + + result = result and self.dcam.prop_setvalue(DCAM_IDPROP.SUBARRAYHPOS, ROI_offset_x) + result = result and self.dcam.prop_setvalue(DCAM_IDPROP.SUBARRAYHSIZE, ROI_width) + result = result and self.dcam.prop_setvalue(DCAM_IDPROP.SUBARRAYVPOS, ROI_offset_y) + result = result and self.dcam.prop_setvalue(DCAM_IDPROP.SUBARRAYVSIZE, ROI_height) + + if result: + self.calculate_strobe_delay() + else: + print("Setting ROI failed.") + + if was_streaming: + self.start_streaming() + + def calculate_strobe_delay(self): + self.strobe_delay_us = 
int(self.dcam.prop_getvalue(DCAM_IDPROP.INTERNAL_LINEINTERVAL) * 1000000 * 2304 + self.dcam.prop_getvalue(DCAM_IDPROP.TRIGGERDELAY) *1000000) # s to us + + +class Camera_Simulation(object): + + def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_image=None): + # many to be purged + self.sn = sn + self.is_global_shutter = is_global_shutter + self.device_info_list = None + self.device_index = 0 + self.camera = None + self.is_color = None + self.gamma_lut = None + self.contrast_lut = None + self.color_correction_param = None + + self.rotate_image_angle = rotate_image_angle + self.flip_image = flip_image + + self.exposure_time = 0 + self.analog_gain = 0 + self.frame_ID = 0 + self.frame_ID_software = -1 + self.frame_ID_offset_hardware_trigger = 0 + self.timestamp = 0 + + self.image_locked = False + self.current_frame = None + + self.callback_is_enabled = False + self.is_streaming = False + + self.GAIN_MAX = 0 + self.GAIN_MIN = 0 + self.GAIN_STEP = 0 + self.EXPOSURE_TIME_MS_MIN = 0.01 + self.EXPOSURE_TIME_MS_MAX = 4000 + + self.trigger_mode = None + + self.pixel_format = 'MONO16' + + self.is_live = False + + self.Width = 3000 + self.Height = 3000 + self.WidthMax = 4000 + self.HeightMax = 3000 + self.OffsetX = 0 + self.OffsetY = 0 + + self.new_image_callback_external = None + + + def open(self,index=0): + pass + + def set_callback(self,function): + self.new_image_callback_external = function + + def enable_callback(self): + self.callback_is_enabled = True + + def disable_callback(self): + self.callback_is_enabled = False + + def open_by_sn(self,sn): + pass + + def close(self): + pass + + def set_exposure_time(self,exposure_time): + pass + + def set_analog_gain(self,analog_gain): + pass + + def start_streaming(self): + self.frame_ID_software = 0 + + def stop_streaming(self): + pass + + def set_pixel_format(self,pixel_format): + self.pixel_format = pixel_format + print(pixel_format) + self.frame_ID = 0 + + def set_continuous_acquisition(self): + 
pass + + def set_software_triggered_acquisition(self): + pass + + def set_hardware_triggered_acquisition(self): + pass + + def send_trigger(self): + print('send trigger') + self.frame_ID = self.frame_ID + 1 + self.timestamp = time.time() + if self.frame_ID == 1: + if self.pixel_format == 'MONO8': + self.current_frame = np.random.randint(255,size=(2000,2000),dtype=np.uint8) + self.current_frame[901:1100,901:1100] = 200 + elif self.pixel_format == 'MONO16': + self.current_frame = np.random.randint(65535,size=(2000,2000),dtype=np.uint16) + self.current_frame[901:1100,901:1100] = 200*256 + else: + self.current_frame = np.roll(self.current_frame,10,axis=0) + pass + # self.current_frame = np.random.randint(255,size=(768,1024),dtype=np.uint8) + if self.new_image_callback_external is not None and self.callback_is_enabled: + self.new_image_callback_external(self) + + def read_frame(self): + return self.current_frame + + def _on_frame_callback(self, user_param, raw_image): + pass + + def set_ROI(self,offset_x=None,offset_y=None,width=None,height=None): + pass + + def set_line3_to_strobe(self): + pass diff --git a/software/control/camera_ids.py b/software/control/camera_ids.py new file mode 100644 index 000000000..f57cb1e0c --- /dev/null +++ b/software/control/camera_ids.py @@ -0,0 +1,520 @@ +import time +import numpy as np +import threading + +from control._def import * + +from ids_peak import ids_peak +from ids_peak_ipl import ids_peak_ipl +from ids_peak import ids_peak_ipl_extension + +import squid.logging +log = squid.logging.get_logger(__name__) + +def get_sn_by_model(model_name): + ids_peak.Library.Initialize() + device_manager = ids_peak.DeviceManager.Instance() + device_manager.Update() + if device_manager.Devices().empty(): + log.error('iDS camera not found.') + # TODO(imo): Propagate error in some way and handle + return + devices = device_manager.Devices() + for i in range(devices.size()): + dev = 
device_manager.Devices()[i].OpenDevice(ids_peak.DeviceAccessType_Control) + nodemap = dev.RemoteDevice().NodeMaps()[i] + sn = nodemap.FindNode("DeviceSerialNumber").Value() + mn = nodemap.FindNode("DeviceModelName").Value() + #if mn == model_name: + #return nodemap.FindNode("DeviceSerialNumber").Value() + log.debug(f"get_sn_by_model: {mn}") + return sn + + ids_peak.Library.Close() + + return None + + +class Camera(object): + def __init__(self, sn=None, resolution=(1920,1080), is_global_shutter=False, rotate_image_angle=None, flip_image=None): + self.log = squid.logging.get_logger(self.__class__.__name__) + + ids_peak.Library.Initialize() + self.device_manager = ids_peak.DeviceManager.Instance() + + self.device = None + self.datastream = None + self.nodemap = None + self.buffer_list = None + self.image_converter = None + + self.exposure_time = 1 # ms + self.analog_gain = 1 + self.is_streaming = False + self.pixel_format = None + self.is_color = False + + self.frame_ID = -1 + self.frame_ID_software = -1 + self.frame_ID_offset_hardware_trigger = 0 + self.timestamp = 0 + self.trigger_mode = None + + self.image_locked = False + self.current_frame = None + self.callback_is_enabled = False + self.new_image_callback_external = None + self.stop_waiting = False + + self.GAIN_MAX = 31.6 + self.GAIN_MIN = 1.0 + self.GAIN_STEP = 0.01 + self.EXPOSURE_TIME_MS_MIN = 0.015 + self.EXPOSURE_TIME_MS_MAX = 1999 + + self.rotate_image_angle = rotate_image_angle + self.flip_image = flip_image + self.is_global_shutter = is_global_shutter + self.sn = sn + + self.ROI_offset_x = 0 + self.ROI_offset_y = 0 + self.ROI_width = 1936 + self.ROI_height = 1096 + + self.OffsetX = 0 + self.OffsetY = 0 + self.Width = 1920 + self.Height = 1080 + + self.WidthMax = 1920 + self.HeightMax = 1080 + + def open(self, index=0): + self.device_manager.Update() + if self.device_manager.Devices().empty(): + self.log.error('iDS camera not found.') + # TODO(imo): Propagate error in some way and handle + return + 
self.device = self.device_manager.Devices()[index].OpenDevice(ids_peak.DeviceAccessType_Control) + if self.device is None: + self.log.error('Cannot open iDS camera.') + # TODO(imo): Propagate error in some way and handle + return + self.nodemap = self.device.RemoteDevice().NodeMaps()[0] + + self._camera_init() + self.log.info('iDS camera opened.') + + def open_by_sn(self, sn): + self.device_manager.Update() + for i in range(self.device_manager.Devices().size()): + dev = self.device_manager.Devices()[i] + nodemap = dev.RemoteDevice().NodeMaps()[i] + if sn == nodemap.FindNode("DeviceSerialNumber").Value(): + self.device = dev.OpenDevice(ids_peak.DeviceAccessType_Control) + if self.device is None: + self.log.error('Cannot open iDS camera.') + # TODO(imo): Propagate error in some way and handle + return + self.nodemap = nodemap + self._camera_init() + self.log.info(f'iDS camera opened by sn={sn}.') + return + self.log.error('No iDS camera is opened.') + # TODO(imo): Propagate error in some way and handle + return + + def _camera_init(self): + gain_node = self.nodemap.FindNode("Gain") + self.log.info(f'gain: min={gain_node.Minimum()}, max={gain_node.Maximum()}, increment={gain_node.Increment()}') + + # initialize software trigger + entries = [] + for entry in self.nodemap.FindNode("TriggerSelector").Entries(): + if (entry.AccessStatus() != ids_peak.NodeAccessStatus_NotAvailable + and entry.AccessStatus() != ids_peak.NodeAccessStatus_NotImplemented): + entries.append(entry.SymbolicValue()) + + if len(entries) == 0: + raise Exception("Software Trigger not supported") + elif "ExposureStart" not in entries: + self.nodemap.FindNode("TriggerSelector").SetCurrentEntry(entries[0]) + else: + self.nodemap.FindNode("TriggerSelector").SetCurrentEntry("ExposureStart") + + # initialize image converter + self.image_converter = ids_peak_ipl.ImageConverter() + + # Open device's datastream + ds = self.device.DataStreams() + if ds.empty(): + self.log.error("Device has no datastream!") + # 
TODO(imo): Propagate error in some way and handle + else: + self.datastream = ds[0].OpenDataStream() + + def close(self): + self.stop_streaming() + self.disable_callback() + self._revoke_buffer() + + ids_peak.Library.Close() + + def set_callback(self, function): + self.new_image_callback_external = function + + def enable_callback(self): + if self.callback_is_enabled: + return + + if not self.is_streaming: + self.start_streaming() + + self.stop_waiting = False + self.callback_thread = threading.Thread(target=self._wait_and_callback) + self.callback_thread.start() + + self.callback_is_enabled = True + self.log.info("callback enabled") + + def _wait_and_callback(self): + while True: + if self.stop_waiting: + break + try: + buffer = self.datastream.WaitForFinishedBuffer(2000) + if buffer is not None: + self._on_new_frame(buffer) + except Exception as e: + pass + + def _on_new_frame(self, buffer): + image = self.read_frame(no_wait=True, buffer=buffer) + if image is False: + # TODO(imo): Propagate error in some way and handle + self.log.error('Cannot get new frame from buffer.') + return + if self.image_locked: + # TODO(imo): Propagate error in some way and handle + self.log.error('Last image is still being processed; a frame is dropped') + return + + self.current_frame = image + self.frame_ID_software += 1 + self.frame_ID += 1 + + # frame ID for hardware triggered acquisition + if self.trigger_mode == TriggerMode.HARDWARE: + if self.frame_ID_offset_hardware_trigger == None: + self.frame_ID_offset_hardware_trigger = self.frame_ID + self.frame_ID = self.frame_ID - self.frame_ID_offset_hardware_trigger + + self.timestamp = time.time() + self.new_image_callback_external(self) + + def disable_callback(self): + if not self.callback_is_enabled: + return + + was_streaming = self.is_streaming + if self.is_streaming: + self.stop_streaming() + + self.stop_waiting = True + self.callback_thread.join() + del self.callback_thread + self.callback_is_enabled = False + + if 
was_streaming: + self.start_streaming() + self.log.info("callback disabled") + + def set_analog_gain(self, gain): + if gain < self.GAIN_MIN: + gain = self.GAIN_MIN + elif gain > self.GAIN_MAX: + gain = self.GAIN_MAX + self.nodemap.FindNode("GainSelector").SetCurrentEntry("AnalogAll") + self.nodemap.FindNode("Gain").SetValue(gain) + self.analog_gain = gain + + def set_exposure_time(self, exposure_time): + if exposure_time < self.EXPOSURE_TIME_MS_MIN: + exposure_time = self.EXPOSURE_TIME_MS_MIN + elif exposure_time > self.EXPOSURE_TIME_MS_MAX: + exposure_time = self.EXPOSURE_TIME_MS_MAX + self.nodemap.FindNode("ExposureTime").SetValue(exposure_time * 1000) + self.exposure_time = exposure_time + + def set_continuous_acquisition(self): + pass + + def set_software_triggered_acquisition(self): + try: + self.nodemap.FindNode("TriggerMode").SetCurrentEntry("On") + self.nodemap.FindNode("TriggerSource").SetCurrentEntry("Software") + self.trigger_mode = TriggerMode.SOFTWARE + except Exception as e: + self.log.error(f"Cannot set Software trigger", e) + + def set_hardware_triggered_acquisition(self): + self.nodemap.FindNode("TriggerMode").SetCurrentEntry("On") + self.nodemap.FindNode("TriggerSource").SetCurrentEntry("Line0") + self.trigger_mode = TriggerMode.HARDWARE + + def set_pixel_format(self, pixel_format): + self.log.debug(f"Pixel format={pixel_format}") + was_streaming = False + if self.is_streaming: + was_streaming = True + self.stop_streaming() + try: + if pixel_format == 'MONO10': + self.nodemap.FindNode("PixelFormat").SetCurrentEntry("Mono10g40IDS") + elif pixel_format == 'MONO12': + self.nodemap.FindNode("PixelFormat").SetCurrentEntry("Mono12g24IDS") + else: + raise Exception('Wrong pixel format.') + self.pixel_format = pixel_format + + if was_streaming: + self.start_streaming() + except Exception as e: + self.log.error("Cannot change pixelformat", e) + + def send_trigger(self): + if self.is_streaming: + self.nodemap.FindNode("TriggerSoftware").Execute() + 
self.nodemap.FindNode("TriggerSoftware").WaitUntilDone() + self.log.debug('Trigger sent') + + def read_frame(self, no_wait=False, buffer=None): + if not no_wait: + buffer = self.datastream.WaitForFinishedBuffer(2000) + self.log.debug("Buffered image!") + + # Convert image and make deep copy + if self.pixel_format == 'MONO10': + output_pixel_format = ids_peak_ipl.PixelFormatName_Mono10 + elif self.pixel_format == 'MONO12': + output_pixel_format = ids_peak_ipl.PixelFormatName_Mono12 + + ipl_image = ids_peak_ipl_extension.BufferToImage(buffer) + ipl_converted = self.image_converter.Convert(ipl_image, output_pixel_format) + numpy_image = ipl_converted.get_numpy_1D().copy() + + self.current_frame = np.frombuffer(numpy_image, dtype=np.uint16).reshape(ipl_converted.Height(), ipl_converted.Width()) + + self.datastream.QueueBuffer(buffer) + + return self.current_frame + + def start_streaming(self, extra_buffer=1): + if self.is_streaming: + return + + # Allocate image buffer for image acquisition + self._revoke_buffer() + self._allocate_buffer(extra_buffer) + for buffer in self.buffer_list: + self.datastream.QueueBuffer(buffer) + + # Lock parameters that should not be accessed during acquisition + self.nodemap.FindNode("TLParamsLocked").SetValue(1) + + # Pre-allocate conversion buffers to speed up first image conversion + # while the acquisition is running + # NOTE: Re-create the image converter, so old conversion buffers + # get freed + input_pixel_format = ids_peak_ipl.PixelFormat( + self.nodemap.FindNode("PixelFormat").CurrentEntry().Value()) + if self.pixel_format == 'MONO10': + output_pixel_format = ids_peak_ipl.PixelFormatName_Mono10 + elif self.pixel_format == 'MONO12': + output_pixel_format = ids_peak_ipl.PixelFormatName_Mono12 + + self.image_converter.PreAllocateConversion( + input_pixel_format, output_pixel_format, self.Width, self.Height) + + self.datastream.StartAcquisition() + self.nodemap.FindNode("AcquisitionStart").Execute() + 
self.nodemap.FindNode("AcquisitionStart").WaitUntilDone() + self.is_streaming = True + self.log.info("ids started streaming") + + def _revoke_buffer(self): + if self.datastream is None: + self.log.error("No datastream!") + # TODO(imo): Propagate error in some way and handle + return + + try: + # Remove buffers from the announced pool + for buffer in self.datastream.AnnouncedBuffers(): + self.datastream.RevokeBuffer(buffer) + self.buffer_list = None + except Exception as e: + self.log.error("Error revoking buffers", e) + + def _allocate_buffer(self, extra_buffer): + if self.datastream is None: + self.log.error("No datastream!") + return + + try: + self.buffer_list = [] + payload_size = self.nodemap.FindNode("PayloadSize").Value() + buffer_amount = self.datastream.NumBuffersAnnouncedMinRequired() + extra_buffer + + for _ in range(buffer_amount): + buffer = self.datastream.AllocAndAnnounceBuffer(payload_size) + self.buffer_list.append(buffer) + + self.log.debug("Allocated buffers!") + except Exception as e: + self.log.error("Error allocating buffers", e) + + def stop_streaming(self): + if self.is_streaming: + try: + self.nodemap.FindNode("AcquisitionStop").Execute() + + self.datastream.StopAcquisition(ids_peak.AcquisitionStopMode_Default) + # Discard all buffers from the acquisition engine + # They remain in the announced buffer pool + self.datastream.Flush(ids_peak.DataStreamFlushMode_DiscardAll) + + # Unlock parameters + self.nodemap.FindNode("TLParamsLocked").SetValue(0) + + self.is_streaming = False + except Exception as e: + self.log.error("stop_streaming error", e) + + def set_ROI(self, offset_x=None, offset_y=None, width=None, height=None): + pass + + +class Camera_Simulation(object): + + def __init__(self, sn=None, is_global_shutter=False, rotate_image_angle=None, flip_image=None): + self.log = squid.logging.get_logger(self.__class__.__name__ + ) + # many to be purged + self.sn = sn + self.is_global_shutter = is_global_shutter + self.device_info_list = None + 
self.device_index = 0 + self.camera = None + self.is_color = None + self.gamma_lut = None + self.contrast_lut = None + self.color_correction_param = None + + self.rotate_image_angle = rotate_image_angle + self.flip_image = flip_image + + self.exposure_time = 0 + self.analog_gain = 0 + self.frame_ID = 0 + self.frame_ID_software = -1 + self.frame_ID_offset_hardware_trigger = 0 + self.timestamp = 0 + + self.image_locked = False + self.current_frame = None + + self.callback_is_enabled = False + self.is_streaming = False + + self.GAIN_MAX = 0 + self.GAIN_MIN = 0 + self.GAIN_STEP = 0 + self.EXPOSURE_TIME_MS_MIN = 0.01 + self.EXPOSURE_TIME_MS_MAX = 4000 + + self.trigger_mode = None + + self.pixel_format = 'MONO12' + + self.is_live = False + + self.Width = 1920 + self.Height = 1080 + self.WidthMax = 1920 + self.HeightMax = 1080 + self.OffsetX = 0 + self.OffsetY = 0 + + self.new_image_callback_external = None + + def open(self, index=0): + pass + + def set_callback(self, function): + self.new_image_callback_external = function + + def enable_callback(self): + self.callback_is_enabled = True + + def disable_callback(self): + self.callback_is_enabled = False + + def open_by_sn(self, sn): + pass + + def close(self): + pass + + def set_exposure_time(self, exposure_time): + pass + + def set_analog_gain(self, analog_gain): + pass + + def start_streaming(self): + self.frame_ID_software = 0 + + def stop_streaming(self): + pass + + def set_pixel_format(self, pixel_format): + self.pixel_format = pixel_format + self.log.info(f"pixel_format={pixel_format}") + self.frame_ID = 0 + + def set_continuous_acquisition(self): + pass + + def set_software_triggered_acquisition(self): + pass + + def set_hardware_triggered_acquisition(self): + pass + + def send_trigger(self): + self.log.info('send trigger') + self.frame_ID = self.frame_ID + 1 + self.timestamp = time.time() + if self.frame_ID == 1: + self.current_frame = np.random.randint(255, size=(2000,2000), dtype=np.uint8) + 
self.current_frame[901:1100,901:1100] = 200 + else: + self.current_frame = np.roll(self.current_frame, 10, axis=0) + pass + # self.current_frame = np.random.randint(255,size=(768,1024),dtype=np.uint8) + if self.new_image_callback_external is not None and self.callback_is_enabled: + self.new_image_callback_external(self) + + def read_frame(self): + return self.current_frame + + def _on_frame_callback(self, user_param, raw_image): + pass + + def set_ROI(self, offset_x=None, offset_y=None, width=None, height=None): + pass + + def set_line3_to_strobe(self): + pass diff --git a/software/control/camera_toupcam.py b/software/control/camera_toupcam.py index ba36e2751..c02c5423d 100644 --- a/software/control/camera_toupcam.py +++ b/software/control/camera_toupcam.py @@ -1,19 +1,20 @@ -import argparse -import cv2 import time import numpy as np +import squid.logging from control._def import * import threading import control.toupcam as toupcam from control.toupcam_exceptions import hresult_checker +log = squid.logging.get_logger(__name__) + def get_sn_by_model(model_name): try: device_list = toupcam.Toupcam.EnumV2() except: - print("Problem generating Toupcam device list") + log.error("Problem generating Toupcam device list") return None for dev in device_list: if dev.displayname == model_name: @@ -29,21 +30,20 @@ def _event_callback(nEvent, camera): if camera.is_streaming: camera._on_frame_callback() camera._software_trigger_sent = False - # print(' >>> new frame callback') def _on_frame_callback(self): # check if the last image is still locked if self.image_locked: - print('last image is still being processed, a frame is dropped') + self.log.warning('last image is still being processed, a frame is dropped') return # get the image from the camera try: self.camera.PullImageV2(self.buf, self.pixel_size_byte*8, None) # the second camera is number of bits per pixel - ignored in RAW mode - # print(' >>> pull image ok, current frame # = {}'.format(self.frame_ID)) except 
toupcam.HRESULTException as ex: - print('pull image failed, hr=0x{:x}'.format(ex.hr)) + # TODO(imo): Propagate error in some way and handle + self.log.error('pull image failed, hr=0x{:x}'.format(ex.hr)) # increament frame ID self.frame_ID_software += 1 @@ -53,19 +53,15 @@ def _on_frame_callback(self): # right now support the raw format only if self.data_format == 'RGB': if self.pixel_format == 'RGB24': - # self.current_frame = QImage(self.buf, self.w, self.h, (self.w * 24 + 31) // 32 * 4, QImage.Format_RGB888) - print('convert buffer to image not yet implemented for the RGB format') - return() + # TODO(imo): Propagate error in some way and handle + self.log.error('convert buffer to image not yet implemented for the RGB format') + return else: if self.pixel_size_byte == 1: raw_image = np.frombuffer(self.buf, dtype='uint8') elif self.pixel_size_byte == 2: raw_image = np.frombuffer(self.buf, dtype='uint16') - self.current_frame = raw_image.reshape(self.height,self.width) - - # for debugging - #print(self.current_frame.shape) - #print(self.current_frame.dtype) + self.current_frame = raw_image.reshape(self.Height,self.Width) # frame ID for hardware triggered acquisition if self.trigger_mode == TriggerMode.HARDWARE: @@ -82,6 +78,7 @@ def _TDIBWIDTHBYTES(w): return (w * 24 + 31) // 32 * 4 def __init__(self,sn=None,resolution=(3104,2084),is_global_shutter=False,rotate_image_angle=None,flip_image=None): + self.log = squid.logging.get_logger(self.__class__.__name__) # many to be purged self.sn = sn @@ -113,7 +110,7 @@ def __init__(self,sn=None,resolution=(3104,2084),is_global_shutter=False,rotate_ self.GAIN_MAX = 40 self.GAIN_MIN = 0 self.GAIN_STEP = 1 - self.EXPOSURE_TIME_MS_MIN = 0.01 + self.EXPOSURE_TIME_MS_MIN = 0.1 self.EXPOSURE_TIME_MS_MAX = 3600000 self.ROI_offset_x = CAMERA_CONFIG.ROI_OFFSET_X_DEFAULT @@ -129,6 +126,9 @@ def __init__(self,sn=None,resolution=(3104,2084),is_global_shutter=False,rotate_ self.row_numbers = 3036 self.exposure_delay_us_8bit = 650 
self.exposure_delay_us = self.exposure_delay_us_8bit*self.pixel_size_byte + + # just setting a default value + # it would be re-calculate with function calculate_hardware_trigger_arguments self.strobe_delay_us = self.exposure_delay_us + self.row_period_us*self.pixel_size_byte*(self.row_numbers-1) self.pixel_format = None # use the default pixel format @@ -141,6 +141,13 @@ def __init__(self,sn=None,resolution=(3104,2084),is_global_shutter=False,rotate_ self._software_trigger_sent = False self._last_software_trigger_timestamp = None self.resolution = None + # the balcklevel factor + # 8 bits: 1 + # 10 bits: 4 + # 12 bits: 16 + # 14 bits: 64 + # 16 bits: 256 + self.blacklevel_factor = 1 if resolution != None: self.resolution = resolution @@ -169,23 +176,25 @@ def __init__(self,sn=None,resolution=(3104,2084),is_global_shutter=False,rotate_ self.Width = resolution[0] self.Height = resolution[1] + # when camera arguments changed, call it to update strobe_delay + self.reset_strobe_delay = None + def check_temperature(self): while self.terminate_read_temperature_thread == False: time.sleep(2) - # print('[ camera temperature: ' + str(self.get_temperature()) + ' ]') temperature = self.get_temperature() if self.temperature_reading_callback is not None: try: self.temperature_reading_callback(temperature) except TypeError as ex: - print("Temperature read callback failed due to error: "+repr(ex)) + self.log.error("Temperature read callback failed due to error: "+repr(ex)) pass def open(self,index=0): if len(self.devices) > 0: - print('{}: flag = {:#x}, preview = {}, still = {}'.format(self.devices[0].displayname, self.devices[0].model.flag, self.devices[0].model.preview, self.devices[0].model.still)) + self.log.info('{}: flag = {:#x}, preview = {}, still = {}'.format(self.devices[0].displayname, self.devices[0].model.flag, self.devices[0].model.preview, self.devices[0].model.still)) for r in self.devices[index].model.res: - print('\t = [{} x {}]'.format(r.width, r.height)) + 
self.log.info('\t = [{} x {}]'.format(r.width, r.height)) if self.sn is not None: index = [idx for idx in range(len(self.devices)) if self.devices[idx].id == self.sn][0] highest_res = (0,0) @@ -205,13 +214,13 @@ def open(self,index=0): # RAW format: In this format, the output is the raw data directly output from the sensor. The RAW format is for the users that want to skip the internal color processing and obtain the raw data for user-specific purpose. With the raw format output enabled, the functions that are related to the internal color processing will not work, such as Toupcam_put_Hue or Toupcam_AwbOnce function and so on # set temperature - # print('max fan speed is ' + str(self.camera.FanMaxSpeed())) self.set_fan_speed(1) - self.set_temperature(0) + self.set_temperature(20) self.set_data_format('RAW') self.set_pixel_format('MONO16') # 'MONO8' self.set_auto_exposure(False) + self.set_blacklevel(DEFAULT_BLACKLEVEL_VALUE) # set resolution to full if resolution is not specified or not in the list of supported resolutions if self.resolution is None: @@ -228,14 +237,16 @@ def open(self,index=0): try: self.camera.StartPullModeWithCallback(self._event_callback, self) except toupcam.HRESULTException as ex: - print('failed to start camera, hr=0x{:x}'.format(ex.hr)) - exit() + self.log.error('failed to start camera, hr=0x{:x}'.format(ex.hr)) + # TODO(imo): Remove sys.exit and propagate+handle. + sys.exit(1) self._toupcam_pullmode_started = True else: - print('failed to open camera') - exit() + # TODO(imo): Remove sys.exit and propagate+handle. 
+ self.log.error('failed to open camera') + sys.exit(1) else: - print('no camera found') + self.log.error('no camera found') self.is_color = False if self.is_color: @@ -270,8 +281,6 @@ def close(self): self.last_numpy_image = None def set_exposure_time(self,exposure_time): - # exposure time in ms - self.camera.put_ExpoTime(int(exposure_time*1000)) # use_strobe = (self.trigger_mode == TriggerMode.HARDWARE) # true if using hardware trigger # if use_strobe == False or self.is_global_shutter: # self.exposure_time = exposure_time @@ -284,6 +293,12 @@ def set_exposure_time(self,exposure_time): # self.camera.ExposureTime.set(camera_exposure_time) self.exposure_time = exposure_time + # exposure time in ms + if self.trigger_mode == TriggerMode.HARDWARE: + self.camera.put_ExpoTime(int(exposure_time*1000) + int(self.strobe_delay_us)) + else: + self.camera.put_ExpoTime(int(exposure_time*1000)) + def update_camera_exposure_time(self): pass # use_strobe = (self.trigger_mode == TriggerMode.HARDWARE) # true if using hardware trigger @@ -308,7 +323,7 @@ def get_awb_ratios(self): return self.camera.get_WhiteBalanceGain() except toupcam.HRESULTException as ex: err_type = hresult_checker(ex,'E_NOTIMPL') - print("AWB not implemented") + self.log.warning("AWB not implemented") return (0,0,0) def set_wb_ratios(self, wb_r=None, wb_g=None, wb_b=None): @@ -316,7 +331,7 @@ def set_wb_ratios(self, wb_r=None, wb_g=None, wb_b=None): camera.put_WhiteBalanceGain(wb_r,wb_g,wb_b) except toupcam.HRESULTException as ex: err_type = hresult_checker(ex,'E_NOTIMPL') - print("White balance not implemented") + self.log.warning("White balance not implemented") def set_reverse_x(self,value): pass @@ -330,10 +345,11 @@ def start_streaming(self): self.camera.StartPullModeWithCallback(self._event_callback, self) self._toupcam_pullmode_started = True except toupcam.HRESULTException as ex: - print('failed to start camera, hr: '+hresult_checker(ex)) + self.log.error('failed to start camera, hr: 
'+hresult_checker(ex)) self.close() - exit() - print(' start streaming') + # TODO(imo): Remove sys.exit and propagate+handle. + sys.exit(1) + self.log.info('start streaming') self.is_streaming = True def stop_streaming(self): @@ -349,22 +365,22 @@ def set_pixel_format(self,pixel_format): self.stop_streaming() self.pixel_format = pixel_format - - if self._toupcam_pullmode_started: - self.camera.Stop() - if self.data_format == 'RAW': if pixel_format == 'MONO8': self.pixel_size_byte = 1 + self.blacklevel_factor = 1 self.camera.put_Option(toupcam.TOUPCAM_OPTION_BITDEPTH,0) elif pixel_format == 'MONO12': self.pixel_size_byte = 2 + self.blacklevel_factor = 16 self.camera.put_Option(toupcam.TOUPCAM_OPTION_BITDEPTH,1) elif pixel_format == 'MONO14': self.pixel_size_byte = 2 + self.blacklevel_factor = 64 self.camera.put_Option(toupcam.TOUPCAM_OPTION_BITDEPTH,1) elif pixel_format == 'MONO16': self.pixel_size_byte = 2 + self.blacklevel_factor = 256 self.camera.put_Option(toupcam.TOUPCAM_OPTION_BITDEPTH,1) else: # RGB data format @@ -397,26 +413,13 @@ def set_pixel_format(self,pixel_format): self.camera.put_Option(toupcam.TOUPCAM_OPTION_BITDEPTH,1) self.camera.put_Option(toupcam.TOUPCAM_OPTION_RGB,1) - self._update_buffer_settings(self.Width, self.Height) + self._update_buffer_settings() if was_streaming: self.start_streaming() - # if pixel_format == 'BAYER_RG8': - # self.camera.PixelFormat.set(gx.GxPixelFormatEntry.BAYER_RG8) - # self.pixel_size_byte = 1 - # if pixel_format == 'BAYER_RG12': - # self.camera.PixelFormat.set(gx.GxPixelFormatEntry.BAYER_RG12) - # self.pixel_size_byte = 2 - # self.pixel_format = pixel_format - # else: - # print("pixel format is not implemented or not writable") - - # if was_streaming: - # self.start_streaming() - # # update the exposure delay and strobe delay - # self.exposure_delay_us = self.exposure_delay_us_8bit*self.pixel_size_byte - # self.strobe_delay_us = self.exposure_delay_us + self.row_period_us*self.pixel_size_byte*(self.row_numbers-1) + 
if self.reset_strobe_delay is not None: + self.reset_strobe_delay() # It is forbidden to call Toupcam_put_Option with TOUPCAM_OPTION_BITDEPTH in the callback context of # PTOUPCAM_EVENT_CALLBACK and PTOUPCAM_DATA_CALLBACK_V3, the return value is E_WRONG_THREAD @@ -425,7 +428,8 @@ def set_auto_exposure(self,enabled): try: self.camera.put_AutoExpoEnable(enabled) except toupcam.HRESULTException as ex: - print("Unable to set auto exposure: "+repr(ex)) + self.log.error("Unable to set auto exposure: "+repr(ex)) + # TODO(imo): Propagate error in some way and handle def set_data_format(self,data_format): self.data_format = data_format @@ -444,24 +448,30 @@ def set_resolution(self,width,height): except toupcam.HRESULTException as ex: err_type = hresult_checker(ex,'E_INVALIDARG','E_BUSY','E_ACCESDENIED', 'E_UNEXPECTED') if err_type == 'E_INVALIDARG': - print(f"Resolution ({width},{height}) not supported by camera") + self.log.error(f"Resolution ({width},{height}) not supported by camera") else: - print(f"Resolution cannot be set due to error: "+err_type) - self._update_buffer_settings(self.Width, self.Height) + self.log.error(f"Resolution cannot be set due to error: "+err_type) + # TODO(imo): Propagate error in some way and handle + self._update_buffer_settings() if was_streaming: self.start_streaming() + if self.reset_strobe_delay is not None: + self.reset_strobe_delay() + def _update_buffer_settings(self): # resize the buffer - width, height = self.camera.get_Size() - self.width = width - self.height = height + xoffset, yoffset, width, height = self.camera.get_Roi() + + self.Width = width + self.Height = height + # calculate buffer size if (self.data_format == 'RGB') & (self.pixel_size_byte != 4): - bufsize = _TDIBWIDTHBYTES(self.width * self.pixel_size_byte * 8) * height + bufsize = _TDIBWIDTHBYTES(width * self.pixel_size_byte * 8) * height else: bufsize = width * self.pixel_size_byte * height - print('image size: {} x {}, bufsize = {}'.format(width, height, bufsize)) + 
self.log.info('image size: {} x {}, bufsize = {}'.format(width, height, bufsize)) # create the buffer self.buf = bytes(bufsize) @@ -470,7 +480,8 @@ def get_temperature(self): return self.camera.get_Temperature()/10 except toupcam.HRESULTException as ex: error_type = hresult_checker(ex) - print("Could not get temperature, error: "+error_type) + self.log.debug("Could not get temperature, error: "+error_type) + # TODO(imo): Returning 0 temp here seems dangerous - probably indicate instead that we don't know the temp return 0 def set_temperature(self,temperature): @@ -478,7 +489,8 @@ def set_temperature(self,temperature): self.camera.put_Temperature(int(temperature*10)) except toupcam.HRESULTException as ex: error_type = hresult_checker(ex) - print("Unable to set temperature: "+error_type) + # TODO(imo): Propagate error in some way and handle + self.log.error("Unable to set temperature: "+error_type) def set_fan_speed(self,speed): if self.has_fan: @@ -486,7 +498,8 @@ def set_fan_speed(self,speed): self.camera.put_Option(toupcam.TOUPCAM_OPTION_FAN,speed) except toupcam.HRESULTException as ex: error_type = hresult_checker(ex) - print("Unable to set fan speed: "+error_type) + # TODO(imo): Propagate error in some way and handle + self.log.error("Unable to set fan speed: "+error_type) else: pass @@ -504,7 +517,26 @@ def set_hardware_triggered_acquisition(self): self.camera.put_Option(toupcam.TOUPCAM_OPTION_TRIGGER,2) self.frame_ID_offset_hardware_trigger = None self.trigger_mode = TriggerMode.HARDWARE - # self.update_camera_exposure_time() + + # select trigger source to GPIO0 + try: + self.camera.IoControl(1, toupcam.TOUPCAM_IOCONTROLTYPE_SET_TRIGGERSOURCE, 1) + except toupcam.HRESULTException as ex: + error_type = hresult_checker(ex) + # TODO(imo): Propagate error in some way and handle + self.log.error("Unable to select trigger source: " + error_type) + # set GPIO1 to trigger wait + try: + self.camera.IoControl(3, toupcam.TOUPCAM_IOCONTROLTYPE_SET_OUTPUTMODE, 0) + 
self.camera.IoControl(3, toupcam.TOUPCAM_IOCONTROLTYPE_SET_OUTPUTINVERTER, 0) + except toupcam.HRESULTException as ex: + error_type = hresult_checker(ex) + # TODO(imo): Propagate error in some way and handle + self.log.error("Unable to set GPIO1 for trigger ready: " + error_type) + + def set_trigger_width_mode(self): + self.camera.IoControl(1, toupcam.TOUPCAM_IOCONTROLTYPE_SET_PWMSOURCE, 1) # set PWM source to GPIO0 + self.camera.IoControl(1, toupcam.TOUPCAM_IOCONTROLTYPE_SET_TRIGGERSOURCE, 4) # trigger source to PWM def set_gain_mode(self,mode): if mode == 'LCG': @@ -517,20 +549,19 @@ def set_gain_mode(self,mode): def send_trigger(self): if self._last_software_trigger_timestamp!= None: if (time.time() - self._last_software_trigger_timestamp) > (1.5*self.exposure_time/1000*1.02 + 4): - print('last software trigger timed out') + self.log.warning('last software trigger timed out') self._software_trigger_sent = False if self.is_streaming and (self._software_trigger_sent == False): self.camera.Trigger(1) self._software_trigger_sent = True self._last_software_trigger_timestamp = time.time() - print(' >>> trigger sent') + self.log.debug('>>> trigger sent') else: + # TODO(imo): Propagate these errors in some way and handle if self.is_streaming == False: - print('trigger not sent - camera is not streaming') + self.logger.error('trigger not sent - camera is not streaming') else: - # print('trigger not sent - waiting for the last trigger to complete') pass - #print("{:.3f}".format(time.time()-self._last_software_trigger_timestamp) + ' s since the last trigger') def stop_exposure(self): if self.is_streaming and self._software_trigger_sent == True: @@ -539,15 +570,17 @@ def stop_exposure(self): else: pass - def read_frame(self): - self.image_is_ready = False - # self.send_trigger() + def read_frame(self,reset_image_ready_flag=True): + # set reset_image_ready_flag to True when read_frame() is called immediately after triggering the acquisition + if reset_image_ready_flag: + 
self.image_is_ready = False timestamp_t0 = time.time() while (time.time() - timestamp_t0) <= (self.exposure_time/1000)*1.02 + 4: time.sleep(0.005) if self.image_is_ready: + self.image_is_ready = False return self.current_frame - print('read frame timed out') + self.log.error('read frame timed out') return None def set_ROI(self,offset_x=None,offset_y=None,width=None,height=None): @@ -555,77 +588,22 @@ def set_ROI(self,offset_x=None,offset_y=None,width=None,height=None): ROI_offset_x = 2*(offset_x//2) else: ROI_offset_x = self.ROI_offset_x - # # stop streaming if streaming is on - # if self.is_streaming == True: - # was_streaming = True - # self.stop_streaming() - # else: - # was_streaming = False - # # update the camera setting - # if self.camera.OffsetX.is_implemented() and self.camera.OffsetX.is_writable(): - # self.camera.OffsetX.set(self.ROI_offset_x) - # else: - # print("OffsetX is not implemented or not writable") - # # restart streaming if it was previously on - # if was_streaming == True: - # self.start_streaming() if offset_y is not None: ROI_offset_y = 2*(offset_y//2) else: ROI_offset_y = self.ROI_offset_y - # # stop streaming if streaming is on - # if self.is_streaming == True: - # was_streaming = True - # self.stop_streaming() - # else: - # was_streaming = False - # # update the camera setting - # if self.camera.OffsetY.is_implemented() and self.camera.OffsetY.is_writable(): - # self.camera.OffsetY.set(self.ROI_offset_y) - # else: - # print("OffsetX is not implemented or not writable") - # # restart streaming if it was previously on - # if was_streaming == True: - # self.start_streaming() if width is not None: ROI_width = max(16,2*(width//2)) else: ROI_width = self.ROI_width - # # stop streaming if streaming is on - # if self.is_streaming == True: - # was_streaming = True - # self.stop_streaming() - # else: - # was_streaming = False - # # update the camera setting - # if self.camera.Width.is_implemented() and self.camera.Width.is_writable(): - # 
self.camera.Width.set(self.ROI_width) - # else: - # print("OffsetX is not implemented or not writable") - # # restart streaming if it was previously on - # if was_streaming == True: - # self.start_streaming() if height is not None: ROI_height = max(16,2*(height//2)) else: ROI_height = self.ROI_height - # # stop streaming if streaming is on - # if self.is_streaming == True: - # was_streaming = True - # self.stop_streaming() - # else: - # was_streaming = False - # # update the camera setting - # if self.camera.Height.is_implemented() and self.camera.Height.is_writable(): - # self.camera.Height.set(self.ROI_height) - # else: - # print("Height is not implemented or not writable") - # # restart streaming if it was previously on - # if was_streaming == True: - # self.start_streaming() + was_streaming = False if self.is_streaming: self.stop_streaming() @@ -661,12 +639,16 @@ def set_ROI(self,offset_x=None,offset_y=None,width=None,height=None): self.OffsetY = ROI_offset_y except toupcam.HRESULTException as ex: err_type = hresult_checker(ex,'E_INVALIDARG') - print("ROI bounds invalid, not changing ROI.") - self._update_buffer_settings(self.Width, self.Height) + self.log.error("ROI bounds invalid, not changing ROI.") + self._update_buffer_settings() if was_streaming: self.start_streaming() + if self.reset_strobe_delay is not None: + self.reset_strobe_delay() + def reset_camera_acquisition_counter(self): + # TODO(imo): raise not implemented or something so this isn't a silent fail # if self.camera.CounterEventSource.is_implemented() and self.camera.CounterEventSource.is_writable(): # self.camera.CounterEventSource.set(gx.GxCounterEventSourceEntry.LINE2) # else: @@ -679,6 +661,7 @@ def reset_camera_acquisition_counter(self): pass def set_line3_to_strobe(self): + # TODO(imo): Make this not a silent fail # # self.camera.StrobeSwitch.set(gx.GxSwitchEntry.ON) # self.camera.LineSelector.set(gx.GxLineSelectorEntry.LINE3) # self.camera.LineMode.set(gx.GxLineModeEntry.OUTPUT) @@ 
-686,15 +669,130 @@ def set_line3_to_strobe(self): pass def set_line3_to_exposure_active(self): + # TODO(imo): Make this not a silent fail # # self.camera.StrobeSwitch.set(gx.GxSwitchEntry.ON) # self.camera.LineSelector.set(gx.GxLineSelectorEntry.LINE3) # self.camera.LineMode.set(gx.GxLineModeEntry.OUTPUT) # self.camera.LineSource.set(gx.GxLineSourceEntry.EXPOSURE_ACTIVE) pass + + def calculate_hardware_trigger_arguments(self): + # use camera arguments such as resolutuon, ROI, exposure time, set max FPS, bandwidth to calculate the trigger delay time + resolution_width = 0 + resolution_height = 0 + + roi_width = 0 + roi_height = 0 + + pixel_bits = self.pixel_size_byte * 8 + + + line_length = 0 + low_noise = 0 + + row_time = 0 + + vheight = 0 + + exp_length = 0 + + SHR = 0 + + TRG_DELAY = 0 + + try: + resolution_width, resolution_height = self.camera.get_Size() + except toupcam.HRESULTException as ex: + # TODO(imo): Propagate error in some way and handle + self.log.error('get resolution fail, hr=0x{:x}'.format(ex.hr)) + + xoffset, yoffset, roi_width, roi_height = self.camera.get_Roi() + + try: + bandwidth = self.camera.get_Option(toupcam.TOUPCAM_OPTION_BANDWIDTH) + except toupcam.HRESULTException as ex: + # TODO(imo): Propagate error in some way and handle + self.log.error('get badwidth fail, hr=0x{:x}'.format(ex.hr)) + + if self.has_low_noise_mode: + try: + low_noise = self.camera.get_Option(toupcam.TOUPCAM_OPTION_LOW_NOISE) + except toupcam.HRESULTException as ex: + # TODO(imo): Propagate error in some way and handle + self.log.error('get low_noise fail, hr=0x{:x}'.format(ex.hr)) + + if resolution_width == 6224 and resolution_height == 4168: + if pixel_bits == 8: + line_length = 1200 * (roi_width / 6224) + if line_length < 450: + line_length = 450 + elif pixel_bits == 16: + if low_noise == 1: + line_length = 5000 + elif low_noise == 0: + line_length = 2500 + elif resolution_width == 3104 and resolution_height == 2084: + if pixel_bits == 8: + line_length = 906 + 
elif pixel_bits == 16: + line_length = 1200 + elif resolution_width == 2064 and resolution_height == 1386: + if pixel_bits == 8: + line_length = 454 + elif pixel_bits == 16: + line_length = 790 + + line_length = int(line_length / (bandwidth / 100.0)) + row_time = line_length / 72 + + try: + max_framerate = self.camera.get_Option(toupcam.TOUPCAM_OPTION_MAX_PRECISE_FRAMERATE) + except toupcam.HRESULTException as ex: + # TODO(imo): Propagate error in some way and handle + self.log.error('get max_framerate fail, hr=0x{:x}'.format(ex.hr)) + + # need reset value, because the default value is only 90% of setting value + try: + self.camera.put_Option(toupcam.TOUPCAM_OPTION_PRECISE_FRAMERATE, max_framerate ) + except toupcam.HRESULTException as ex: + # TODO(imo): Propagate error in some way and handle + self.log.error('put max_framerate fail, hr=0x{:x}'.format(ex.hr)) + + max_framerate = max_framerate / 10.0 + + vheight = 72000000 / (max_framerate * line_length) + if vheight < roi_height + 56: + vheight = roi_height + 56 + + exp_length = 72 * self.exposure_time * 1000 / line_length + + frame_time = int(vheight * row_time) + + self.strobe_delay_us = frame_time + + def set_reset_strobe_delay_function(self, function_body): + self.reset_strobe_delay = function_body + + def set_blacklevel(self, blacklevel): + try: + current_blacklevel = self.camera.get_Option(toupcam.TOUPCAM_OPTION_BLACKLEVEL) + except toupcam.HRESULTException as ex: + err_type = hresult_checker(ex,'E_NOTIMPL') + print("blacklevel not implemented") + return + + _blacklevel = blacklevel * self.blacklevel_factor + + try: + self.camera.put_Option(toupcam.TOUPCAM_OPTION_BLACKLEVEL, _blacklevel) + except toupcam.HRESULTException as ex: + print('put blacklevel fail, hr=0x{:x}'.format(ex.hr)) + class Camera_Simulation(object): def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_image=None): + self.log = squid.logging.get_logger(self.__class__.__name__) # many to be purged self.sn = sn 
self.is_global_shutter = is_global_shutter @@ -725,7 +823,7 @@ def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_i self.GAIN_MAX = 40 self.GAIN_MIN = 0 self.GAIN_STEP = 1 - self.EXPOSURE_TIME_MS_MIN = 0.01 + self.EXPOSURE_TIME_MS_MIN = 0.1 self.EXPOSURE_TIME_MS_MAX = 3600000 self.trigger_mode = None @@ -736,12 +834,15 @@ def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_i self.row_numbers = 3036 self.exposure_delay_us_8bit = 650 self.exposure_delay_us = self.exposure_delay_us_8bit*self.pixel_size_byte + + # just setting a default value + # it would be re-calculate with function calculate_hardware_trigger_arguments self.strobe_delay_us = self.exposure_delay_us + self.row_period_us*self.pixel_size_byte*(self.row_numbers-1) self.pixel_format = 'MONO16' - self.Width = 3000 - self.Height = 3000 + self.Width = Acquisition.CROP_WIDTH + self.Height = Acquisition.CROP_HEIGHT self.WidthMax = 4000 self.HeightMax = 3000 self.OffsetX = 0 @@ -749,6 +850,17 @@ def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_i self.brand = 'ToupTek' + # when camera arguments changed, call it to update strobe_delay + self.reset_strobe_delay = None + + # the balcklevel factor + # 8 bits: 1 + # 10 bits: 4 + # 12 bits: 16 + # 14 bits: 64 + # 16 bits: 256 + self.blacklevel_factor = 1 + def open(self,index=0): pass @@ -793,7 +905,7 @@ def stop_streaming(self): def set_pixel_format(self,pixel_format): self.pixel_format = pixel_format - print(pixel_format) + self.log.debug(f"Pixel format: {pixel_format}") self.frame_ID = 0 def get_temperature(self): @@ -822,15 +934,15 @@ def send_trigger(self): self.timestamp = time.time() if self.frame_ID == 1: if self.pixel_format == 'MONO8': - self.current_frame = np.random.randint(255,size=(2000,2000),dtype=np.uint8) - self.current_frame[901:1100,901:1100] = 200 + self.current_frame = np.random.randint(255,size=(self.Height,self.Width),dtype=np.uint8) + 
self.current_frame[self.Height//2-99:self.Height//2+100,self.Width//2-99:self.Width//2+100] = 200 elif self.pixel_format == 'MONO12': - self.current_frame = np.random.randint(4095,size=(2000,2000),dtype=np.uint16) - self.current_frame[901:1100,901:1100] = 200*16 + self.current_frame = np.random.randint(4095,size=(self.Height,self.Width),dtype=np.uint16) + self.current_frame[self.Height//2-99:self.Height//2+100,self.Width//2-99:self.Width//2+100] = 200*16 self.current_frame = self.current_frame << 4 elif self.pixel_format == 'MONO16': - self.current_frame = np.random.randint(65535,size=(2000,2000),dtype=np.uint16) - self.current_frame[901:1100,901:1100] = 200*256 + self.current_frame = np.random.randint(65535,size=(self.Height,self.Width),dtype=np.uint16) + self.current_frame[self.Height//2-99:self.Height//2+100,self.Width//2-99:self.Width//2+100] = 200*256 else: self.current_frame = np.roll(self.current_frame,10,axis=0) pass @@ -861,3 +973,12 @@ def set_line3_to_strobe(self): def set_line3_to_exposure_active(self): pass + + def calculate_hardware_trigger_arguments(self): + pass + + def set_reset_strobe_delay_function(self, function_body): + pass + + def set_blacklevel(self, blacklevel): + pass diff --git a/software/control/camera_tucsen.py b/software/control/camera_tucsen.py new file mode 100644 index 000000000..8866a66e4 --- /dev/null +++ b/software/control/camera_tucsen.py @@ -0,0 +1,534 @@ +import ctypes +from ctypes import * + +import squid.logging +from control.TUCam import * +import numpy as np +import threading +import time + +from control._def import * + +def get_sn_by_model(model_name): + TUCAMINIT = TUCAM_INIT(0, './control'.encode('utf-8')) + TUCAM_Api_Init(pointer(TUCAMINIT)) + + for i in range(TUCAMINIT.uiCamCount): + TUCAMOPEN = TUCAM_OPEN(i, 0) + TUCAM_Dev_Open(pointer(TUCAMOPEN)) + TUCAMVALUEINFO = TUCAM_VALUE_INFO(TUCAM_IDINFO.TUIDI_CAMERA_MODEL.value, 0, 0, 0) + TUCAM_Dev_GetInfo(TUCAMOPEN.hIdxTUCam, pointer(TUCAMVALUEINFO)) + if 
TUCAMVALUEINFO.pText == model_name: + TUCAM_Reg_Read = TUSDKdll.TUCAM_Reg_Read + cSN = (c_char * 64)() + pSN = cast(cSN, c_char_p) + TUCAMREGRW = TUCAM_REG_RW(1, pSN, 64) + TUCAM_Reg_Read(TUCAMOPEN.hIdxTUCam, TUCAMREGRW) + sn = string_at(pSN).decode('utf-8') + + TUCAM_Dev_Close(TUCAMOPEN.hIdxTUCam) + TUCAM_Api_Uninit() + return sn + + TUCAM_Dev_Close(TUCAMOPEN.hIdxTUCam) + + TUCAM_Api_Uninit() + return None + + +class Camera(object): + def __init__(self, sn=None, resolution=(6240, 4168), is_global_shutter=False, rotate_image_angle=None, flip_image=None): + self.log = squid.logging.get_logger(self.__class__.__name__) + + self.sn = sn + self.resolution = resolution + self.is_global_shutter = is_global_shutter + self.rotate_image_angle = rotate_image_angle + self.flip_image = flip_image + + self.TUCAMINIT = TUCAM_INIT(0, './control'.encode('utf-8')) + self.TUCAMOPEN = TUCAM_OPEN(0, 0) + self.trigger_attr = TUCAM_TRIGGER_ATTR() + self.m_frame = None # buffer + + self.exposure_time = 1 # ms + self.analog_gain = 0 + self.is_streaming = False + self.pixel_format = None + self.is_color = False + + self.frame_ID = -1 + self.frame_ID_software = -1 + self.frame_ID_offset_hardware_trigger = 0 + self.timestamp = 0 + self.trigger_mode = None + + self.image_locked = False + self.current_frame = None + self.callback_is_enabled = False + self.new_image_callback_external = None + self.stop_waiting = False + + self.temperature_reading_callback = None + self.terminate_read_temperature_thread = False + self.temperature_reading_thread = threading.Thread(target=self.check_temperature, daemon=True) + + self.GAIN_MAX = 20.0 + self.GAIN_MIN = 0.0 + self.GAIN_STEP = 1.0 + self.EXPOSURE_TIME_MS_MIN = 0.0347 + self.EXPOSURE_TIME_MS_MAX = 31640472.76 + + self.ROI_offset_x = 0 + self.ROI_offset_y = 0 + self.ROI_width = 6240 + self.ROI_height = 4168 + + self.OffsetX = 0 + self.OffsetY = 0 + self.Width = 6240 + self.Height = 4168 + + self.WidthMax = 6240 + self.HeightMax = 4168 + 
self.binning_options = { + (6240, 4168): 0, (3120, 2084): 1, (2080, 1388): 2, (1560, 1040): 3, + (1248, 832): 4, (1040, 692): 5, (780, 520): 6, (388, 260): 7 + } + + def open(self, index=0): + TUCAM_Api_Init(pointer(self.TUCAMINIT)) + self.log.info(f'Connect {self.TUCAMINIT.uiCamCount} camera(s)') + + if index >= self.TUCAMINIT.uiCamCount: + self.log.error('Camera index out of range') + # TODO(imo): Propagate error in some way and handle + return + + self.TUCAMOPEN = TUCAM_OPEN(index, 0) + TUCAM_Dev_Open(pointer(self.TUCAMOPEN)) + + # TODO(imo): Propagate error in some way and handle + if self.TUCAMOPEN.hIdxTUCam == 0: + self.log.error('Open Tucsen camera failure!') + else: + self.log.info('Open Tucsen camera success!') + + self.set_temperature(20) + self.temperature_reading_thread.start() + + def open_by_sn(self, sn): + TUCAM_Api_Init(pointer(self.TUCAMINIT)) + + for i in range(self.TUCAMINIT.uiCamCount): + TUCAMOPEN = TUCAM_OPEN(i, 0) + TUCAM_Dev_Open(pointer(TUCAMOPEN)) + + TUCAM_Reg_Read = TUSDKdll.TUCAM_Reg_Read + cSN = (c_char * 64)() + pSN = cast(cSN, c_char_p) + TUCAMREGRW = TUCAM_REG_RW(1, pSN, 64) + TUCAM_Reg_Read(TUCAMOPEN.hIdxTUCam, TUCAMREGRW) + + if string_at(pSN).decode('utf-8') == sn: + self.TUCAMOPEN = TUCAMOPEN + self.set_temperature(20) + self.temperature_reading_thread.start() + self.log.info(f'Open the camera success! 
sn={sn}') + return + else: + TUCAM_Dev_Close(TUCAMOPEN.hIdxTUCam) + + # TODO(imo): Propagate error in some way and handle + self.log.error('No camera with the specified serial number found') + + def close(self): + self.disable_callback() + self.terminate_read_temperature_thread = True + self.temperature_reading_thread.join() + TUCAM_Buf_Release(self.TUCAMOPEN.hIdxTUCam) + TUCAM_Dev_Close(self.TUCAMOPEN.hIdxTUCam) + TUCAM_Api_Uninit() + self.log.info('Close Tucsen camera success') + + def set_callback(self, function): + self.new_image_callback_external = function + + def enable_callback(self): + if self.callback_is_enabled: + return + + if not self.is_streaming: + self.start_streaming() + + self.stop_waiting = False + self.callback_thread = threading.Thread(target=self._wait_and_callback) + self.callback_thread.start() + + self.callback_is_enabled = True + self.log.debug('enable callback') + + def _wait_and_callback(self): + while not self.stop_waiting: + result = TUCAM_Buf_WaitForFrame(self.TUCAMOPEN.hIdxTUCam, pointer(self.m_frame), int(self.exposure_time + 1000)) + if result == TUCAMRET.TUCAMRET_SUCCESS: + self._on_new_frame(self.m_frame) + + TUCAM_Buf_AbortWait(self.TUCAMOPEN.hIdxTUCam) + TUCAM_Cap_Stop(self.TUCAMOPEN.hIdxTUCam) + TUCAM_Buf_Release(self.TUCAMOPEN.hIdxTUCam) + + def _on_new_frame(self, frame): + # TODO(imo): Propagate error in some way and handle + if frame is False: + self.log.error('Cannot get new frame from buffer.') + return + if self.image_locked: + self.log.error('Last image is still being processed; a frame is dropped') + return + + self.current_frame = self._convert_frame_to_numpy(frame) + + self.frame_ID_software += 1 + self.frame_ID += 1 + + # frame ID for hardware triggered acquisition + if self.trigger_mode == TriggerMode.HARDWARE: + if self.frame_ID_offset_hardware_trigger == None: + self.frame_ID_offset_hardware_trigger = self.frame_ID + self.frame_ID = self.frame_ID - self.frame_ID_offset_hardware_trigger + + self.timestamp = 
time.time() + self.new_image_callback_external(self) + + def disable_callback(self): + if not self.callback_is_enabled: + return + + was_streaming = self.is_streaming + + self.stop_waiting = True + self.is_streaming = False + + if hasattr(self, 'callback_thread'): + self.callback_thread.join() + del self.callback_thread + self.callback_is_enabled = False + + if was_streaming: + self.start_streaming() + self.log.debug('disable callback') + + def set_temperature_reading_callback(self, func): + self.temperature_reading_callback = func + + def set_temperature(self, temperature): + t = temperature * 10 + 500 + result = TUCAM_Prop_SetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDPROP.TUIDP_TEMPERATURE.value, c_double(t), 0) + + def get_temperature(self): + t = c_double(0) + TUCAM_Prop_GetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDPROP.TUIDP_TEMPERATURE.value, pointer(t), 0) + return t.value + + def check_temperature(self): + while self.terminate_read_temperature_thread == False: + time.sleep(2) + temperature = self.get_temperature() + if self.temperature_reading_callback is not None: + try: + self.temperature_reading_callback(temperature) + except TypeError as ex: + self.log.error("Temperature read callback failed due to error: "+repr(ex)) + # TODO(imo): Propagate error in some way and handle + pass + + def set_resolution(self, width, height): + was_streaming = False + if self.is_streaming: + self.stop_streaming() + was_streaming = True + + if not (width, height) in self.binning_options: + self.log.error(f"No suitable binning found for resolution {width}x{height}") + # TODO(imo): Propagate error in some way and handle + return + + bin_value = c_int(self.binning_options[(width, height)]) + try: + TUCAM_Capa_SetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDCAPA.TUIDC_BINNING_SUM.value, bin_value) + + except Exception: + self.log.error('Cannot set binning.') + # TODO(imo): Propagate error in some way and handle + + if was_streaming: + self.start_streaming() + + def set_auto_exposure(self, 
enable=False): + value = 1 if enable else 0 + TUCAM_Capa_SetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDCAPA.TUIDC_ATEXPOSURE.value, value) + + if enable: + self.log.info("Auto exposure enabled") + else: + self.log.info("Auto exposure disabled") + + def set_exposure_time(self, exposure_time): + # Disable auto-exposure + TUCAM_Capa_SetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDCAPA.TUIDC_ATEXPOSURE.value, 0) + # Set the exposure time + TUCAM_Prop_SetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDPROP.TUIDP_EXPOSURETM.value, c_double(exposure_time), 0) + self.exposure_time = exposure_time + + def set_analog_gain(self, gain): + # Gain0: System Gain (DN/e-): 1.28; Full Well Capacity (e-): 49000; Readout Noise (e-): 2.7(Median), 3.3(RMS) + # Gain1: System Gain (DN/e-): 3.98; Full Well Capacity (e-): 15700; Readout Noise (e-): 1.0(Median), 1.3(RMS) + # Gain2: System Gain (DN/e-): 8.0; Full Well Capacity (e-): 7800; Readout Noise (e-): 0.95(Median), 1.2(RMS) + # Gain3: System Gain (DN/e-): 20; Full Well Capacity (e-): 3000; Readout Noise (e-): 0.85(Median), 1.0(RMS) + if gain < 2: + value = 0 + elif gain >= 2 and gain < 4: + value = 1 + elif gain >= 4 and gain < 9: + value = 2 + else: + value = 3 + TUCAM_Prop_SetValue(self.TUCAMOPEN.hIdxTUCam, TUCAM_IDPROP.TUIDP_GLOBALGAIN.value, c_double(value), 0) + self.analog_gain = value + + def set_pixel_format(self, pixel_format): + # TUIDC_BINNING_SUM value: [0, 6] + # 0: "1*1Normal"; 1: "2*2Bin_Sum"; 2: "3*3Bin_Sum"; 3: "4*4Bin_Sum"; 4: "6*6Bin_Sum"; 5: "8*8Bin_Sum"; 6: "16*16Bin_Sum" + # 0: 12bit; 1: 14bit; 2: 15bit; others: 16bit + pass + + def set_continuous_acquisition(self): + self.trigger_attr.nTgrMode = TUCAM_CAPTURE_MODES.TUCCM_SEQUENCE.value + self.trigger_attr.nBufFrames = 1 + TUCAM_Cap_SetTrigger(self.TUCAMOPEN.hIdxTUCam, self.trigger_attr) + self.trigger_mode = TriggerMode.CONTINUOUS + + def set_software_triggered_acquisition(self): + self.trigger_attr.nTgrMode = TUCAM_CAPTURE_MODES.TUCCM_TRIGGER_SOFTWARE.value + 
self.trigger_attr.nBufFrames = 1 + TUCAM_Cap_SetTrigger(self.TUCAMOPEN.hIdxTUCam, self.trigger_attr) + self.trigger_mode = TriggerMode.SOFTWARE + + def set_hardware_triggered_acquisition(self): + self.trigger_attr.nTgrMode = TUCAM_CAPTURE_MODES.TUCCM_TRIGGER_STANDARD.value + self.trigger_attr.nBufFrames = 1 + TUCAM_Cap_SetTrigger(self.TUCAMOPEN.hIdxTUCam, self.trigger_attr) + self.frame_ID_offset_hardware_trigger = None + self.trigger_mode = TriggerMode.HARDWARE + + def set_ROI(self, offset_x=None, offset_y=None, width=None, height=None): + roi_attr = TUCAM_ROI_ATTR() + roi_attr.bEnable = 1 + roi_attr.nHOffset = offset_x if offset_x is not None else self.ROI_offset_x + roi_attr.nVOffset = offset_y if offset_y is not None else self.ROI_offset_y + roi_attr.nWidth = width if width is not None else self.ROI_width + roi_attr.nHeight = height if height is not None else self.ROI_height + + was_streaming = False + if self.is_streaming: + self.stop_streaming() + was_streaming = True + + try: + TUCAM_Cap_SetROI(self.TUCAMOPEN.hIdxTUCam, roi_attr) + + self.ROI_offset_x = roi_attr.nHOffset + self.ROI_offset_y = roi_attr.nVOffset + self.ROI_width = roi_attr.nWidth + self.ROI_height = roi_attr.nHeight + + except Exception: + self.log.error('Cannot set ROI.') + # TODO(imo): Propagate error in some way and handle + + if was_streaming: + self.start_streaming() + + def send_trigger(self): + if self.trigger_mode == TriggerMode.SOFTWARE: + TUCAM_Cap_DoSoftwareTrigger(self.TUCAMOPEN.hIdxTUCam) + self.log.debug("Trigger sent") + + def start_streaming(self): + if self.is_streaming: + return + + self.m_frame = TUCAM_FRAME() + self.m_frame.pBuffer = 0 + self.m_frame.ucFormatGet = TUFRM_FORMATS.TUFRM_FMT_USUAl.value + self.m_frame.uiRsdSize = 1 + + result = TUCAM_Buf_Alloc(self.TUCAMOPEN.hIdxTUCam, pointer(self.m_frame)) + if result != TUCAMRET.TUCAMRET_SUCCESS: + raise Exception("Failed to allocate buffer") + result = TUCAM_Cap_Start(self.TUCAMOPEN.hIdxTUCam, self.trigger_attr.nTgrMode) + 
if result != TUCAMRET.TUCAMRET_SUCCESS: + TUCAM_Buf_Release(self.TUCAMOPEN.hIdxTUCam) + raise Exception("Failed to start capture") + + self.is_streaming = True + self.log.info('TUCam Camera starts streaming') + + def stop_streaming(self): + if not self.is_streaming: + return + + TUCAM_Cap_Stop(self.TUCAMOPEN.hIdxTUCam) + TUCAM_Buf_Release(self.TUCAMOPEN.hIdxTUCam) + self.is_streaming = False + self.log.info('TUCam Camera streaming stopped') + + def read_frame(self): + result = TUCAM_Buf_WaitForFrame(self.TUCAMOPEN.hIdxTUCam, pointer(self.m_frame), int(self.exposure_time + 1000)) + if result == TUCAMRET.TUCAMRET_SUCCESS: + self.current_frame = self._convert_frame_to_numpy(self.m_frame) + TUCAM_Buf_AbortWait(self.TUCAMOPEN.hIdxTUCam) + return self.current_frame + + return None + + def _convert_frame_to_numpy(self, frame): + buf = create_string_buffer(frame.uiImgSize) + pointer_data = c_void_p(frame.pBuffer + frame.usHeader) + memmove(buf, pointer_data, frame.uiImgSize) + + data = bytes(buf) + image_np = np.frombuffer(data, dtype=np.uint16) + image_np = image_np.reshape((frame.usHeight, frame.usWidth)) + + return image_np + + +class Camera_Simulation(object): + + def __init__(self,sn=None,is_global_shutter=False,rotate_image_angle=None,flip_image=None): + self.log = squid.logging.get_logger(self.__class__.__name__) + + # many to be purged + self.sn = sn + self.is_global_shutter = is_global_shutter + self.device_info_list = None + self.device_index = 0 + self.camera = None + self.is_color = None + self.gamma_lut = None + self.contrast_lut = None + self.color_correction_param = None + + self.rotate_image_angle = rotate_image_angle + self.flip_image = flip_image + + self.exposure_time = 0 + self.analog_gain = 0 + self.frame_ID = 0 + self.frame_ID_software = -1 + self.frame_ID_offset_hardware_trigger = 0 + self.timestamp = 0 + + self.image_locked = False + self.current_frame = None + + self.callback_is_enabled = False + self.is_streaming = False + + self.GAIN_MAX = 0 + 
self.GAIN_MIN = 0 + self.GAIN_STEP = 0 + self.EXPOSURE_TIME_MS_MIN = 0.01 + self.EXPOSURE_TIME_MS_MAX = 4000 + + self.trigger_mode = None + + self.pixel_format = 'MONO16' + + self.is_live = False + + self.Width = 6240 + self.Height = 4168 + self.WidthMax = 6240 + self.HeightMax = 4168 + self.OffsetX = 0 + self.OffsetY = 0 + + self.new_image_callback_external = None + + + def open(self,index=0): + pass + + def set_callback(self,function): + self.new_image_callback_external = function + + def enable_callback(self): + self.callback_is_enabled = True + + def disable_callback(self): + self.callback_is_enabled = False + + def open_by_sn(self,sn): + pass + + def close(self): + pass + + def set_exposure_time(self,exposure_time): + pass + + def set_analog_gain(self,analog_gain): + pass + + def start_streaming(self): + self.frame_ID_software = 0 + + def stop_streaming(self): + pass + + def set_pixel_format(self,pixel_format): + self.pixel_format = pixel_format + self.log.debug(f"Pixel format={pixel_format}") + self.frame_ID = 0 + + def set_continuous_acquisition(self): + pass + + def set_software_triggered_acquisition(self): + pass + + def set_hardware_triggered_acquisition(self): + pass + + def send_trigger(self): + self.log.debug('send trigger') + self.frame_ID = self.frame_ID + 1 + self.timestamp = time.time() + if self.frame_ID == 1: + if self.pixel_format == 'MONO8': + self.current_frame = np.random.randint(255,size=(2000,2000),dtype=np.uint8) + self.current_frame[901:1100,901:1100] = 200 + elif self.pixel_format == 'MONO16': + self.current_frame = np.random.randint(65535,size=(2000,2000),dtype=np.uint16) + self.current_frame[901:1100,901:1100] = 200*256 + else: + self.current_frame = np.roll(self.current_frame,10,axis=0) + pass + # self.current_frame = np.random.randint(255,size=(768,1024),dtype=np.uint8) + if self.new_image_callback_external is not None and self.callback_is_enabled: + self.new_image_callback_external(self) + + def read_frame(self): + return 
self.current_frame + + def _on_frame_callback(self, user_param, raw_image): + pass + + def set_ROI(self,offset_x=None,offset_y=None,width=None,height=None): + pass + + def set_line3_to_strobe(self): + pass diff --git a/software/control/console.py b/software/control/console.py new file mode 100644 index 000000000..63c6be2cb --- /dev/null +++ b/software/control/console.py @@ -0,0 +1,193 @@ +import os +# set QT_API environment variable +os.environ["QT_API"] = "pyqt5" + +import qtpy +from qtpy.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout, QPushButton, QLabel +from qtpy.QtCore import QThread, Signal, Qt, QObject, QMetaObject +import sys +import code +import readline +import rlcompleter +import threading +import traceback +import functools +import inspect + +class QtCompleter: + """Custom completer for Qt objects""" + def __init__(self, namespace): + self.namespace = namespace + + def complete(self, text, state): + if state == 0: + if "." in text: + self.matches = self.attr_matches(text) + else: + # Complete global namespace items + self.matches = self.global_matches(text) + try: + return self.matches[state] + except IndexError: + return None + + def global_matches(self, text): + """Compute matches when text is a simple name.""" + matches = [] + n = len(text) + for word in self.namespace: + if word[:n] == text: + matches.append(word) + return matches + + def attr_matches(self, text): + """Match attributes of an object.""" + # Split the text on dots + parts = text.split('.') + if not parts: + return [] + + # Find the object we're looking for completions on + try: + obj = self.namespace[parts[0]] + for part in parts[1:-1]: + if isinstance(obj, GuiProxy): + obj = obj.target + obj = getattr(obj, part) + + if isinstance(obj, GuiProxy): + obj = obj.target + except (KeyError, AttributeError): + return [] + + # Get the incomplete name we're trying to match + incomplete = parts[-1] + + # Get all possible matches + matches = [] + + try: + # Get standard Python 
attributes + matches.extend(name for name in dir(obj) + if name.startswith(incomplete)) + + # If it's a QObject, also get Qt properties + if isinstance(obj, QObject): + meta = obj.metaObject() + for i in range(meta.propertyCount()): + prop = meta.property(i) + name = prop.name() + if name.startswith(incomplete): + matches.append(name) + + # Get methods with their signatures + if incomplete: + matches.extend( + f"{name}()" for name, member in inspect.getmembers(obj, inspect.ismethod) + if name.startswith(incomplete) + ) + + except Exception as e: + print(f"Error during completion: {e}") + return [] + + # Make the matches into complete dots + if len(parts) > 1: + matches = ['.'.join(parts[:-1] + [m]) for m in matches] + + return matches + +class MainThreadCall(QObject): + """Helper class to execute functions on the main thread""" + execute_signal = Signal(object, tuple, dict) + + def __init__(self): + super().__init__() + self.moveToThread(QApplication.instance().thread()) + self.execute_signal.connect(self._execute) + self._result = None + self._event = threading.Event() + + def _execute(self, func, args, kwargs): + try: + self._result = func(*args, **kwargs) + except Exception as e: + self._result = e + finally: + self._event.set() + + def __call__(self, func, *args, **kwargs): + if QThread.currentThread() is QApplication.instance().thread(): + return func(*args, **kwargs) + + self._event.clear() + self._result = None + self.execute_signal.emit(func, args, kwargs) + self._event.wait() + + if isinstance(self._result, Exception): + raise self._result + return self._result + +class GuiProxy: + """Proxy class to safely execute GUI operations from other threads""" + def __init__(self, target_object): + self.target = target_object + self.main_thread_call = MainThreadCall() + + def __getattr__(self, name): + attr = getattr(self.target, name) + if callable(attr): + @functools.wraps(attr) + def wrapper(*args, **kwargs): + return self.main_thread_call(attr, *args, **kwargs) 
+ return wrapper + return attr + + def __dir__(self): + """Support for auto-completion""" + return dir(self.target) + +class EnhancedInteractiveConsole(code.InteractiveConsole): + """Enhanced console with better completion support""" + def __init__(self, locals=None): + super().__init__(locals) + # Set up readline with our custom completer + self.completer = QtCompleter(locals) + readline.set_completer(self.completer.complete) + readline.parse_and_bind('tab: complete') + + # Use better completion delimiters + readline.set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>?') + + # Set up readline history + import os + histfile = os.path.expanduser("~/.pyqt_console_history") + try: + readline.read_history_file(histfile) + except FileNotFoundError: + pass + import atexit + atexit.register(readline.write_history_file, histfile) + +class ConsoleThread(QThread): + """Thread for running the interactive console""" + def __init__(self, locals_dict): + super().__init__() + self.locals_dict = locals_dict + self.wrapped_locals = { + key: GuiProxy(value) if isinstance(value, QObject) else value + for key, value in locals_dict.items() + } + self.console = EnhancedInteractiveConsole(locals=self.wrapped_locals) + + def run(self): + while True: + try: + self.console.interact(banner=""" +Squid Microscope Console +----------------------- +Use 'microscope' to access the microscope +""") + except SystemExit: + break diff --git a/software/control/core.py b/software/control/core.py index af95e6611..fd3dd7d46 100644 --- a/software/control/core.py +++ b/software/control/core.py @@ -1,54 +1,92 @@ # set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy +import os +import sys # qt libraries +os.environ["QT_API"] = "pyqt5" +import qtpy +import pyqtgraph as pg from qtpy.QtCore import * from qtpy.QtWidgets import * from qtpy.QtGui import * -from control.processing_handler import ProcessingHandler - -import control.utils as utils +# control from control._def 
import * +if DO_FLUORESCENCE_RTP: + from control.processing_handler import ProcessingHandler + from control.processing_pipeline import * + from control.multipoint_built_in_functionalities import malaria_rtp +import control.utils as utils +import control.utils_config as utils_config import control.tracking as tracking +import control.serial_peripherals as serial_peripherals + try: - from control.multipoint_custom_script_entry import * + from control.multipoint_custom_script_entry_v2 import * print('custom multipoint script found') except: pass from queue import Queue from threading import Thread, Lock +from pathlib import Path +from datetime import datetime import time +import subprocess +import shutil +from lxml import etree +import json +import math +import random import numpy as np -import pyqtgraph as pg -import scipy +import pandas as pd import scipy.signal import cv2 -from datetime import datetime - -from lxml import etree as ET -from pathlib import Path -import control.utils_config as utils_config - -import math -import json -import pandas as pd - import imageio as iio -import subprocess import control.celesta as celesta class ObjectiveStore: - def __init__(self, objectives_dict = OBJECTIVES, default_objective = DEFAULT_OBJECTIVE): + def __init__(self, objectives_dict=OBJECTIVES, default_objective=DEFAULT_OBJECTIVE, parent=None): self.objectives_dict = objectives_dict self.default_objective = default_objective self.current_objective = default_objective + self.tube_lens_mm = TUBE_LENS_MM + self.sensor_pixel_size_um = CAMERA_PIXEL_SIZE_UM[CAMERA_SENSOR] + self.pixel_binning = self.get_pixel_binning() + self.pixel_size_um = self.calculate_pixel_size(self.current_objective) + + def get_pixel_size(self): + return self.pixel_size_um + + def calculate_pixel_size(self, objective_name): + objective = self.objectives_dict[objective_name] + magnification = objective["magnification"] + objective_tube_lens_mm = objective["tube_lens_f_mm"] + pixel_size_um = 
self.sensor_pixel_size_um / (magnification / (objective_tube_lens_mm / self.tube_lens_mm)) + pixel_size_um *= self.pixel_binning + return pixel_size_um + + def set_current_objective(self, objective_name): + if objective_name in self.objectives_dict: + self.current_objective = objective_name + self.pixel_size_um = self.calculate_pixel_size(objective_name) + else: + raise ValueError(f"Objective {objective_name} not found in the store.") + + def get_current_objective_info(self): + return self.objectives_dict[self.current_objective] + + def get_pixel_binning(self): + try: + highest_res = max(self.parent.camera.res_list, key=lambda res: res[0] * res[1]) + resolution = self.parent.camera.resolution + pixel_binning = max(1, highest_res[0] / resolution[0]) + except AttributeError: + pixel_binning = 1 + return pixel_binning class StreamHandler(QObject): @@ -121,7 +159,8 @@ def on_new_frame(self, camera): self.timestamp_last = timestamp_now self.fps_real = self.counter self.counter = 0 - print('real camera fps is ' + str(self.fps_real)) + if PRINT_CAMERA_FPS: + print('real camera fps is ' + str(self.fps_real)) # moved down (so that it does not modify the camera.current_frame, which causes minor problems for simulation) - 1/30/2022 # # rotate and flip - eventually these should be done in the camera @@ -130,11 +169,11 @@ def on_new_frame(self, camera): # crop image image_cropped = utils.crop_image(camera.current_frame,self.crop_width,self.crop_height) image_cropped = np.squeeze(image_cropped) - + # # rotate and flip - moved up (1/10/2022) # image_cropped = utils.rotate_and_flip_image(image_cropped,rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) # added on 1/30/2022 - # @@@ to move to camera + # @@@ to move to camera image_cropped = utils.rotate_and_flip_image(image_cropped,rotate_image_angle=camera.rotate_image_angle,flip_image=camera.flip_image) # send image to display @@ -164,7 +203,7 @@ def on_new_frame(self, camera): ''' def 
on_new_frame_from_simulation(self,image,frame_ID,timestamp): # check whether image is a local copy or pointer, if a pointer, needs to prevent the image being modified while this function is being executed - + self.handler_busy = True # crop image @@ -236,7 +275,7 @@ def process_queue(self): self.image_lock.release() except: pass - + def enqueue(self,image,frame_ID,timestamp): try: self.queue.put_nowait([image,frame_ID,timestamp]) @@ -255,7 +294,7 @@ def set_recording_time_limit(self,time_limit): def start_new_experiment(self,experiment_ID,add_timestamp=True): if add_timestamp: # generate unique experiment ID - self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') else: self.experiment_ID = experiment_ID self.recording_start_time = time.time() @@ -310,7 +349,7 @@ def process_queue(self): self.image_lock.release() except: pass - + def enqueue(self,image,frame_counter,postfix): try: self.queue.put_nowait([image,frame_counter,postfix]) @@ -337,8 +376,8 @@ def __init__(self): self.image_lock = Lock() self.stop_signal_received = False self.thread = Thread(target=self.process_queue) - self.thread.start() - + self.thread.start() + def process_queue(self): while True: # stop the thread if stop signal is received @@ -372,9 +411,10 @@ def close(self): self.thread.join() class Configuration: - def __init__(self,mode_id=None,name=None,camera_sn=None,exposure_time=None,analog_gain=None,illumination_source=None,illumination_intensity=None, z_offset=None, pixel_format=None, _pixel_format_options=None): + def __init__(self,mode_id=None,name=None,color=None,camera_sn=None,exposure_time=None,analog_gain=None,illumination_source=None,illumination_intensity=None,z_offset=None,pixel_format=None,_pixel_format_options=None,emission_filter_position=None): self.id = mode_id self.name = name + self.color = color self.exposure_time = exposure_time self.analog_gain 
= analog_gain self.illumination_source = illumination_source @@ -387,11 +427,13 @@ def __init__(self,mode_id=None,name=None,camera_sn=None,exposure_time=None,analo self._pixel_format_options = _pixel_format_options if _pixel_format_options is None: self._pixel_format_options = self.pixel_format + self.emission_filter_position = emission_filter_position -class LiveController(QObject): - def __init__(self,camera,microcontroller,configurationManager,control_illumination=True,use_internal_timer_for_hardware_trigger=True,for_displacement_measurement=False): +class LiveController(QObject): + def __init__(self,camera,microcontroller,configurationManager,parent=None,control_illumination=True,use_internal_timer_for_hardware_trigger=True,for_displacement_measurement=False): QObject.__init__(self) + self.microscope = parent self.camera = camera self.microcontroller = microcontroller self.configurationManager = configurationManager @@ -418,37 +460,127 @@ def __init__(self,camera,microcontroller,configurationManager,control_illuminati self.display_resolution_scaling = DEFAULT_DISPLAY_CROP/100 - self.celesta = celesta.CELESTA(ip = '192.168.201.200') - self.activte_celesta_channel = None + self.enable_channel_auto_filter_switching = True + + if USE_LDI_SERIAL_CONTROL: + self.ldi = self.microscope.ldi + + if SUPPORT_SCIMICROSCOPY_LED_ARRAY: + # to do: add error handling + self.led_array = serial_peripherals.SciMicroscopyLEDArray(SCIMICROSCOPY_LED_ARRAY_SN,SCIMICROSCOPY_LED_ARRAY_DISTANCE,SCIMICROSCOPY_LED_ARRAY_TURN_ON_DELAY) + self.led_array.set_NA(SCIMICROSCOPY_LED_ARRAY_DEFAULT_NA) # illumination control def turn_on_illumination(self): - if 'Fluorescence' in self.currentConfiguration.name: - self.celesta.setLaserOnOff(self.activte_celesta_channel,True) + if USE_LDI_SERIAL_CONTROL and 'Fluorescence' in self.currentConfiguration.name and LDI_SHUTTER_MODE == 'PC': + self.ldi.set_active_channel_shutter(1) + elif SUPPORT_SCIMICROSCOPY_LED_ARRAY and 'LED matrix' in 
self.currentConfiguration.name: + self.led_array.turn_on_illumination() else: self.microcontroller.turn_on_illumination() self.illumination_on = True def turn_off_illumination(self): - if 'Fluorescence' in self.currentConfiguration.name: - self.celesta.setLaserOnOff(self.activte_celesta_channel,False) + if USE_LDI_SERIAL_CONTROL and 'Fluorescence' in self.currentConfiguration.name and LDI_SHUTTER_MODE == 'PC': + self.ldi.set_active_channel_shutter(0) + elif SUPPORT_SCIMICROSCOPY_LED_ARRAY and 'LED matrix' in self.currentConfiguration.name: + self.led_array.turn_off_illumination() else: self.microcontroller.turn_off_illumination() self.illumination_on = False - def set_illumination(self,illumination_source,intensity): + def set_illumination(self,illumination_source,intensity,update_channel_settings=True): if illumination_source < 10: # LED matrix - self.microcontroller.set_illumination_led_matrix(illumination_source,r=(intensity/100)*LED_MATRIX_R_FACTOR,g=(intensity/100)*LED_MATRIX_G_FACTOR,b=(intensity/100)*LED_MATRIX_B_FACTOR) + if SUPPORT_SCIMICROSCOPY_LED_ARRAY: + # set color + if 'BF LED matrix full_R' in self.currentConfiguration.name: + self.led_array.set_color((1,0,0)) + elif 'BF LED matrix full_G' in self.currentConfiguration.name: + self.led_array.set_color((0,1,0)) + elif 'BF LED matrix full_B' in self.currentConfiguration.name: + self.led_array.set_color((0,0,1)) + else: + self.led_array.set_color(SCIMICROSCOPY_LED_ARRAY_DEFAULT_COLOR) + # set intensity + self.led_array.set_brightness(intensity) + # set mode + if 'BF LED matrix left half' in self.currentConfiguration.name: + self.led_array.set_illumination('dpc.l') + if 'BF LED matrix right half' in self.currentConfiguration.name: + self.led_array.set_illumination('dpc.r') + if 'BF LED matrix top half' in self.currentConfiguration.name: + self.led_array.set_illumination('dpc.t') + if 'BF LED matrix bottom half' in self.currentConfiguration.name: + self.led_array.set_illumination('dpc.b') + if 'BF LED 
matrix full' in self.currentConfiguration.name: + self.led_array.set_illumination('bf') + if 'DF LED matrix' in self.currentConfiguration.name: + self.led_array.set_illumination('df') + else: + if 'BF LED matrix full_R' in self.currentConfiguration.name: + self.microcontroller.set_illumination_led_matrix(illumination_source,r=(intensity/100),g=0,b=0) + elif 'BF LED matrix full_G' in self.currentConfiguration.name: + self.microcontroller.set_illumination_led_matrix(illumination_source,r=0,g=(intensity/100),b=0) + elif 'BF LED matrix full_B' in self.currentConfiguration.name: + self.microcontroller.set_illumination_led_matrix(illumination_source,r=0,g=0,b=(intensity/100)) + else: + self.microcontroller.set_illumination_led_matrix(illumination_source,r=(intensity/100)*LED_MATRIX_R_FACTOR,g=(intensity/100)*LED_MATRIX_G_FACTOR,b=(intensity/100)*LED_MATRIX_B_FACTOR) else: - if 'Fluorescence' in self.currentConfiguration.name: - laser_id = illumination_source-20 - print('set active channel to ' + str(laser_id)) - self.activte_celesta_channel = int(laser_id) - print('set intensity') - self.celesta.set_intensity(self.activte_celesta_channel,intensity*10) + # update illumination + if USE_LDI_SERIAL_CONTROL and 'Fluorescence' in self.currentConfiguration.name: + if LDI_SHUTTER_MODE == 'PC': + # set LDI active channel + print('set active channel to ' + str(illumination_source)) + self.ldi.set_active_channel(int(illumination_source)) + if LDI_INTENSITY_MODE == 'PC': + if update_channel_settings: + # set intensity for active channel + print('set intensity') + self.ldi.set_intensity(int(illumination_source),intensity) + if LDI_SHUTTER_MODE == "EXT" or LDI_INTENSITY_MODE == "EXT": + self.microcontroller.set_illumination(illumination_source,intensity) + elif ENABLE_NL5 and NL5_USE_DOUT and 'Fluorescence' in self.currentConfiguration.name: + wavelength = int(self.currentConfiguration.name[13:16]) + self.microscope.nl5.set_active_channel(NL5_WAVENLENGTH_MAP[wavelength]) + if 
NL5_USE_AOUT and update_channel_settings: + self.microscope.nl5.set_laser_power(NL5_WAVENLENGTH_MAP[wavelength],int(intensity)) + if ENABLE_CELLX: + self.microscope.cellx.set_laser_power(NL5_WAVENLENGTH_MAP[wavelength],int(intensity)) else: self.microcontroller.set_illumination(illumination_source,intensity) + # set emission filter position + if ENABLE_SPINNING_DISK_CONFOCAL: + try: + self.microscope.xlight.set_emission_filter(XLIGHT_EMISSION_FILTER_MAPPING[illumination_source],extraction=False,validate=XLIGHT_VALIDATE_WHEEL_POS) + except Exception as e: + print('not setting emission filter position due to ' + str(e)) + + if USE_ZABER_EMISSION_FILTER_WHEEL and self.enable_channel_auto_filter_switching: + try: + if self.currentConfiguration.emission_filter_position != self.microscope.emission_filter_wheel.current_index: + if ZABER_EMISSION_FILTER_WHEEL_BLOCKING_CALL: + self.microscope.emission_filter_wheel.set_emission_filter(self.currentConfiguration.emission_filter_position,blocking=True) + else: + self.microscope.emission_filter_wheel.set_emission_filter(self.currentConfiguration.emission_filter_position,blocking=False) + if self.trigger_mode == TriggerMode.SOFTWARE: + time.sleep(ZABER_EMISSION_FILTER_WHEEL_DELAY_MS/1000) + else: + time.sleep(max(0,ZABER_EMISSION_FILTER_WHEEL_DELAY_MS/1000-self.camera.strobe_delay_us/1e6)) + except Exception as e: + print('not setting emission filter position due to ' + str(e)) + + if USE_OPTOSPIN_EMISSION_FILTER_WHEEL and self.enable_channel_auto_filter_switching and OPTOSPIN_EMISSION_FILTER_WHEEL_TTL_TRIGGER == False: + try: + if self.currentConfiguration.emission_filter_position != self.microscope.emission_filter_wheel.current_index: + self.microscope.emission_filter_wheel.set_emission_filter(self.currentConfiguration.emission_filter_position) + if self.trigger_mode == TriggerMode.SOFTWARE: + time.sleep(OPTOSPIN_EMISSION_FILTER_WHEEL_DELAY_MS/1000) + elif self.trigger_mode == TriggerMode.HARDWARE: + 
time.sleep(max(0,OPTOSPIN_EMISSION_FILTER_WHEEL_DELAY_MS/1000-self.camera.strobe_delay_us/1e6)) + except Exception as e: + print('not setting emission filter position due to ' + str(e)) + def start_live(self): self.is_live = True self.camera.is_live = True @@ -497,7 +629,10 @@ def trigger_acquisition(self): # print('real trigger fps is ' + str(self.fps_real)) elif self.trigger_mode == TriggerMode.HARDWARE: self.trigger_ID = self.trigger_ID + 1 - self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) + if ENABLE_NL5 and NL5_USE_DOUT: + self.microscope.nl5.start_acquisition() + else: + self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) def _start_triggerred_acquisition(self): self.timer_trigger.start() @@ -523,10 +658,12 @@ def set_trigger_mode(self,mode): self._stop_triggerred_acquisition() # self.camera.reset_camera_acquisition_counter() self.camera.set_hardware_triggered_acquisition() - self.microcontroller.set_strobe_delay_us(self.camera.strobe_delay_us) + self.reset_strobe_arugment() + self.camera.set_exposure_time(self.currentConfiguration.exposure_time) + if self.is_live and self.use_internal_timer_for_hardware_trigger: self._start_triggerred_acquisition() - if mode == TriggerMode.CONTINUOUS: + if mode == TriggerMode.CONTINUOUS: if ( self.trigger_mode == TriggerMode.SOFTWARE ) or ( self.trigger_mode == TriggerMode.HARDWARE and self.use_internal_timer_for_hardware_trigger ): self._stop_triggerred_acquisition() self.camera.set_continuous_acquisition() @@ -535,14 +672,14 @@ def set_trigger_mode(self,mode): def set_trigger_fps(self,fps): if ( self.trigger_mode == TriggerMode.SOFTWARE ) or ( self.trigger_mode == TriggerMode.HARDWARE and self.use_internal_timer_for_hardware_trigger ): self._set_trigger_fps(fps) - + # set microscope mode # @@@ to do: change softwareTriggerGenerator to TriggerGeneratror def 
def set_display_resolution_scaling(self, display_resolution_scaling):
    """Store the live-view scaling factor, converting a percentage to a fraction."""
    self.display_resolution_scaling = display_resolution_scaling / 100

def reset_strobe_arugment(self):
    """Recalculate the camera strobe delay and push it to the microcontroller.

    The misspelled name ("arugment") is kept for compatibility with callers
    (e.g. set_trigger_mode).
    """
    # Cameras that expose calculate_hardware_trigger_arguments() refresh their
    # strobe_delay_us first; cameras without it keep the current value.
    try:
        recalculate = self.camera.calculate_hardware_trigger_arguments
        recalculate()
    except AttributeError:
        pass
    self.microcontroller.set_strobe_delay_us(self.camera.strobe_delay_us)
def get_mm_per_ustep_X(self):
    """Stage X travel in mm per microstep: leadscrew pitch over usteps per revolution."""
    usteps_per_rev = self.x_microstepping * FULLSTEPS_PER_REV_X
    return SCREW_PITCH_X_MM / usteps_per_rev

def get_mm_per_ustep_Y(self):
    """Stage Y travel in mm per microstep."""
    usteps_per_rev = self.y_microstepping * FULLSTEPS_PER_REV_Y
    return SCREW_PITCH_Y_MM / usteps_per_rev

def get_mm_per_ustep_Z(self):
    """Stage Z travel in mm per microstep."""
    usteps_per_rev = self.z_microstepping * FULLSTEPS_PER_REV_Z
    return SCREW_PITCH_Z_MM / usteps_per_rev
def set_flag_click_to_move(self, flag):
    """Enable or disable click-to-move stage navigation."""
    self.click_to_move = flag

def get_flag_click_to_move(self):
    """Return True when click-to-move navigation is enabled."""
    return self.click_to_move

def scan_preview_move_from_click(self, click_x, click_y, image_width, image_height, Nx=1, Ny=1, dx_mm=0.9, dy_mm=0.9):
    """Move the stage to the location clicked on a (possibly tiled) scan preview.

    napariTiledDisplay uses the Nx, Ny, dx_mm, dy_mm fields to move to the
    correct FOV first; imageArrayDisplayWindow assumes a single FOV (the
    default values then do not impact the calculation, but this is less correct).
    """
    # check if click to move is enabled
    if not self.click_to_move:
        # BUGFIX: previously printed "allow click to move", which suggested the
        # opposite of what this branch means.
        print("click to move is not enabled")
        return
    # restore to raw (corner-origin) coordinates
    click_x = image_width / 2.0 + click_x
    click_y = image_height / 2.0 - click_y
    print("click - (x, y):", (click_x, click_y))
    fov_col = click_x * Nx // image_width
    fov_row = click_y * Ny // image_height
    # NOTE(review): end_position_x is derived from Ny parity; confirm this is
    # intended (serpentine scans usually flip direction per row, i.e. fov_row).
    end_position_x = Ny % 2  # right side or left side
    fov_col = Nx - (fov_col + 1) if end_position_x else fov_col
    pixel_sign_x = (-1) ** end_position_x  # inverted
    pixel_sign_y = -1 if INVERTED_OBJECTIVE else 1

    # move to the selected FOV
    x_pos = self.scan_begin_position_x + dx_mm * fov_col * pixel_sign_x
    y_pos = self.scan_begin_position_y + dy_mm * fov_row * pixel_sign_y
    self.move_to(x_pos, y_pos)

    # move to the actual click location, as an offset from the FOV center
    tile_width = (image_width / Nx) * PRVIEW_DOWNSAMPLE_FACTOR
    tile_height = (image_height / Ny) * PRVIEW_DOWNSAMPLE_FACTOR
    offset_x = (click_x * PRVIEW_DOWNSAMPLE_FACTOR) % tile_width
    offset_y = (click_y * PRVIEW_DOWNSAMPLE_FACTOR) % tile_height
    offset_x_centered = int(offset_x - tile_width / 2)
    offset_y_centered = int(tile_height / 2 - offset_y)
    self.move_from_click(offset_x_centered, offset_y_centered, tile_width, tile_height)

def move_from_click(self, click_x, click_y, image_width, image_height):
    """Translate a click offset (pixels from image center) into a relative stage move.

    image_width/image_height are accepted for interface compatibility but are
    not used by the current pixel-size-based computation.
    """
    if self.click_to_move:
        pixel_size_um = self.objectiveStore.get_pixel_size()
        pixel_sign_x = 1
        pixel_sign_y = 1 if INVERTED_OBJECTIVE else -1
        delta_x = pixel_sign_x * pixel_size_um * click_x / 1000.0  # um -> mm
        delta_y = pixel_sign_y * pixel_size_um * click_y / 1000.0
        self.move_x(delta_x)
        self.microcontroller.wait_till_operation_is_completed()
        self.move_y(delta_y)
        self.microcontroller.wait_till_operation_is_completed()

def move_from_click_mosaic(self, x_mm, y_mm):
    """Move to an absolute position selected by clicking the mosaic view."""
    if self.click_to_move:
        self.move_to(x_mm, y_mm)

def cache_current_position(self):
    """Persist the current XYZ position, but only when it lies inside the software limits."""
    if (SOFTWARE_POS_LIMIT.X_NEGATIVE <= self.x_pos_mm <= SOFTWARE_POS_LIMIT.X_POSITIVE and
            SOFTWARE_POS_LIMIT.Y_NEGATIVE <= self.y_pos_mm <= SOFTWARE_POS_LIMIT.Y_POSITIVE and
            SOFTWARE_POS_LIMIT.Z_NEGATIVE <= self.z_pos_mm <= SOFTWARE_POS_LIMIT.Z_POSITIVE):
        with open("cache/last_coords.txt","w") as f:
            f.write(",".join([str(self.x_pos_mm),str(self.y_pos_mm),str(self.z_pos_mm)]))

def move_x(self,delta):
    """Relative X move of `delta` mm, converted to microsteps."""
    self.microcontroller.move_x_usteps(int(delta/self.get_mm_per_ustep_X()))
def move_y(self,delta):
    """Relative Y move of `delta` mm, converted to microsteps."""
    usteps = int(delta / self.get_mm_per_ustep_Y())
    self.microcontroller.move_y_usteps(usteps)

def move_z(self,delta):
    """Relative Z move of `delta` mm, converted to microsteps."""
    usteps = int(delta / self.get_mm_per_ustep_Z())
    self.microcontroller.move_z_usteps(usteps)

def move_x_to(self,delta):
    """Absolute X move to `delta` mm, sign-corrected for stage orientation."""
    usteps = STAGE_MOVEMENT_SIGN_X * int(delta / self.get_mm_per_ustep_X())
    self.microcontroller.move_x_to_usteps(usteps)

def move_y_to(self,delta):
    """Absolute Y move to `delta` mm, sign-corrected for stage orientation."""
    usteps = STAGE_MOVEMENT_SIGN_Y * int(delta / self.get_mm_per_ustep_Y())
    self.microcontroller.move_y_to_usteps(usteps)

def move_z_to(self,delta):
    """Absolute Z move to `delta` mm, sign-corrected for stage orientation."""
    usteps = STAGE_MOVEMENT_SIGN_Z * int(delta / self.get_mm_per_ustep_Z())
    self.microcontroller.move_z_to_usteps(usteps)

def move_x_usteps(self,usteps):
    """Relative X move expressed directly in microsteps."""
    self.microcontroller.move_x_usteps(usteps)

def move_y_usteps(self,usteps):
    """Relative Y move expressed directly in microsteps."""
    self.microcontroller.move_y_usteps(usteps)

def move_z_usteps(self,usteps):
    """Relative Z move expressed directly in microsteps."""
    self.microcontroller.move_z_usteps(usteps)

def move_x_to_usteps(self,usteps):
    """Absolute X move expressed directly in microsteps."""
    self.microcontroller.move_x_to_usteps(usteps)

def move_y_to_usteps(self,usteps):
    """Absolute Y move expressed directly in microsteps."""
    self.microcontroller.move_y_to_usteps(usteps)

def move_z_to_usteps(self,usteps):
    """Absolute Z move expressed directly in microsteps."""
    self.microcontroller.move_z_to_usteps(usteps)
def zero_z(self):
    """Zero the Z axis position on the microcontroller."""
    self.microcontroller.zero_z()

def zero_theta(self):
    """Zero the theta axis position on the microcontroller."""
    self.microcontroller.zero_theta()

def home(self):
    # homing is handled elsewhere; method kept for interface compatibility
    pass

def set_x_limit_pos_mm(self,value_mm):
    """Set the positive X software limit (mm); sign-flipped stages map it to the negative limit code."""
    if STAGE_MOVEMENT_SIGN_X > 0:
        self.microcontroller.set_lim(LIMIT_CODE.X_POSITIVE,int(value_mm/self.get_mm_per_ustep_X()))
    else:
        self.microcontroller.set_lim(LIMIT_CODE.X_NEGATIVE,STAGE_MOVEMENT_SIGN_X*int(value_mm/self.get_mm_per_ustep_X()))

def set_x_limit_neg_mm(self,value_mm):
    """Set the negative X software limit (mm); sign-flipped stages map it to the positive limit code."""
    if STAGE_MOVEMENT_SIGN_X > 0:
        self.microcontroller.set_lim(LIMIT_CODE.X_NEGATIVE,int(value_mm/self.get_mm_per_ustep_X()))
    else:
        self.microcontroller.set_lim(LIMIT_CODE.X_POSITIVE,STAGE_MOVEMENT_SIGN_X*int(value_mm/self.get_mm_per_ustep_X()))

def set_y_limit_pos_mm(self,value_mm):
    """Set the positive Y software limit (mm)."""
    if STAGE_MOVEMENT_SIGN_Y > 0:
        self.microcontroller.set_lim(LIMIT_CODE.Y_POSITIVE,int(value_mm/self.get_mm_per_ustep_Y()))
    else:
        self.microcontroller.set_lim(LIMIT_CODE.Y_NEGATIVE,STAGE_MOVEMENT_SIGN_Y*int(value_mm/self.get_mm_per_ustep_Y()))

def set_y_limit_neg_mm(self,value_mm):
    """Set the negative Y software limit (mm)."""
    if STAGE_MOVEMENT_SIGN_Y > 0:
        self.microcontroller.set_lim(LIMIT_CODE.Y_NEGATIVE,int(value_mm/self.get_mm_per_ustep_Y()))
    else:
        self.microcontroller.set_lim(LIMIT_CODE.Y_POSITIVE,STAGE_MOVEMENT_SIGN_Y*int(value_mm/self.get_mm_per_ustep_Y()))

def set_z_limit_pos_mm(self,value_mm):
    """Set the positive Z software limit (mm)."""
    if STAGE_MOVEMENT_SIGN_Z > 0:
        self.microcontroller.set_lim(LIMIT_CODE.Z_POSITIVE,int(value_mm/self.get_mm_per_ustep_Z()))
    else:
        self.microcontroller.set_lim(LIMIT_CODE.Z_NEGATIVE,STAGE_MOVEMENT_SIGN_Z*int(value_mm/self.get_mm_per_ustep_Z()))

def set_z_limit_neg_mm(self,value_mm):
    """Set the negative Z software limit (mm)."""
    if STAGE_MOVEMENT_SIGN_Z > 0:
        self.microcontroller.set_lim(LIMIT_CODE.Z_NEGATIVE,int(value_mm/self.get_mm_per_ustep_Z()))
    else:
        self.microcontroller.set_lim(LIMIT_CODE.Z_POSITIVE,STAGE_MOVEMENT_SIGN_Z*int(value_mm/self.get_mm_per_ustep_Z()))

def move_to(self,x_mm,y_mm):
    """Move to absolute (x_mm, y_mm), waiting for each axis move to finish."""
    self.move_x_to(x_mm)
    self.microcontroller.wait_till_operation_is_completed()
    self.move_y_to(y_mm)
    self.microcontroller.wait_till_operation_is_completed()

def configure_encoder(self, axis, transitions_per_revolution,flip_direction):
    """Configure the encoder used by the stage PID loop for `axis`."""
    self.microcontroller.configure_stage_pid(axis, transitions_per_revolution=int(transitions_per_revolution), flip_direction=flip_direction)

def set_pid_control_enable(self, axis, enable_flag):
    """Enable or disable closed-loop (PID) control for `axis` and remember the state.

    BUGFIX: the flag is normalized with bool() — previously an `is True`
    comparison meant truthy non-True values (e.g. 1, a Qt check state) set the
    flag but turned PID *off*. Also dropped a stray trailing semicolon.
    """
    self.pid_enable_flag[axis] = bool(enable_flag)
    if self.pid_enable_flag[axis]:
        self.microcontroller.turn_on_stage_pid(axis)
    else:
        self.microcontroller.turn_off_stage_pid(axis)

def turnoff_axis_pid_control(self):
    """Turn off PID on every axis that currently has it enabled."""
    for axis, enabled in enumerate(self.pid_enable_flag):
        if enabled:
            self.microcontroller.turn_off_stage_pid(axis)

def get_pid_control_flag(self, axis):
    """Return the cached PID-enable state for `axis`."""
    return self.pid_enable_flag[axis]

def keep_scan_begin_position(self, x, y):
    """Remember the scan origin used by scan_preview_move_from_click()."""
    self.scan_begin_position_x = x
    self.scan_begin_position_y = y

def set_axis_PID_arguments(self, axis, pid_p, pid_i, pid_d):
    """Forward PID gains for `axis` to the microcontroller."""
    self.microcontroller.set_pid_arguments(axis, pid_p, pid_i, pid_d)

def set_piezo_um(self, z_piezo_um):
    """Drive the objective piezo to z_piezo_um via the onboard 16-bit DAC (channel 7)."""
    dac = int(65535 * (z_piezo_um / OBJECTIVE_PIEZO_RANGE_UM))
    dac = 65535 - dac if OBJECTIVE_PIEZO_FLIP_DIR else dac
    # NOTE(review): dac is not clamped to [0, 65535]; out-of-range z_piezo_um
    # would overflow the DAC word — confirm callers pre-validate.
    self.microcontroller.analog_write_onboard_DAC(7, dac)
+1052,7 @@ def wait_till_operation_is_completed(self,timestamp_start, SLIDE_POTISION_SWITCH print('Error - slide position switching timeout, the program will exit') self.navigationController.move_x(0) self.navigationController.move_y(0) - exit() + sys.exit(1) def move_to_slide_loading_position(self): was_live = self.liveController.is_live @@ -846,7 +1066,7 @@ def move_to_slide_loading_position(self): self.wait_till_operation_is_completed(timestamp_start, SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) print('z retracted') self.slidePositionController.objective_retracted = True - + # move to position # for well plate if self.slidePositionController.is_for_wellplate: @@ -955,7 +1175,7 @@ def move_to_slide_scanning_position(self): self.navigationController.move_y_to(SLIDE_POSITION.SCANNING_Y_MM) self.wait_till_operation_is_completed(timestamp_start, SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) - + else: timestamp_start = time.time() self.navigationController.move_x_to(SLIDE_POSITION.SCANNING_X_MM) @@ -996,20 +1216,25 @@ def move_to_slide_scanning_position(self): # restore z if self.slidePositionController.objective_retracted: - _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) - self.navigationController.microcontroller.move_z_to_usteps(self.slidePositionController.z_pos - STAGE_MOVEMENT_SIGN_Z*_usteps_to_clear_backlash) - self.wait_till_operation_is_completed(timestamp_start, SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) - self.navigationController.move_z_usteps(_usteps_to_clear_backlash) - self.wait_till_operation_is_completed(timestamp_start, SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) + if self.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) + self.navigationController.move_z_to_usteps(self.slidePositionController.z_pos - STAGE_MOVEMENT_SIGN_Z*_usteps_to_clear_backlash) + self.wait_till_operation_is_completed(timestamp_start, 
SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) + self.navigationController.move_z_usteps(_usteps_to_clear_backlash) + self.wait_till_operation_is_completed(timestamp_start, SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) + else: + self.navigationController.move_z_to_usteps(self.slidePositionController.z_pos) + self.wait_till_operation_is_completed(timestamp_start, SLIDE_POTISION_SWITCHING_TIMEOUT_LIMIT_S) self.slidePositionController.objective_retracted = False print('z position restored') - + if was_live: self.signal_resume_live.emit() self.slidePositionController.slide_scanning_position_reached = True self.finished.emit() + class SlidePositionController(QObject): signal_slide_loading_position_reached = Signal() @@ -1095,7 +1320,7 @@ def __init__(self,autofocusController): self.N = self.autofocusController.N self.deltaZ = self.autofocusController.deltaZ self.deltaZ_usteps = self.autofocusController.deltaZ_usteps - + self.crop_width = self.autofocusController.crop_width self.crop_height = self.autofocusController.crop_height @@ -1119,34 +1344,46 @@ def run_autofocus(self): # maneuver for achiving uniform step size and repeatability when using open-loop control # can be moved to the firmware - _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) - self.navigationController.move_z_usteps(-_usteps_to_clear_backlash-z_af_offset_usteps) - self.wait_till_operation_is_completed() - self.navigationController.move_z_usteps(_usteps_to_clear_backlash) - self.wait_till_operation_is_completed() + if self.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) + self.navigationController.move_z_usteps(-_usteps_to_clear_backlash-z_af_offset_usteps) + self.wait_till_operation_is_completed() + self.navigationController.move_z_usteps(_usteps_to_clear_backlash) + self.wait_till_operation_is_completed() + else: + self.navigationController.move_z_usteps(-z_af_offset_usteps) + 
self.wait_till_operation_is_completed() steps_moved = 0 for i in range(self.N): self.navigationController.move_z_usteps(self.deltaZ_usteps) self.wait_till_operation_is_completed() steps_moved = steps_moved + 1 - # trigger acquisition (including turning on the illumination) + # trigger acquisition (including turning on the illumination) and read frame if self.liveController.trigger_mode == TriggerMode.SOFTWARE: self.liveController.turn_on_illumination() self.wait_till_operation_is_completed() self.camera.send_trigger() + image = self.camera.read_frame() elif self.liveController.trigger_mode == TriggerMode.HARDWARE: - self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) - # read camera frame - image = self.camera.read_frame() + if 'Fluorescence' in config.name and ENABLE_NL5 and NL5_USE_DOUT: + self.camera.image_is_ready = False # to remove + self.microscope.nl5.start_acquisition() + image = self.camera.read_frame(reset_image_ready_flag=False) + else: + self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) + image = self.camera.read_frame() if image is None: continue # tunr of the illumination if using software trigger if self.liveController.trigger_mode == TriggerMode.SOFTWARE: self.liveController.turn_off_illumination() + image = utils.crop_image(image,self.crop_width,self.crop_height) image = utils.rotate_and_flip_image(image,rotate_image_angle=self.camera.rotate_image_angle,flip_image=self.camera.flip_image) self.image_to_display.emit(image) + #image_to_display = utils.crop_image(image,round(self.crop_width* self.liveController.display_resolution_scaling), round(self.crop_height* self.liveController.display_resolution_scaling)) + QApplication.processEvents() timestamp_0 = time.time() focus_measure = utils.calculate_focus_measure(image,FOCUS_MEASURE_OPERATOR) @@ -1158,17 +1395,28 @@ def run_autofocus(self): if focus_measure 
def set_N(self,N):
    """Set the number of autofocus Z steps."""
    self.N = N

def set_deltaZ(self,deltaZ_um):
    """Set the autofocus Z step size (given in um) and its microstep equivalent."""
    mm_per_ustep_Z = self.navigationController.get_mm_per_ustep_Z()
    delta_mm = deltaZ_um / 1000
    self.deltaZ = delta_mm
    self.deltaZ_usteps = round(delta_mm / mm_per_ustep_Z)
= Signal(int, int) def __init__(self,multiPointController): QObject.__init__(self) @@ -1395,7 +1650,8 @@ def __init__(self,multiPointController): self.signal_update_stats.connect(self.update_stats) self.start_time = 0 - self.processingHandler = multiPointController.processingHandler + if DO_FLUORESCENCE_RTP: + self.processingHandler = multiPointController.processingHandler self.camera = self.multiPointController.camera self.microcontroller = self.multiPointController.microcontroller self.usb_spectrometer = self.multiPointController.usb_spectrometer @@ -1423,20 +1679,49 @@ def __init__(self,multiPointController): self.experiment_ID = self.multiPointController.experiment_ID self.base_path = self.multiPointController.base_path self.selected_configurations = self.multiPointController.selected_configurations + self.use_piezo = self.multiPointController.use_piezo self.detection_stats = {} self.async_detection_stats = {} - self.timestamp_acquisition_started = self.multiPointController.timestamp_acquisition_started self.time_point = 0 + self.af_fov_count = 0 + self.num_fovs = 0 + self.total_scans = 0 + if self.multiPointController.coordinate_dict is not None: + self.coordinate_dict = self.multiPointController.coordinate_dict.copy() + else: + self.coordinate_dict = None + self.use_scan_coordinates = self.multiPointController.use_scan_coordinates + self.scan_coordinates_mm = self.multiPointController.scan_coordinates_mm + self.scan_coordinates_name = self.multiPointController.scan_coordinates_name + self.z_stacking_config = self.multiPointController.z_stacking_config + self.z_range = self.multiPointController.z_range self.microscope = self.multiPointController.parent + self.performance_mode = self.microscope.performance_mode + + try: + self.model = self.microscope.segmentation_model + except: + pass + self.crop = SEGMENTATION_CROP self.t_dpc = [] self.t_inf = [] - self.t_over=[] - + self.t_over = [] + + if USE_NAPARI_FOR_MULTIPOINT or USE_NAPARI_FOR_TILED_DISPLAY: + 
def update_use_piezo(self, value):
    """Toggle piezo-based Z stepping for this worker."""
    self.use_piezo = value
    print("MultiPointWorker: updated use_piezo to", value)

def run(self):
    """Acquire all Nt time points, pacing them dt seconds apart."""
    self.start_time = time.perf_counter_ns()
    if not self.camera.is_streaming:
        self.camera.start_streaming()

    while self.time_point < self.Nt:
        # stop early when the controller requested an abort
        if self.multiPointController.abort_acqusition_requested:
            break
        self.run_single_time_point()
        self.time_point = self.time_point + 1
        if self.dt == 0:  # continuous acquisition: no pacing
            pass
        else:  # timed acquisition
            # if a time point overran dt (or multiples of it), skip the missed point(s)
            while time.time() > self.timestamp_acquisition_started + self.time_point*self.dt:
                print('skip time point ' + str(self.time_point+1))
                self.time_point = self.time_point+1
            if self.time_point == self.Nt:
                break  # no waiting after taking the last time point
            # wait until the next acquisition is due, polling for aborts
            while time.time() < self.timestamp_acquisition_started + self.time_point*self.dt:
                if self.multiPointController.abort_acqusition_requested:
                    break
                time.sleep(0.05)

    elapsed_time = time.perf_counter_ns() - self.start_time
    print("Time taken for acquisition: " + str(elapsed_time/10**9))

    # drain and close the real-time processing pipeline, if enabled
    if DO_FLUORESCENCE_RTP:
        self.processingHandler.processing_queue.join()
        self.processingHandler.upload_queue.join()
        self.processingHandler.end_processing()

    print("Time taken for acquisition/processing: ", (time.perf_counter_ns() - self.start_time) / 1e9)
    self.finished.emit()
self.experiment_ID) - # create a dataframe to save coordinates - self.coordinates_pd = pd.DataFrame(columns = ['i', 'j', 'k', 'x (mm)', 'y (mm)', 'z (um)', 'time']) + self.initialize_coordinates_dataframe() - n_regions = len(self.scan_coordinates_mm) + # init z parameters, z range + self.initialize_z_stack() - for coordinate_id in range(n_regions): + self.run_coordinate_acquisition(current_path) - coordiante_mm = self.scan_coordinates_mm[coordinate_id] - print(coordiante_mm) + # finished region scan + self.coordinates_pd.to_csv(os.path.join(current_path,'coordinates.csv'),index=False,header=True) + utils.create_done_file(current_path) + self.navigationController.enable_joystick_button_action = True + print(time.time()) + print(time.time()-start) - if self.scan_coordinates_name is None: - # flexible scan, use a sequencial ID - coordiante_name = str(coordinate_id) - else: - coordiante_name = self.scan_coordinates_name[coordinate_id] - - if self.use_scan_coordinates: - # move to the specified coordinate - self.navigationController.move_x_to(coordiante_mm[0]-self.deltaX*(self.NX-1)/2) - self.navigationController.move_y_to(coordiante_mm[1]-self.deltaY*(self.NY-1)/2) - # check if z is included in the coordinate - if len(coordiante_mm) == 3: - if coordiante_mm[2] >= self.navigationController.z_pos_mm: - self.navigationController.move_z_to(coordiante_mm[2]) - self.wait_till_operation_is_completed() - else: - self.navigationController.move_z_to(coordiante_mm[2]) - self.wait_till_operation_is_completed() - # remove backlash - _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) - self.navigationController.move_z_usteps(-_usteps_to_clear_backlash) # to-do: combine this with the above - self.wait_till_operation_is_completed() - self.navigationController.move_z_usteps(_usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - else: - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Y/1000) - if 
len(coordiante_mm) == 3: - time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - # add '_' to the coordinate name - coordiante_name = coordiante_name + '_' + def initialize_z_stack(self): + self.count_rtp = 0 + self.dz_usteps = 0 # accumulated z displacement + # z stacking config + if self.coordinate_dict is not None: + if self.z_stacking_config == 'FROM TOP': + self.deltaZ_usteps = -abs(self.deltaZ_usteps) + self.move_to_z_level(self.z_range[1]) + else: + self.move_to_z_level(self.z_range[0]) - self.x_scan_direction = 1 - self.dx_usteps = 0 # accumulated x displacement - self.dy_usteps = 0 # accumulated y displacement - self.dz_usteps = 0 # accumulated z displacement - z_pos = self.navigationController.z_pos # zpos at the beginning of the scan + elif self.z_stacking_config == 'FROM TOP': + self.deltaZ_usteps = -abs(self.deltaZ_usteps) - # z stacking config - if Z_STACKING_CONFIG == 'FROM TOP': - self.deltaZ_usteps = -abs(self.deltaZ_usteps) + self.z_pos = self.navigationController.z_pos # zpos at the beginning of the scan - # along y - for i in range(self.NY): + # reset piezo to home position + if self.use_piezo: + self.z_piezo_um = OBJECTIVE_PIEZO_HOME_UM + self.navigationController.set_piezo_um(self.z_piezo_um) + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: # for hardware trigger, delay is in waiting for the last row to start exposure + time.sleep(MULTIPOINT_PIEZO_DELAY_MS/1000) + if MULTIPOINT_PIEZO_UPDATE_DISPLAY: + self.signal_z_piezo_um.emit(self.z_piezo_um) - self.FOV_counter = 0 # for AF, so that AF at the beginning of each new row + def initialize_coordinates_dataframe(self): + base_columns = ['z_level', 'x (mm)', 'y (mm)', 'z (um)', 'time'] + piezo_column = ['z_piezo (um)'] if self.use_piezo else [] - # along x - for j in range(self.NX): + if self.coordinate_dict is not None: + self.coordinates_pd = pd.DataFrame(columns=['region', 'fov'] + base_columns + piezo_column) + else: + self.coordinates_pd = pd.DataFrame(columns=['region', 'i', 'j'] + 
base_columns + piezo_column) + + def update_coordinates_dataframe(self, region_id, z_level, fov=None, i=None, j=None): + base_data = { + 'z_level': [z_level], + 'x (mm)': [self.navigationController.x_pos_mm], + 'y (mm)': [self.navigationController.y_pos_mm], + 'z (um)': [self.navigationController.z_pos_mm * 1000], + 'time': [datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f')] + } + piezo_data = {'z_piezo (um)': [self.z_piezo_um - OBJECTIVE_PIEZO_HOME_UM]} if self.use_piezo else {} + + if IS_HCS: + if self.coordinate_dict is not None: + new_row = pd.DataFrame({ + 'region': [region_id], + 'fov': [fov], + **base_data, + **piezo_data + }) + else: + new_row = pd.DataFrame({ + 'region': [self.scan_coordinates_name[region_id]], + 'i': [i], 'j': [j], + **base_data, + **piezo_data + }) + else: + new_row = pd.DataFrame({ + 'i': [i], 'j': [j], + **base_data, + **piezo_data + }) - if RUN_CUSTOM_MULTIPOINT and "multipoint_custom_script_entry" in globals(): + self.coordinates_pd = pd.concat([self.coordinates_pd, new_row], ignore_index=True) - print('run custom multipoint') - multipoint_custom_script_entry(self,self.time_point,current_path,coordinate_id,coordiante_name,i,j) - else: + def move_to_coordinate(self, coordinate_mm): + print("moving to coordinate", coordinate_mm) + x_mm = coordinate_mm[0] + self.navigationController.move_x_to(x_mm) + self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_X/1000) - # autofocus - if self.do_reflection_af == False: - # contrast-based AF; perform AF only if when not taking z stack or doing z stack from center - if ( (self.NZ == 1) or Z_STACKING_CONFIG == 'FROM CENTER' ) and (self.do_autofocus) and (self.FOV_counter%Acquisition.NUMBER_OF_FOVS_PER_AF==0): - # temporary: replace the above line with the line below to AF every FOV - # if (self.NZ == 1) and (self.do_autofocus): - configuration_name_AF = MULTIPOINT_AUTOFOCUS_CHANNEL - config_AF = next((config for config in self.configurationManager.configurations if 
config.name == configuration_name_AF)) - self.signal_current_configuration.emit(config_AF) - if (self.FOV_counter%Acquisition.NUMBER_OF_FOVS_PER_AF==0) or self.autofocusController.use_focus_map: - self.autofocusController.autofocus() - self.autofocusController.wait_till_autofocus_has_completed() - # upate z location of scan_coordinates_mm after AF - if len(coordiante_mm) == 3: - self.scan_coordinates_mm[coordinate_id,2] = self.navigationController.z_pos_mm - # update the coordinate in the widget - try: - self.microscope.multiPointWidget2._update_z(coordinate_id,self.navigationController.z_pos_mm) - except: - pass - else: - # initialize laser autofocus if it has not been done - if self.microscope.laserAutofocusController.is_initialized==False: - # initialize the reflection AF - self.microscope.laserAutofocusController.initialize_auto() - # do contrast AF for the first FOV (if contrast AF box is checked) - if self.do_autofocus and ( (self.NZ == 1) or Z_STACKING_CONFIG == 'FROM CENTER' ) : - configuration_name_AF = MULTIPOINT_AUTOFOCUS_CHANNEL - config_AF = next((config for config in self.configurationManager.configurations if config.name == configuration_name_AF)) - self.signal_current_configuration.emit(config_AF) - self.autofocusController.autofocus() - self.autofocusController.wait_till_autofocus_has_completed() - # set the current plane as reference - self.microscope.laserAutofocusController.set_reference() - else: - try: - self.microscope.laserAutofocusController.move_to_target(0) - self.microscope.laserAutofocusController.move_to_target(0) # for stepper in open loop mode, repeat the operation to counter backlash - except: - file_ID = coordiante_name + str(i) + '_' + str(j if self.x_scan_direction==1 else self.NX-1-j) - saving_path = os.path.join(current_path, file_ID + '_focus_camera.bmp') - iio.imwrite(saving_path,self.microscope.laserAutofocusController.image) - print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
laser AF failed !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') - - if (self.NZ > 1): - # move to bottom of the z stack - if Z_STACKING_CONFIG == 'FROM CENTER': - self.navigationController.move_z_usteps(-self.deltaZ_usteps*round((self.NZ-1)/2)) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - # maneuver for achiving uniform step size and repeatability when using open-loop control - self.navigationController.move_z_usteps(-160) - self.wait_till_operation_is_completed() - self.navigationController.move_z_usteps(160) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - - # z-stack - for k in range(self.NZ): - - # Ensure that i/y-indexing is always top to bottom - sgn_i = -1 if self.deltaY >= 0 else 1 - if INVERTED_OBJECTIVE: - sgn_i = -sgn_i - sgn_j = self.x_scan_direction if self.deltaX >= 0 else -self.x_scan_direction - file_ID = coordiante_name + str(self.NY-1-i if sgn_i == -1 else i) + '_' + str(j if sgn_j == 1 else self.NX-1-j) + '_' + str(k) - # metadata = dict(x = self.navigationController.x_pos_mm, y = self.navigationController.y_pos_mm, z = self.navigationController.z_pos_mm) - # metadata = json.dumps(metadata) - - - current_round_images = {} - # iterate through selected modes - for config in self.selected_configurations: - if config.z_offset is not None: # perform z offset for config, assume - # z_offset is in um - if config.z_offset != 0.0: - print("Moving to Z offset "+str(config.z_offset)) - self.navigationController.move_z(config.z_offset/1000) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - - if 'USB Spectrometer' not in config.name: - # update the current configuration - self.signal_current_configuration.emit(config) - self.wait_till_operation_is_completed() - # trigger acquisition (including turning on the illumination) - if self.liveController.trigger_mode == TriggerMode.SOFTWARE: - self.liveController.turn_on_illumination() - 
self.wait_till_operation_is_completed() - self.camera.send_trigger() - elif self.liveController.trigger_mode == TriggerMode.HARDWARE: - self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) - # read camera frame - old_pixel_format = self.camera.pixel_format - if config.pixel_format is not None: - if config.pixel_format != "" and config.pixel_format.lower() != "default": - self.camera.set_pixel_format(config.pixel_format) - - image = self.camera.read_frame() - - if config.pixel_format is not None: - if config.pixel_format != "" and config.pixel_format.lower() != "default": - self.camera.set_pixel_format(old_pixel_format) - if image is None: - print('self.camera.read_frame() returned None') - continue - # tunr of the illumination if using software trigger - if self.liveController.trigger_mode == TriggerMode.SOFTWARE: - self.liveController.turn_off_illumination() - # process the image - @@@ to move to camera - image = utils.crop_image(image,self.crop_width,self.crop_height) - image = utils.rotate_and_flip_image(image,rotate_image_angle=self.camera.rotate_image_angle,flip_image=self.camera.flip_image) - # self.image_to_display.emit(cv2.resize(image,(round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)),cv2.INTER_LINEAR)) - image_to_display = utils.crop_image(image,round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)) - self.image_to_display.emit(image_to_display) - self.image_to_display_multi.emit(image_to_display,config.illumination_source) - if image.dtype == np.uint16: - saving_path = os.path.join(current_path, file_ID + '_' + str(config.name).replace(' ','_') + '.tiff') - if self.camera.is_color: - if 'BF LED matrix' in config.name: - if MULTIPOINT_BF_SAVING_OPTION == 'RGB2GRAY': - image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) - elif MULTIPOINT_BF_SAVING_OPTION == 'Green Channel 
Only': - image = image[:,:,1] - iio.imwrite(saving_path,image) - else: - saving_path = os.path.join(current_path, file_ID + '_' + str(config.name).replace(' ','_') + '.' + Acquisition.IMAGE_FORMAT) - if self.camera.is_color: - if 'BF LED matrix' in config.name: - if MULTIPOINT_BF_SAVING_OPTION == 'Raw': - image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR) - elif MULTIPOINT_BF_SAVING_OPTION == 'RGB2GRAY': - image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) - elif MULTIPOINT_BF_SAVING_OPTION == 'Green Channel Only': - image = image[:,:,1] - else: - image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR) - cv2.imwrite(saving_path,image) - - current_round_images[config.name] = np.copy(image) - - QApplication.processEvents() - else: - if self.usb_spectrometer != None: - for l in range(N_SPECTRUM_PER_POINT): - data = self.usb_spectrometer.read_spectrum() - self.spectrum_to_display.emit(data) - saving_path = os.path.join(current_path, file_ID + '_' + str(config.name).replace(' ','_') + '_' + str(l) + '.csv') - np.savetxt(saving_path,data,delimiter=',') - - - if config.z_offset is not None: # undo Z offset - # assume z_offset is in um - if config.z_offset != 0.0: - print("Moving back from Z offset "+str(config.z_offset)) - self.navigationController.move_z(-config.z_offset/1000) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - - - # add the coordinate of the current location - new_row = pd.DataFrame({'i':[self.NY-1-i if sgn_i == -1 else i],'j':[j if sgn_j == 1 else self.NX-1-j],'k':[k], - 'x (mm)':[self.navigationController.x_pos_mm], - 'y (mm)':[self.navigationController.y_pos_mm], - 'z (um)':[self.navigationController.z_pos_mm*1000], - 'time':datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f')}, - ) - self.coordinates_pd = pd.concat([self.coordinates_pd, new_row], ignore_index=True) - - # register the current fov in the navigationViewer - self.signal_register_current_fov.emit(self.navigationController.x_pos_mm,self.navigationController.y_pos_mm) - - 
# check if the acquisition should be aborted - if self.multiPointController.abort_acqusition_requested: - self.liveController.turn_off_illumination() - self.navigationController.move_x_usteps(-self.dx_usteps) - self.wait_till_operation_is_completed() - self.navigationController.move_y_usteps(-self.dy_usteps) - self.wait_till_operation_is_completed() - _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) - self.navigationController.move_z_usteps(-self.dz_usteps-_usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - self.navigationController.move_z_usteps(_usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - self.coordinates_pd.to_csv(os.path.join(current_path,'coordinates.csv'),index=False,header=True) - self.navigationController.enable_joystick_button_action = True - return - - if self.NZ > 1: - # move z - if k < self.NZ - 1: - self.navigationController.move_z_usteps(self.deltaZ_usteps) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - self.dz_usteps = self.dz_usteps + self.deltaZ_usteps - - if self.NZ > 1: - # move z back - if Z_STACKING_CONFIG == 'FROM CENTER': - _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) - self.navigationController.move_z_usteps( -self.deltaZ_usteps*(self.NZ-1) + self.deltaZ_usteps*round((self.NZ-1)/2) - _usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - self.navigationController.move_z_usteps(_usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - self.dz_usteps = self.dz_usteps - self.deltaZ_usteps*(self.NZ-1) + self.deltaZ_usteps*round((self.NZ-1)/2) - else: - self.navigationController.move_z_usteps(-self.deltaZ_usteps*(self.NZ-1) - _usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - self.navigationController.move_z_usteps(_usteps_to_clear_backlash) - self.wait_till_operation_is_completed() - self.dz_usteps = self.dz_usteps - 
self.deltaZ_usteps*(self.NZ-1) - - # update FOV counter - self.FOV_counter = self.FOV_counter + 1 - - if self.NX > 1: - # move x - if j < self.NX - 1: - self.navigationController.move_x_usteps(self.x_scan_direction*self.deltaX_usteps) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_X/1000) - self.dx_usteps = self.dx_usteps + self.x_scan_direction*self.deltaX_usteps - - # finished X scan - ''' - # instead of move back, reverse scan direction (12/29/2021) - if self.NX > 1: - # move x back - self.navigationController.move_x_usteps(-self.deltaX_usteps*(self.NX-1)) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_X/1000) - ''' - self.x_scan_direction = -self.x_scan_direction - - if self.NY > 1: - # move y - if i < self.NY - 1: - self.navigationController.move_y_usteps(self.deltaY_usteps) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Y/1000) - self.dy_usteps = self.dy_usteps + self.deltaY_usteps - - # finished XY scan - if n_regions == 1: - # only move to the start position if there's only one region in the scan - if self.NY > 1: - # move y back - self.navigationController.move_y_usteps(-self.deltaY_usteps*(self.NY-1)) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_Y/1000) - self.dy_usteps = self.dy_usteps - self.deltaY_usteps*(self.NY-1) + y_mm = coordinate_mm[1] + self.navigationController.move_y_to(y_mm) + self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Y/1000) - # move x back at the end of the scan - if self.x_scan_direction == -1: - self.navigationController.move_x_usteps(-self.deltaX_usteps*(self.NX-1)) - self.wait_till_operation_is_completed() - time.sleep(SCAN_STABILIZATION_TIME_MS_X/1000) + # check if z is included in the coordinate + if len(coordinate_mm) == 3: + z_mm = coordinate_mm[2] + self.move_to_z_level(z_mm) - # move z back + def move_to_z_level(self, z_mm): + print("moving z") + if z_mm 
>= self.navigationController.z_pos_mm: + self.navigationController.move_z_to(z_mm) + self.wait_till_operation_is_completed() + else: + self.navigationController.move_z_to(z_mm) + self.wait_till_operation_is_completed() + # remove backlash + if self.navigationController.get_pid_control_flag(2) is False: _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) - self.navigationController.microcontroller.move_z_to_usteps(z_pos - STAGE_MOVEMENT_SIGN_Z*_usteps_to_clear_backlash) + self.navigationController.move_z_usteps(-_usteps_to_clear_backlash) # to-do: combine this with the above self.wait_till_operation_is_completed() self.navigationController.move_z_usteps(_usteps_to_clear_backlash) self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) - # finished region scan - self.coordinates_pd.to_csv(os.path.join(current_path,'coordinates.csv'),index=False,header=True) - self.navigationController.enable_joystick_button_action = True - print(time.time()) - print(time.time()-start) + def run_coordinate_acquisition(self, current_path): + n_regions = len(self.scan_coordinates_mm) -class MultiPointController(QObject): + for region_index, (region_id, coordinates) in enumerate(self.coordinate_dict.items()): - acquisitionFinished = Signal() - image_to_display = Signal(np.ndarray) - image_to_display_multi = Signal(np.ndarray,int) - spectrum_to_display = Signal(np.ndarray) - signal_current_configuration = Signal(Configuration) - signal_register_current_fov = Signal(float,float) - detection_stats = Signal(object) + self.signal_acquisition_progress.emit(region_index + 1, n_regions, self.time_point) - def __init__(self,camera,navigationController,liveController,autofocusController,configurationManager,usb_spectrometer=None,scanCoordinates=None,parent=None): - QObject.__init__(self) + self.num_fovs = len(coordinates) + self.total_scans = self.num_fovs * self.NZ * len(self.selected_configurations) - self.camera = camera - 
self.processingHandler = ProcessingHandler() - self.microcontroller = navigationController.microcontroller # to move to gui for transparency - self.navigationController = navigationController - self.liveController = liveController - self.autofocusController = autofocusController - self.configurationManager = configurationManager - self.NX = 1 - self.NY = 1 - self.NZ = 1 - self.Nt = 1 - mm_per_ustep_X = SCREW_PITCH_X_MM/(self.navigationController.x_microstepping*FULLSTEPS_PER_REV_X) - mm_per_ustep_Y = SCREW_PITCH_Y_MM/(self.navigationController.y_microstepping*FULLSTEPS_PER_REV_Y) - mm_per_ustep_Z = SCREW_PITCH_Z_MM/(self.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z) - self.deltaX = Acquisition.DX - self.deltaX_usteps = round(self.deltaX/mm_per_ustep_X) - self.deltaY = Acquisition.DY - self.deltaY_usteps = round(self.deltaY/mm_per_ustep_Y) - self.deltaZ = Acquisition.DZ/1000 - self.deltaZ_usteps = round(self.deltaZ/mm_per_ustep_Z) - self.deltat = 0 - self.do_autofocus = False - self.do_reflection_af = False - self.gen_focus_map = False - self.focus_map_storage = [] - self.already_using_fmap = False - self.crop_width = Acquisition.CROP_WIDTH - self.crop_height = Acquisition.CROP_HEIGHT - self.display_resolution_scaling = Acquisition.IMAGE_DISPLAY_SCALING_FACTOR - self.counter = 0 - self.experiment_ID = None - self.base_path = None - self.selected_configurations = [] - self.usb_spectrometer = usb_spectrometer - self.scanCoordinates = scanCoordinates - self.parent = parent + for fov_count, coordinate_mm in enumerate(coordinates): + + self.move_to_coordinate(coordinate_mm) + self.acquire_at_position(region_id, current_path, fov_count) + + if self.multiPointController.abort_acqusition_requested: + self.handle_acquisition_abort(current_path, region_id) + return + + def acquire_at_position(self, region_id, current_path, fov, i=None, j=None): + + if RUN_CUSTOM_MULTIPOINT and "multipoint_custom_script_entry" in globals(): + print('run custom multipoint') + 
multipoint_custom_script_entry(self, current_path, region_id, fov, i, j) + return + + self.perform_autofocus(region_id) + + if self.NZ > 1: + self.prepare_z_stack() + + if self.coordinate_dict is not None: + coordinate_name = region_id + else: + coordinate_name = self.scan_coordinates_name[region_id] + + x_mm = self.navigationController.x_pos_mm + y_mm = self.navigationController.y_pos_mm + + for z_level in range(self.NZ): + if i is not None and j is not None: + file_ID = f"{coordinate_name}_{i}_{j}_{z_level}" + else: + file_ID = f"{coordinate_name}_{fov}_{z_level}" + + metadata = dict(x = self.navigationController.x_pos_mm, y = self.navigationController.y_pos_mm, z = self.navigationController.z_pos_mm) + print(f"Acquiring image: ID={file_ID}, Metadata={metadata}") + + # laser af characterization mode + if LASER_AF_CHARACTERIZATION_MODE: + image = self.microscope.laserAutofocusController.get_image() + saving_path = os.path.join(current_path, file_ID + '_laser af camera' + '.bmp') + iio.imwrite(saving_path,image) + + current_round_images = {} + # iterate through selected modes + for config_idx, config in enumerate(self.selected_configurations): + + self.handle_z_offset(config, True) + + # acquire image + if 'USB Spectrometer' not in config.name and 'RGB' not in config.name: + self.acquire_camera_image(config, file_ID, current_path, current_round_images, i, j, z_level) + elif 'RGB' in config.name: + self.acquire_rgb_image(config, file_ID, current_path, current_round_images, i, j, z_level) + else: + self.acquire_spectrometer_data(config, file_ID, current_path, i, j, z_level) + + self.handle_z_offset(config, False) + + current_image = (fov * self.NZ * len(self.selected_configurations) + z_level * len(self.selected_configurations) + config_idx + 1) + self.signal_region_progress.emit(current_image, self.total_scans) + + ''' + # tiled preview + if not USE_NAPARI_FOR_TILED_DISPLAY and SHOW_TILED_PREVIEW and 'BF LED matrix left half' in current_round_images: + # initialize 
the variable + if self.tiled_preview is None: + size = current_round_images['BF LED matrix left half'].shape + if len(size) == 2: + self.tiled_preview = np.zeros((int(self.NY*size[0]/PRVIEW_DOWNSAMPLE_FACTOR),self.NX*int(size[1]/PRVIEW_DOWNSAMPLE_FACTOR)),dtype=current_round_images['BF LED matrix full'].dtype) + else: + self.tiled_preview = np.zeros((int(self.NY*size[0]/PRVIEW_DOWNSAMPLE_FACTOR),self.NX*int(size[1]/PRVIEW_DOWNSAMPLE_FACTOR),size[2]),dtype=current_round_images['BF LED matrix full'].dtype) + # downsample the image + I = current_round_images['BF LED matrix left half'] + width = int(I.shape[1]/PRVIEW_DOWNSAMPLE_FACTOR) + height = int(I.shape[0]/PRVIEW_DOWNSAMPLE_FACTOR) + I = cv2.resize(I, (width,height), interpolation=cv2.INTER_AREA) + # populate the tiled_preview + self.tiled_preview[i*height:(i+1)*height, j*width:(j+1)*width, ] = I + # emit the result + self.image_to_display_tiled_preview.emit(self.tiled_preview) + ''' + + # real time processing + if self.multiPointController.do_fluorescence_rtp: + self.run_real_time_processing(current_round_images, i, j, z_level) + + # updates coordinates df + if i is None or j is None: + self.update_coordinates_dataframe(region_id, z_level, fov) + else: + self.update_coordinates_dataframe(region_id, z_level, i, j) + self.signal_register_current_fov.emit(self.navigationController.x_pos_mm, self.navigationController.y_pos_mm) + + # check if the acquisition should be aborted + if self.multiPointController.abort_acqusition_requested: + self.handle_acquisition_abort(current_path, region_id) + return + + # update FOV counter + self.af_fov_count = self.af_fov_count + 1 + + if z_level < self.NZ - 1: + self.move_z_for_stack() + + if self.NZ > 1: + self.move_z_back_after_stack() + + def run_real_time_processing(self, current_round_images, i, j, z_level): + acquired_image_configs = list(current_round_images.keys()) + if 'BF LED matrix left half' in current_round_images and 'BF LED matrix right half' in current_round_images 
and 'Fluorescence 405 nm Ex' in current_round_images: + try: + print("real time processing", self.count_rtp) + if (self.microscope.model is None) or (self.microscope.device is None) or (self.microscope.classification_th is None) or (self.microscope.dataHandler is None): + raise AttributeError('microscope missing model, device, classification_th, and/or dataHandler') + I_fluorescence = current_round_images['Fluorescence 405 nm Ex'] + I_left = current_round_images['BF LED matrix left half'] + I_right = current_round_images['BF LED matrix right half'] + if len(I_left.shape) == 3: + I_left = cv2.cvtColor(I_left,cv2.COLOR_RGB2GRAY) + if len(I_right.shape) == 3: + I_right = cv2.cvtColor(I_right,cv2.COLOR_RGB2GRAY) + malaria_rtp(I_fluorescence, I_left, I_right, i, j, z_level, self, + classification_test_mode=self.microscope.classification_test_mode, + sort_during_multipoint=SORT_DURING_MULTIPOINT, + disp_th_during_multipoint=DISP_TH_DURING_MULTIPOINT) + self.count_rtp += 1 + except AttributeError as e: + print(repr(e)) + + def perform_autofocus(self, region_id): + if self.do_reflection_af == False: + # contrast-based AF; perform AF only when not taking z stack or doing z stack from center + if ( (self.NZ == 1) or self.z_stacking_config == 'FROM CENTER' ) and (self.do_autofocus) and (self.af_fov_count%Acquisition.NUMBER_OF_FOVS_PER_AF==0): + configuration_name_AF = MULTIPOINT_AUTOFOCUS_CHANNEL + config_AF = next((config for config in self.configurationManager.configurations if config.name == configuration_name_AF)) + self.signal_current_configuration.emit(config_AF) + if (self.af_fov_count%Acquisition.NUMBER_OF_FOVS_PER_AF==0) or self.autofocusController.use_focus_map: + self.autofocusController.autofocus() + self.autofocusController.wait_till_autofocus_has_completed() + # update z location of scan_coordinates_mm after AF + if len(self.scan_coordinates_mm[region_id]) == 3: + self.scan_coordinates_mm[region_id][2] = self.navigationController.z_pos_mm + # update the
coordinate in the widget + if self.coordinate_dict is not None: + self.microscope.wellplateMultiPointWidget.update_region_z_level(region_id, self.navigationController.z_pos_mm) + elif self.multiPointController.location_list is not None: + try: + self.microscope.flexibleMultiPointWidget._update_z(region_id, self.navigationController.z_pos_mm) + except: + print("failed update flexible widget z") + pass + try: + self.microscope.wellplateMultiPointWidget.update_region_z_level(region_id, self.navigationController.z_pos_mm) + except: + print("failed update grid widget z") + pass + else: + # initialize laser autofocus if it has not been done + if self.microscope.laserAutofocusController.is_initialized==False: + print("init reflection af") + # initialize the reflection AF + self.microscope.laserAutofocusController.initialize_auto() + # do contrast AF for the first FOV (if contrast AF box is checked) + if self.do_autofocus and ( (self.NZ == 1) or self.z_stacking_config == 'FROM CENTER' ) : + configuration_name_AF = MULTIPOINT_AUTOFOCUS_CHANNEL + config_AF = next((config for config in self.configurationManager.configurations if config.name == configuration_name_AF)) + self.signal_current_configuration.emit(config_AF) + self.autofocusController.autofocus() + self.autofocusController.wait_till_autofocus_has_completed() + # set the current plane as reference + self.microscope.laserAutofocusController.set_reference() + else: + print("laser reflection af") + try: + if self.navigationController.get_pid_control_flag(2) is False: + self.microscope.laserAutofocusController.move_to_target(0) + self.microscope.laserAutofocusController.move_to_target(0) # for stepper in open loop mode, repeat the operation to counter backlash + else: + self.microscope.laserAutofocusController.move_to_target(0) + except: + file_ID = f"{region_id}_focus_camera.bmp" + saving_path = os.path.join(self.base_path, self.experiment_ID, str(self.time_point), file_ID) + iio.imwrite(saving_path, 
self.microscope.laserAutofocusController.image) + print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! laser AF failed !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') + + def prepare_z_stack(self): + # move to bottom of the z stack + if self.z_stacking_config == 'FROM CENTER': + self.navigationController.move_z_usteps(-self.deltaZ_usteps*round((self.NZ-1)/2)) + self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) + # maneuver for achieving uniform step size and repeatability when using open-loop control + self.navigationController.move_z_usteps(-160) + self.wait_till_operation_is_completed() + self.navigationController.move_z_usteps(160) + self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) + + def handle_z_offset(self, config, not_offset): + if config.z_offset is not None: # perform z offset for config, assume z_offset is in um + if config.z_offset != 0.0: + direction = 1 if not_offset else -1 + print("Moving Z offset" + str(config.z_offset * direction)) + self.navigationController.move_z(config.z_offset/1000*direction) + self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) + + def acquire_camera_image(self, config, file_ID, current_path, current_round_images, i, j, k): + # update the current configuration + self.signal_current_configuration.emit(config) + self.wait_till_operation_is_completed() + + # trigger acquisition (including turning on the illumination) and read frame + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + self.liveController.turn_on_illumination() + self.wait_till_operation_is_completed() + self.camera.send_trigger() + image = self.camera.read_frame() + elif self.liveController.trigger_mode == TriggerMode.HARDWARE: + if 'Fluorescence' in config.name and ENABLE_NL5 and NL5_USE_DOUT: + self.camera.image_is_ready = False # to remove + self.microscope.nl5.start_acquisition() + image = self.camera.read_frame(reset_image_ready_flag=False) + else: +
self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) + image = self.camera.read_frame() + else: # continuous acquisition + image = self.camera.read_frame() + + if image is None: + print('self.camera.read_frame() returned None') + return + + # turn off the illumination if using software trigger + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + self.liveController.turn_off_illumination() + + # process the image - @@@ to move to camera + image = utils.crop_image(image,self.crop_width,self.crop_height) + image = utils.rotate_and_flip_image(image,rotate_image_angle=self.camera.rotate_image_angle,flip_image=self.camera.flip_image) + image_to_display = utils.crop_image(image,round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)) + self.image_to_display.emit(image_to_display) + self.image_to_display_multi.emit(image_to_display,config.illumination_source) + + self.save_image(image, file_ID, config, current_path) + self.update_napari(image, config.name, i, j, k) + + current_round_images[config.name] = np.copy(image) + + self.handle_dpc_generation(current_round_images) + self.handle_rgb_generation(current_round_images, file_ID, current_path, i, j, k) + + QApplication.processEvents() + + def acquire_rgb_image(self, config, file_ID, current_path, current_round_images, i, j, k): + # go through the channels + rgb_channels = ['BF LED matrix full_R', 'BF LED matrix full_G', 'BF LED matrix full_B'] + images = {} + + for config_ in self.configurationManager.configurations: + if config_.name in rgb_channels: + # update the current configuration + self.signal_current_configuration.emit(config_) + self.wait_till_operation_is_completed() + + # trigger acquisition (including turning on the illumination) + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + self.liveController.turn_on_illumination() + 
self.wait_till_operation_is_completed() + self.camera.send_trigger() + + elif self.liveController.trigger_mode == TriggerMode.HARDWARE: + self.microcontroller.send_hardware_trigger(control_illumination=True, illumination_on_time_us=self.camera.exposure_time * 1000) + + # read camera frame + image = self.camera.read_frame() + if image is None: + print('self.camera.read_frame() returned None') + continue + + # turn off the illumination if using software trigger + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + self.liveController.turn_off_illumination() + + # process the image - @@@ to move to camera + image = utils.crop_image(image, self.crop_width, self.crop_height) + image = utils.rotate_and_flip_image(image, rotate_image_angle=self.camera.rotate_image_angle, flip_image=self.camera.flip_image) + + # add the image to dictionary + images[config_.name] = np.copy(image) + + # Check if the image is RGB or monochrome + i_size = images['BF LED matrix full_R'].shape + i_dtype = images['BF LED matrix full_R'].dtype + + if len(i_size) == 3: + # If already RGB, write and emit individual channels + print('writing R, G, B channels') + self.handle_rgb_channels(images, file_ID, current_path, config, i, j, k) + else: + # If monochrome, reconstruct RGB image + print('constructing RGB image') + self.construct_rgb_image(images, file_ID, current_path, config, i, j, k) + + def acquire_spectrometer_data(self, config, file_ID, current_path): + if self.usb_spectrometer != None: + for l in range(N_SPECTRUM_PER_POINT): + data = self.usb_spectrometer.read_spectrum() + self.spectrum_to_display.emit(data) + saving_path = os.path.join(current_path, file_ID + '_' + str(config.name).replace(' ','_') + '_' + str(l) + '.csv') + np.savetxt(saving_path,data,delimiter=',') + + def save_image(self, image, file_ID, config, current_path): + if image.dtype == np.uint16: + saving_path = os.path.join(current_path, file_ID + '_' + str(config.name).replace(' ','_') + '.tiff') + else: + 
saving_path = os.path.join(current_path, file_ID + '_' + str(config.name).replace(' ','_') + '.' + Acquisition.IMAGE_FORMAT) + + if self.camera.is_color: + if 'BF LED matrix' in config.name: + if MULTIPOINT_BF_SAVING_OPTION == 'RGB2GRAY': + image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) + elif MULTIPOINT_BF_SAVING_OPTION == 'Green Channel Only': + image = image[:,:,1] + + if Acquisition.PSEUDO_COLOR: + image = self.return_pseudo_colored_image(image, config) + + if Acquisition.MERGE_CHANNELS: + self._save_merged_image(image, file_ID, current_path) + + iio.imwrite(saving_path,image) + + def _save_merged_image(self, image, file_ID, current_path): + self.image_count += 1 + if self.image_count == 1: + self.merged_image = image + else: + self.merged_image += image + + if self.image_count == len(self.selected_configurations): + if image.dtype == np.uint16: + saving_path = os.path.join(current_path, file_ID + '_merged' + '.tiff') + else: + saving_path = os.path.join(current_path, file_ID + '_merged' + '.' 
+ Acquisition.IMAGE_FORMAT) + + iio.imwrite(saving_path, self.merged_image) + self.image_count = 0 + return + + def return_pseudo_colored_image(self, image, config): + if '405 nm' in config.name: + image = self.grayscale_to_rgb(image, Acquisition.PSEUDO_COLOR_MAP["405"]["hex"]) + elif '488 nm' in config.name: + image = self.grayscale_to_rgb(image, Acquisition.PSEUDO_COLOR_MAP["488"]["hex"]) + elif '561 nm' in config.name: + image = self.grayscale_to_rgb(image, Acquisition.PSEUDO_COLOR_MAP["561"]["hex"]) + elif '638 nm' in config.name: + image = self.grayscale_to_rgb(image, Acquisition.PSEUDO_COLOR_MAP["638"]["hex"]) + elif '730 nm' in config.name: + image = self.grayscale_to_rgb(image, Acquisition.PSEUDO_COLOR_MAP["730"]["hex"]) + + return image + + def grayscale_to_rgb(self, image, hex_color): + rgb_ratios = np.array([(hex_color >> 16) & 0xFF, + (hex_color >> 8) & 0xFF, + hex_color & 0xFF]) / 255 + rgb = np.stack([image] * 3, axis=-1) * rgb_ratios + return rgb.astype(image.dtype) + + def update_napari(self, image, config_name, i, j, k): + if not self.performance_mode: + i = -1 if i is None else i + j = -1 if j is None else j + print("update napari:", i, j, k, config_name) + + if USE_NAPARI_FOR_MULTIPOINT or USE_NAPARI_FOR_TILED_DISPLAY: + if not self.init_napari_layers: + print("init napari layers") + self.init_napari_layers = True + self.napari_layers_init.emit(image.shape[0],image.shape[1], image.dtype) + self.napari_layers_update.emit(image, i, j, k, config_name) + if USE_NAPARI_FOR_MOSAIC_DISPLAY and k == 0: + print(f"Updating mosaic layers: x={self.navigationController.x_pos_mm:.6f}, y={self.navigationController.y_pos_mm:.6f}") + self.napari_mosaic_update.emit(image, self.navigationController.x_pos_mm, self.navigationController.y_pos_mm, k, config_name) + + def handle_dpc_generation(self, current_round_images): + keys_to_check = ['BF LED matrix left half', 'BF LED matrix right half', 'BF LED matrix top half', 'BF LED matrix bottom half'] + if all(key in 
current_round_images for key in keys_to_check): + # generate dpc + pass + def handle_rgb_generation(self, current_round_images, file_ID, current_path, i, j, k): + keys_to_check = ['BF LED matrix full_R', 'BF LED matrix full_G', 'BF LED matrix full_B'] + if all(key in current_round_images for key in keys_to_check): + print('constructing RGB image') + print(current_round_images['BF LED matrix full_R'].dtype) + size = current_round_images['BF LED matrix full_R'].shape + rgb_image = np.zeros((*size, 3),dtype=current_round_images['BF LED matrix full_R'].dtype) + print(rgb_image.shape) + rgb_image[:, :, 0] = current_round_images['BF LED matrix full_R'] + rgb_image[:, :, 1] = current_round_images['BF LED matrix full_G'] + rgb_image[:, :, 2] = current_round_images['BF LED matrix full_B'] + + # send image to display + image_to_display = utils.crop_image(rgb_image,round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)) + + # write the image + if len(rgb_image.shape) == 3: + print('writing RGB image') + if rgb_image.dtype == np.uint16: + iio.imwrite(os.path.join(current_path, file_ID + '_BF_LED_matrix_full_RGB.tiff'), rgb_image) + else: + iio.imwrite(os.path.join(current_path, file_ID + '_BF_LED_matrix_full_RGB.' + Acquisition.IMAGE_FORMAT),rgb_image) + + def handle_rgb_channels(self, images, file_ID, current_path, config, i, j, k): + for channel in ['BF LED matrix full_R', 'BF LED matrix full_G', 'BF LED matrix full_B']: + image_to_display = utils.crop_image(images[channel], round(self.crop_width * self.display_resolution_scaling), round(self.crop_height * self.display_resolution_scaling)) + self.image_to_display.emit(image_to_display) + self.image_to_display_multi.emit(image_to_display, config.illumination_source) + + self.update_napari(images[channel], channel, i, j, k) + + file_name = file_ID + '_' + channel.replace(' ', '_') + ('.tiff' if images[channel].dtype == np.uint16 else '.' 
+ Acquisition.IMAGE_FORMAT) + iio.imwrite(os.path.join(current_path, file_name), images[channel]) + + def construct_rgb_image(self, images, file_ID, current_path, config, i, j, k): + rgb_image = np.zeros((*images['BF LED matrix full_R'].shape, 3), dtype=images['BF LED matrix full_R'].dtype) + rgb_image[:, :, 0] = images['BF LED matrix full_R'] + rgb_image[:, :, 1] = images['BF LED matrix full_G'] + rgb_image[:, :, 2] = images['BF LED matrix full_B'] + + # send image to display + image_to_display = utils.crop_image(rgb_image, round(self.crop_width * self.display_resolution_scaling), round(self.crop_height * self.display_resolution_scaling)) + self.image_to_display.emit(image_to_display) + self.image_to_display_multi.emit(image_to_display, config.illumination_source) + + self.update_napari(rgb_image, config.name, i, j, k) + + # write the RGB image + print('writing RGB image') + file_name = file_ID + '_BF_LED_matrix_full_RGB' + ('.tiff' if rgb_image.dtype == np.uint16 else '.' + Acquisition.IMAGE_FORMAT) + iio.imwrite(os.path.join(current_path, file_name), rgb_image) + + def handle_acquisition_abort(self, current_path, region_id=0): + self.move_to_coordinate(self.scan_coordinates_mm[region_id]) + self.coordinates_pd.to_csv(os.path.join(current_path,'coordinates.csv'),index=False,header=True) + self.navigationController.enable_joystick_button_action = True + + def move_z_for_stack(self): + if self.use_piezo: + self.z_piezo_um += self.deltaZ*1000 + self.navigationController.set_piezo_um(self.z_piezo_um) + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: # for hardware trigger, delay is in waiting for the last row to start exposure + time.sleep(MULTIPOINT_PIEZO_DELAY_MS/1000) + if MULTIPOINT_PIEZO_UPDATE_DISPLAY: + self.signal_z_piezo_um.emit(self.z_piezo_um) + else: + self.navigationController.move_z_usteps(self.deltaZ_usteps) + self.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z/1000) + self.dz_usteps = self.dz_usteps + 
self.deltaZ_usteps + + def move_z_back_after_stack(self): + if self.use_piezo: + self.z_piezo_um = OBJECTIVE_PIEZO_HOME_UM + self.navigationController.set_piezo_um(self.z_piezo_um) + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: # for hardware trigger, delay is in waiting for the last row to start exposure + time.sleep(MULTIPOINT_PIEZO_DELAY_MS/1000) + if MULTIPOINT_PIEZO_UPDATE_DISPLAY: + self.signal_z_piezo_um.emit(self.z_piezo_um) + else: + _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) + if self.z_stacking_config == 'FROM CENTER': + if self.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) + self.navigationController.move_z_usteps( -self.deltaZ_usteps*(self.NZ-1) + self.deltaZ_usteps*round((self.NZ-1)/2) - _usteps_to_clear_backlash) + self.wait_till_operation_is_completed() + self.navigationController.move_z_usteps(_usteps_to_clear_backlash) + self.wait_till_operation_is_completed() + else: + self.navigationController.move_z_usteps( -self.deltaZ_usteps*(self.NZ-1) + self.deltaZ_usteps*round((self.NZ-1)/2) ) + self.wait_till_operation_is_completed() + self.dz_usteps = self.dz_usteps - self.deltaZ_usteps*(self.NZ-1) + self.deltaZ_usteps*round((self.NZ-1)/2) + else: + if self.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*self.navigationController.z_microstepping) + self.navigationController.move_z_usteps(-self.deltaZ_usteps*(self.NZ-1) - _usteps_to_clear_backlash) + self.wait_till_operation_is_completed() + self.navigationController.move_z_usteps(_usteps_to_clear_backlash) + self.wait_till_operation_is_completed() + else: + self.navigationController.move_z_usteps(-self.deltaZ_usteps*(self.NZ-1)) + self.wait_till_operation_is_completed() + self.dz_usteps = self.dz_usteps - self.deltaZ_usteps*(self.NZ-1) + + def update_tiled_preview(self, current_round_images, i, j, k): + if 
SHOW_TILED_PREVIEW and 'BF LED matrix full' in current_round_images: + # initialize the variable + if self.tiled_preview is None: + size = current_round_images['BF LED matrix full'].shape + if len(size) == 2: + self.tiled_preview = np.zeros((int(self.NY*size[0]/PRVIEW_DOWNSAMPLE_FACTOR),self.NX*int(size[1]/PRVIEW_DOWNSAMPLE_FACTOR)),dtype=current_round_images['BF LED matrix full'].dtype) + else: + self.tiled_preview = np.zeros((int(self.NY*size[0]/PRVIEW_DOWNSAMPLE_FACTOR),self.NX*int(size[1]/PRVIEW_DOWNSAMPLE_FACTOR),size[2]),dtype=current_round_images['BF LED matrix full'].dtype) + # downsample the image + I = current_round_images['BF LED matrix full'] + width = int(I.shape[1]/PRVIEW_DOWNSAMPLE_FACTOR) + height = int(I.shape[0]/PRVIEW_DOWNSAMPLE_FACTOR) + I = cv2.resize(I, (width,height), interpolation=cv2.INTER_AREA) + # populate the tiled_preview + self.tiled_preview[i*height:(i+1)*height, j*width:(j+1)*width,] = I + # emit the result + self.image_to_display_tiled_preview.emit(self.tiled_preview) + + +class MultiPointController(QObject): + + acquisitionFinished = Signal() + image_to_display = Signal(np.ndarray) + image_to_display_multi = Signal(np.ndarray,int) + image_to_display_tiled_preview = Signal(np.ndarray) + spectrum_to_display = Signal(np.ndarray) + signal_current_configuration = Signal(Configuration) + signal_register_current_fov = Signal(float,float) + detection_stats = Signal(object) + signal_stitcher = Signal(str) + napari_rtp_layers_update = Signal(np.ndarray, str) + napari_layers_init = Signal(int, int, object) + napari_layers_update = Signal(np.ndarray, int, int, int, str) # image, i, j, k, channel + napari_mosaic_update = Signal(np.ndarray, float, float, int, str) # image, x_mm, y_mm, k, channel + signal_z_piezo_um = Signal(float) + signal_acquisition_progress = Signal(int, int, int) + signal_region_progress = Signal(int, int) + + def 
__init__(self,camera,navigationController,liveController,autofocusController,configurationManager,usb_spectrometer=None,scanCoordinates=None,parent=None): + QObject.__init__(self) + + self.camera = camera + if DO_FLUORESCENCE_RTP: + self.processingHandler = ProcessingHandler() + self.microcontroller = navigationController.microcontroller # to move to gui for transparency + self.navigationController = navigationController + self.liveController = liveController + self.autofocusController = autofocusController + self.configurationManager = configurationManager + self.NX = 1 + self.NY = 1 + self.NZ = 1 + self.Nt = 1 + mm_per_ustep_X = self.navigationController.get_mm_per_ustep_X() + mm_per_ustep_Y = self.navigationController.get_mm_per_ustep_Y() + mm_per_ustep_Z = self.navigationController.get_mm_per_ustep_Z() + self.deltaX = Acquisition.DX + self.deltaX_usteps = round(self.deltaX/mm_per_ustep_X) + self.deltaY = Acquisition.DY + self.deltaY_usteps = round(self.deltaY/mm_per_ustep_Y) + self.deltaZ = Acquisition.DZ/1000 + self.deltaZ_usteps = round(self.deltaZ/mm_per_ustep_Z) + self.deltat = 0 + self.do_autofocus = False + self.do_reflection_af = False + self.gen_focus_map = False + self.focus_map_storage = [] + self.already_using_fmap = False + self.do_segmentation = False + self.do_fluorescence_rtp = DO_FLUORESCENCE_RTP + self.crop_width = Acquisition.CROP_WIDTH + self.crop_height = Acquisition.CROP_HEIGHT + self.display_resolution_scaling = Acquisition.IMAGE_DISPLAY_SCALING_FACTOR + self.counter = 0 + self.experiment_ID = None + self.base_path = None + self.use_piezo = False # MULTIPOINT_USE_PIEZO_FOR_ZSTACKS + self.selected_configurations = [] + self.usb_spectrometer = usb_spectrometer + self.scanCoordinates = scanCoordinates + self.scan_coordinates_mm = [] + self.scan_coordinates_name = [] + self.parent = parent + self.start_time = 0 self.old_images_per_page = 1 + self.z_range = [self.navigationController.z_pos_mm, self.navigationController.z_pos_mm + self.deltaZ * 
(self.NZ - 1)] # [start_mm, end_mm] + try: if self.parent is not None: self.old_images_per_page = self.parent.dataHandler.n_images_per_page except: pass self.location_list = None # for flexible multipoint + self.coordinate_dict = None # for coordinate grid vs postion grid + self.z_stacking_config = Z_STACKING_CONFIG + + def set_use_piezo(self, checked): + print("set use_piezo to", checked) + self.use_piezo = checked + if hasattr(self, 'multiPointWorker'): + self.multiPointWorker.update_use_piezo(checked) + + def set_z_stacking_config(self, z_stacking_config_index): + if z_stacking_config_index in Z_STACKING_CONFIG_MAP: + self.z_stacking_config = Z_STACKING_CONFIG_MAP[z_stacking_config_index] + print(f"z-stacking configuration set to {self.z_stacking_config}") + + def set_z_range(self, minZ, maxZ): + self.z_range = [minZ, maxZ] def set_NX(self,N): self.NX = N + def set_NY(self,N): self.NY = N + def set_NZ(self,N): self.NZ = N + def set_Nt(self,N): self.Nt = N + def set_deltaX(self,delta): - mm_per_ustep_X = SCREW_PITCH_X_MM/(self.navigationController.x_microstepping*FULLSTEPS_PER_REV_X) + mm_per_ustep_X = self.navigationController.get_mm_per_ustep_X() self.deltaX = delta self.deltaX_usteps = round(delta/mm_per_ustep_X) + def set_deltaY(self,delta): - mm_per_ustep_Y = SCREW_PITCH_Y_MM/(self.navigationController.y_microstepping*FULLSTEPS_PER_REV_Y) + mm_per_ustep_Y = self.navigationController.get_mm_per_ustep_Y() self.deltaY = delta self.deltaY_usteps = round(delta/mm_per_ustep_Y) + def set_deltaZ(self,delta_um): - mm_per_ustep_Z = SCREW_PITCH_Z_MM/(self.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z) + mm_per_ustep_Z = self.navigationController.get_mm_per_ustep_Z() self.deltaZ = delta_um/1000 self.deltaZ_usteps = round((delta_um/1000)/mm_per_ustep_Z) + def set_deltat(self,delta): self.deltat = delta + def set_af_flag(self,flag): self.do_autofocus = flag + def set_reflection_af_flag(self,flag): self.do_reflection_af = flag + def set_gen_focus_map_flag(self, 
flag): self.gen_focus_map = flag if not flag: self.autofocusController.set_focus_map_use(False) - def set_crop(self,crop_width,height): + + def set_stitch_tiles_flag(self, flag): + self.do_stitch_tiles = flag + + def set_segmentation_flag(self, flag): + self.do_segmentation = flag + + def set_fluorescence_rtp_flag(self, flag): + self.do_fluorescence_rtp = flag + + def set_crop(self,crop_width, crop_height): self.crop_width = crop_width self.crop_height = crop_height @@ -1957,13 +2597,20 @@ def set_base_path(self,path): def start_new_experiment(self,experiment_ID): # @@@ to do: change name to prepare_folder_for_new_experiment # generate unique experiment ID - self.experiment_ID = experiment_ID.replace(' ','_') + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + self.experiment_ID = experiment_ID.replace(' ','_') + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') self.recording_start_time = time.time() # create a new folder os.mkdir(os.path.join(self.base_path,self.experiment_ID)) configManagerThrowaway = ConfigurationManager(self.configurationManager.config_filename) configManagerThrowaway.write_configuration_selected(self.selected_configurations,os.path.join(self.base_path,self.experiment_ID)+"/configurations.xml") # save the configuration for the experiment - acquisition_parameters = {'dx(mm)':self.deltaX, 'Nx':self.NX, 'dy(mm)':self.deltaY, 'Ny':self.NY, 'dz(um)':self.deltaZ*1000,'Nz':self.NZ,'dt(s)':self.deltat,'Nt':self.Nt,'with AF':self.do_autofocus,'with reflection AF':self.do_reflection_af} + # Prepare acquisition parameters + acquisition_parameters = { + 'dx(mm)': self.deltaX, 'Nx': self.NX, + 'dy(mm)': self.deltaY, 'Ny': self.NY, + 'dz(um)': self.deltaZ * 1000 if self.deltaZ != 0 else 1, 'Nz': self.NZ, + 'dt(s)': self.deltat, 'Nt': self.Nt, + 'with AF': self.do_autofocus, 'with reflection AF': self.do_reflection_af, + } try: # write objective data if it is available current_objective = self.parent.objectiveStore.current_objective objective_info 
= self.parent.objectiveStore.objectives_dict.get(current_objective, {}) @@ -1980,30 +2627,40 @@ def start_new_experiment(self,experiment_ID): # @@@ to do: change name to prepar acquisition_parameters['objective']['name']=DEFAULT_OBJECTIVE except: pass + # TODO: USE OBJECTIVE STORE DATA acquisition_parameters['sensor_pixel_size_um'] = CAMERA_PIXEL_SIZE_UM[CAMERA_SENSOR] acquisition_parameters['tube_lens_mm'] = TUBE_LENS_MM f = open(os.path.join(self.base_path,self.experiment_ID)+"/acquisition parameters.json","w") f.write(json.dumps(acquisition_parameters)) f.close() - def set_selected_configurations(self, selected_configurations_name): self.selected_configurations = [] for configuration_name in selected_configurations_name: self.selected_configurations.append(next((config for config in self.configurationManager.configurations if config.name == configuration_name))) - - def run_acquisition(self, location_list=None): # @@@ to do: change name to run_experiment + + def run_acquisition(self, location_list=None, coordinate_dict=None): + # location_list dict -> {key: region_id, value: center coordinate (x,y,z)} + # coordinate_dict dict -> {key: region_id, value: fov coordinates list [(x0,y0,z0), (x1,y1,z1), ... ]} print('start multipoint') - print(str(self.Nt) + '_' + str(self.NX) + '_' + str(self.NY) + '_' + str(self.NZ)) - if location_list is not None: - print(location_list) - self.location_list = location_list - else: + + if coordinate_dict is not None: + print('Using coordinate-based acquisition') # always + total_points = sum(len(coords) for coords in coordinate_dict) + self.coordinate_dict = coordinate_dict self.location_list = None + self.use_scan_coordinates = False + self.scan_coordinates_mm = location_list + self.scan_coordinates_name = list(coordinate_dict.keys()) # list(coordinate_dict.keys()) if not wellplate + else: + print("obsolete functionailty. 
use coordinate acquisition instead of grid acquisition") + return - self.abort_acqusition_requested = False + print("num regions:",len(self.scan_coordinates_mm)) + print("region ids:", self.scan_coordinates_name) + print("region coordinates:", self.scan_coordinates_mm) - + self.abort_acqusition_requested = False self.configuration_before_running_multipoint = self.liveController.currentConfiguration # stop live @@ -2027,25 +2684,44 @@ def run_acquisition(self, location_list=None): # @@@ to do: change name to run_e else: self.usb_spectrometer_was_streaming = False - if self.parent is not None: - try: - self.parent.imageDisplayTabs.setCurrentWidget(self.parent.imageArrayDisplayWindow.widget) - except: - pass - try: + # set current tabs + if self.parent.performance_mode: + self.parent.imageDisplayTabs.setCurrentIndex(0) + + elif self.parent is not None and not self.parent.live_only_mode: + configs = [config.name for config in self.selected_configurations] + print(configs) + if DO_FLUORESCENCE_RTP and 'BF LED matrix left half' in configs and 'BF LED matrix right half' in configs and 'Fluorescence 405 nm Ex' in configs: self.parent.recordTabWidget.setCurrentWidget(self.parent.statsDisplayWidget) - except: - pass - + if USE_NAPARI_FOR_MULTIPOINT: + self.parent.imageDisplayTabs.setCurrentWidget(self.parent.napariRTPWidget) + else: + self.parent.imageDisplayTabs.setCurrentWidget(self.parent.imageArrayDisplayWindow.widget) + + elif USE_NAPARI_FOR_MOSAIC_DISPLAY and self.coordinate_dict is not None: + self.parent.imageDisplayTabs.setCurrentWidget(self.parent.napariMosaicDisplayWidget) + + elif USE_NAPARI_FOR_TILED_DISPLAY and SHOW_TILED_PREVIEW: + self.parent.imageDisplayTabs.setCurrentWidget(self.parent.napariTiledDisplayWidget) + + elif USE_NAPARI_FOR_MULTIPOINT: + self.parent.imageDisplayTabs.setCurrentWidget(self.parent.napariMultiChannelWidget) + else: + self.parent.imageDisplayTabs.setCurrentIndex(0) + # run the acquisition self.timestamp_acquisition_started = 
time.time() + + if SHOW_TILED_PREVIEW: + self.navigationController.keep_scan_begin_position(self.navigationController.x_pos_mm, self.navigationController.y_pos_mm) + # create a QThread object if self.gen_focus_map and not self.do_reflection_af: print("Generating focus map for multipoint grid") starting_x_mm = self.navigationController.x_pos_mm starting_y_mm = self.navigationController.y_pos_mm - fmap_Nx = max(2,self.NX) - fmap_Ny = max(2,self.NY) + fmap_Nx = max(2,self.NX-1) + fmap_Ny = max(2,self.NY-1) fmap_dx = self.deltaX fmap_dy = self.deltaY if abs(fmap_dx) < 0.1 and fmap_dx != 0.0: @@ -2074,22 +2750,37 @@ def run_acquisition(self, location_list=None): # @@@ to do: change name to run_e self.thread = QThread() # create a worker object - self.processingHandler.start_processing() - self.processingHandler.start_uploading() + if DO_FLUORESCENCE_RTP: + self.processingHandler.start_processing() + self.processingHandler.start_uploading() self.multiPointWorker = MultiPointWorker(self) + self.multiPointWorker.use_piezo = self.use_piezo # move the worker to the thread self.multiPointWorker.moveToThread(self.thread) # connect signals and slots self.thread.started.connect(self.multiPointWorker.run) self.multiPointWorker.signal_detection_stats.connect(self.slot_detection_stats) self.multiPointWorker.finished.connect(self._on_acquisition_completed) - self.multiPointWorker.finished.connect(self.multiPointWorker.deleteLater) - self.multiPointWorker.finished.connect(self.thread.quit) + if DO_FLUORESCENCE_RTP: + self.processingHandler.finished.connect(self.multiPointWorker.deleteLater) + self.processingHandler.finished.connect(self.thread.quit) + else: + self.multiPointWorker.finished.connect(self.multiPointWorker.deleteLater) + self.multiPointWorker.finished.connect(self.thread.quit) self.multiPointWorker.image_to_display.connect(self.slot_image_to_display) self.multiPointWorker.image_to_display_multi.connect(self.slot_image_to_display_multi) + 
self.multiPointWorker.image_to_display_tiled_preview.connect(self.slot_image_to_display_tiled_preview) self.multiPointWorker.spectrum_to_display.connect(self.slot_spectrum_to_display) self.multiPointWorker.signal_current_configuration.connect(self.slot_current_configuration,type=Qt.BlockingQueuedConnection) self.multiPointWorker.signal_register_current_fov.connect(self.slot_register_current_fov) + self.multiPointWorker.napari_layers_init.connect(self.slot_napari_layers_init) + self.multiPointWorker.napari_rtp_layers_update.connect(self.slot_napari_rtp_layers_update) + self.multiPointWorker.napari_layers_update.connect(self.slot_napari_layers_update) + self.multiPointWorker.napari_mosaic_update.connect(self.slot_napari_mosaic_update) + self.multiPointWorker.signal_z_piezo_um.connect(self.slot_z_piezo_um) + self.multiPointWorker.signal_acquisition_progress.connect(self.slot_acquisition_progress) + self.multiPointWorker.signal_region_progress.connect(self.slot_region_progress) + # self.thread.finished.connect(self.thread.deleteLater) self.thread.finished.connect(self.thread.quit) # start the thread @@ -2108,7 +2799,7 @@ def _on_acquisition_completed(self): if self.camera_callback_was_enabled_before_multipoint: self.camera.enable_callback() self.camera_callback_was_enabled_before_multipoint = False - + # re-enable live if it's previously on if self.liveController_was_live_before_multipoint: self.liveController.start_live() @@ -2116,17 +2807,20 @@ def _on_acquisition_completed(self): if self.usb_spectrometer != None: if self.usb_spectrometer_was_streaming: self.usb_spectrometer.resume_streaming() - + # emit the acquisition finished signal to enable the UI - self.processingHandler.end_processing() if self.parent is not None: try: - self.parent.dataHandler.set_number_of_images_per_page(self.old_images_per_page) + # self.parent.dataHandler.set_number_of_images_per_page(self.old_images_per_page) self.parent.dataHandler.sort('Sort by prediction score') 
self.parent.dataHandler.signal_populate_page0.emit() except: pass + print("total time for acquisition + processing + reset:", time.time() - self.recording_start_time) + utils.create_done_file(os.path.join(self.base_path,self.experiment_ID)) self.acquisitionFinished.emit() + if not self.abort_acqusition_requested: + self.signal_stitcher.emit(os.path.join(self.base_path,self.experiment_ID)) QApplication.processEvents() def request_abort_aquisition(self): @@ -2138,6 +2832,9 @@ def slot_detection_stats(self, stats): def slot_image_to_display(self,image): self.image_to_display.emit(image) + def slot_image_to_display_tiled_preview(self,image): + self.image_to_display_tiled_preview.emit(image) + def slot_spectrum_to_display(self,data): self.spectrum_to_display.emit(data) @@ -2150,6 +2847,27 @@ def slot_current_configuration(self,configuration): def slot_register_current_fov(self,x_mm,y_mm): self.signal_register_current_fov.emit(x_mm,y_mm) + def slot_napari_rtp_layers_update(self, image, channel): + self.napari_rtp_layers_update.emit(image, channel) + + def slot_napari_layers_init(self, image_height, image_width, dtype): + self.napari_layers_init.emit(image_height, image_width, dtype) + + def slot_napari_layers_update(self, image, i, j, k, channel): + self.napari_layers_update.emit(image, i, j, k, channel) + + def slot_napari_mosaic_update(self, image, x_mm, y_mm, k, channel): + self.napari_mosaic_update.emit(image, x_mm, y_mm, k, channel) + + def slot_z_piezo_um(self, displacement_um): + self.signal_z_piezo_um.emit(displacement_um) + + def slot_acquisition_progress(self, current_region, total_regions, current_time_point): + self.signal_acquisition_progress.emit(current_region, total_regions, current_time_point) + + def slot_region_progress(self, current_fov, total_fovs): + self.signal_region_progress.emit(current_fov, total_fovs) + class TrackingController(QObject): @@ -2192,11 +2910,11 @@ def __init__(self,camera,microcontroller,navigationController,configurationManag 
self.objective = None def start_tracking(self): - + # save pre-tracking configuration print('start tracking') self.configuration_before_running_tracking = self.liveController.currentConfiguration - + # stop live if self.liveController.is_live: self.was_live_before_tracking = True @@ -2252,21 +2970,21 @@ def _on_tracking_stopped(self): if self.camera_callback_was_enabled_before_tracking: self.camera.enable_callback() self.camera_callback_was_enabled_before_tracking = False - + # re-enable live if it's previously on if self.was_live_before_tracking: self.liveController.start_live() # show ROI selector self.imageDisplayWindow.show_ROI_selector() - + # emit the acquisition finished signal to enable the UI self.signal_tracking_stopped.emit() QApplication.processEvents() def start_new_experiment(self,experiment_ID): # @@@ to do: change name to prepare_folder_for_new_experiment # generate unique experiment ID - self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') self.recording_start_time = time.time() # create a new folder try: @@ -2332,7 +3050,7 @@ def on_new_frame(self,image,frame_ID,timestamp): # initialize the PID controller pass - # crop the image, resize the image + # crop the image, resize the image # [to fill] # get the location @@ -2386,7 +3104,7 @@ def __init__(self,trackingController): self.base_path = self.trackingController.base_path self.selected_configurations = self.trackingController.selected_configurations self.tracker = trackingController.tracker - + self.number_of_selected_configurations = len(self.selected_configurations) # self.tracking_time_interval_s = self.trackingController.tracking_time_interval_s @@ -2404,11 +3122,11 @@ def run(self): # save metadata self.txt_file = open( os.path.join(self.base_path,self.experiment_ID,"metadata.txt"), "w+") - self.txt_file.write('t0: ' + 
datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + '\n') + self.txt_file.write('t0: ' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') + '\n') self.txt_file.write('objective: ' + self.trackingController.objective + '\n') self.txt_file.close() - # create a file for logging + # create a file for logging self.csv_file = open( os.path.join(self.base_path,self.experiment_ID,"track.csv"), "w+") self.csv_file.write('dt (s), x_stage (mm), y_stage (mm), z_stage (mm), x_image (mm), y_image(mm), image_filename\n') @@ -2436,7 +3154,7 @@ def run(self): self.signal_current_configuration.emit(config) self.wait_till_operation_is_completed() - # do autofocus + # do autofocus if self.trackingController.flag_AF_enabled and tracking_frame_counter > 1: # do autofocus print('>>> autofocus') @@ -2457,7 +3175,7 @@ def run(self): self.liveController.turn_on_illumination() # keep illumination on for single configuration acqusition self.wait_till_operation_is_completed() t = time.time() - self.camera.send_trigger() + self.camera.send_trigger() image = self.camera.read_frame() if(self.number_of_selected_configurations > 1): self.liveController.turn_off_illumination() # keep illumination on for single configuration acqusition @@ -2475,7 +3193,7 @@ def run(self): self.wait_till_operation_is_completed() self.liveController.turn_on_illumination() self.wait_till_operation_is_completed() - self.camera.send_trigger() + self.camera.send_trigger() image_ = self.camera.read_frame() self.liveController.turn_off_illumination() image_ = utils.crop_image(image_,self.crop_width,self.crop_height) @@ -2495,9 +3213,9 @@ def run(self): # track objectFound,centroid,rect_pts = self.tracker.track(image, None, is_first_frame = is_first_frame) if objectFound == False: - print('') + print('tracker: object not found') break - in_plane_position_error_pixel = image_center - centroid + in_plane_position_error_pixel = image_center - centroid in_plane_position_error_mm = 
in_plane_position_error_pixel*self.trackingController.pixel_size_um_scaled/1000 x_error_mm = in_plane_position_error_mm[0] y_error_mm = in_plane_position_error_mm[1] @@ -2508,16 +3226,16 @@ def run(self): # move if self.trackingController.flag_stage_tracking_enabled: - x_correction_usteps = int(x_error_mm/(SCREW_PITCH_X_MM/FULLSTEPS_PER_REV_X/self.navigationController.x_microstepping)) - y_correction_usteps = int(y_error_mm/(SCREW_PITCH_Y_MM/FULLSTEPS_PER_REV_Y/self.navigationController.y_microstepping)) + x_correction_usteps = int(x_error_mm/self.navigationController.get_mm_per_ustep_X()) + y_correction_usteps = int(y_error_mm/self.navigationController.get_mm_per_ustep_Y()) self.microcontroller.move_x_usteps(TRACKING_MOVEMENT_SIGN_X*x_correction_usteps) - self.microcontroller.move_y_usteps(TRACKING_MOVEMENT_SIGN_Y*y_correction_usteps) + self.microcontroller.move_y_usteps(TRACKING_MOVEMENT_SIGN_Y*y_correction_usteps) # save image if self.trackingController.flag_save_image: self.image_saver.enqueue(image,tracking_frame_counter,str(config.name)) - # save position data + # save position data # self.csv_file.write('dt (s), x_stage (mm), y_stage (mm), z_stage (mm), x_image (mm), y_image(mm), image_filename\n') self.csv_file.write(str(t)+','+str(x_stage)+','+str(y_stage)+','+str(z_stage)+','+str(x_error_mm)+','+str(y_error_mm)+','+str(tracking_frame_counter)+'\n') if tracking_frame_counter%100 == 0: @@ -2530,7 +3248,7 @@ def run(self): while(time.time() - timestamp_last_frame < self.trackingController.tracking_time_interval_s): time.sleep(0.005) - # increament counter + # increament counter tracking_frame_counter = tracking_frame_counter + 1 # tracking terminated @@ -2545,10 +3263,12 @@ def wait_till_operation_is_completed(self): class ImageDisplayWindow(QMainWindow): - image_click_coordinates = Signal(int, int) + image_click_coordinates = Signal(int, int, int, int) - def __init__(self, window_title='', draw_crosshairs = False, show_LUT=False, autoLevels=False): + def 
__init__(self, liveController=None, contrastManager=None, window_title='', draw_crosshairs=False, show_LUT=False, autoLevels=False): super().__init__() + self.liveController = liveController + self.contrastManager = contrastManager self.setWindowTitle(window_title) self.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) self.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint) @@ -2562,10 +3282,10 @@ def __init__(self, window_title='', draw_crosshairs = False, show_LUT=False, aut self.graphics_widget = pg.GraphicsLayoutWidget() self.graphics_widget.view = self.graphics_widget.addViewBox() self.graphics_widget.view.invertY() - + ## lock the aspect ratio so pixels are always square self.graphics_widget.view.setAspectLocked(True) - + ## Create image item if self.show_LUT: self.graphics_widget.view = pg.ImageView() @@ -2573,9 +3293,9 @@ def __init__(self, window_title='', draw_crosshairs = False, show_LUT=False, aut self.graphics_widget.img.setBorder('w') self.graphics_widget.view.ui.roiBtn.hide() self.graphics_widget.view.ui.menuBtn.hide() - # self.LUTWidget = self.graphics_widget.view.getHistogramWidget() - # self.LUTWidget.autoHistogramRange() - # self.graphics_widget.view.autolevels() + self.LUTWidget = self.graphics_widget.view.getHistogramWidget() + self.LUTWidget.region.sigRegionChanged.connect(self.update_contrast_limits) + self.LUTWidget.region.sigRegionChangeFinished.connect(self.update_contrast_limits) else: self.graphics_widget.img = pg.ImageItem(border='w') self.graphics_widget.view.addItem(self.graphics_widget.img) @@ -2605,32 +3325,29 @@ def __init__(self, window_title='', draw_crosshairs = False, show_LUT=False, aut ## Layout layout = QGridLayout() if self.show_LUT: - layout.addWidget(self.graphics_widget.view, 0, 0) + layout.addWidget(self.graphics_widget.view, 0, 0) else: - layout.addWidget(self.graphics_widget, 0, 0) + layout.addWidget(self.graphics_widget, 0, 0) self.widget.setLayout(layout) self.setCentralWidget(self.widget) # 
set window size - desktopWidget = QDesktopWidget(); - width = min(desktopWidget.height()*0.9,1000) #@@@TO MOVE@@@# + desktopWidget = QDesktopWidget() + width = min(desktopWidget.height()*0.9,1000) height = width self.setFixedSize(int(width),int(height)) + + # Connect mouse click handler if self.show_LUT: - self.graphics_widget.view.getView().scene().sigMouseClicked.connect(self.mouse_clicked) + self.graphics_widget.view.getView().scene().sigMouseClicked.connect(self.handle_mouse_click) else: - self.graphics_widget.view.scene().sigMouseClicked.connect(self.mouse_clicked) - - def is_within_image(self, coordinates): - try: - image_width = self.graphics_widget.img.width() - image_height = self.graphics_widget.img.height() + self.graphics_widget.view.scene().sigMouseClicked.connect(self.handle_mouse_click) - return 0 <= coordinates.x() < image_width and 0 <= coordinates.y() < image_height - except: - return False + def handle_mouse_click(self, evt): + # Only process double clicks + if not evt.double(): + return - def mouse_clicked(self, evt): try: pos = evt.pos() if self.show_LUT: @@ -2644,19 +3361,51 @@ def mouse_clicked(self, evt): if self.is_within_image(image_coord): x_pixel_centered = int(image_coord.x() - self.graphics_widget.img.width()/2) y_pixel_centered = int(image_coord.y() - self.graphics_widget.img.height()/2) - self.image_click_coordinates.emit(x_pixel_centered, y_pixel_centered) + self.image_click_coordinates.emit(x_pixel_centered, y_pixel_centered, + self.graphics_widget.img.width(), + self.graphics_widget.img.height()) + + def is_within_image(self, coordinates): + try: + image_width = self.graphics_widget.img.width() + image_height = self.graphics_widget.img.height() + return 0 <= coordinates.x() < image_width and 0 <= coordinates.y() < image_height + except: + return False - def display_image(self,image): + # [Rest of the methods remain exactly the same...] 
+ def display_image(self, image): if ENABLE_TRACKING: image = np.copy(image) - self.image_height = image.shape[0], - self.image_width = image.shape[1] - if(self.draw_rectangle): - cv2.rectangle(image, self.ptRect1, self.ptRect2,(255,255,255) , 4) + self.image_height, self.image_width = image.shape[:2] + if self.draw_rectangle: + cv2.rectangle(image, self.ptRect1, self.ptRect2, (255,255,255), 4) self.draw_rectangle = False - self.graphics_widget.img.setImage(image,autoLevels=self.autoLevels) - else: - self.graphics_widget.img.setImage(image,autoLevels=self.autoLevels) + + info = np.iinfo(image.dtype) if np.issubdtype(image.dtype, np.integer) else np.finfo(image.dtype) + min_val, max_val = info.min, info.max + + if self.liveController is not None and self.contrastManager is not None: + channel_name = self.liveController.currentConfiguration.name + if self.contrastManager.acquisition_dtype != None and self.contrastManager.acquisition_dtype != np.dtype(image.dtype): + self.contrastManager.scale_contrast_limits(np.dtype(image.dtype)) + min_val, max_val = self.contrastManager.get_limits(channel_name, image.dtype) + + self.graphics_widget.img.setImage(image, autoLevels=self.autoLevels, levels=(min_val, max_val)) + + if not self.autoLevels: + if self.show_LUT: + self.LUTWidget.setLevels(min_val, max_val) + self.LUTWidget.setHistogramRange(info.min, info.max) + else: + self.graphics_widget.img.setLevels((min_val, max_val)) + + self.graphics_widget.img.updateImage() + + def update_contrast_limits(self): + if self.show_LUT and self.contrastManager and self.contrastManager.acquisition_dtype: + min_val, max_val = self.LUTWidget.region.getRegion() + self.contrastManager.update_limits(self.liveController.currentConfiguration.name, min_val, max_val) def update_ROI(self): self.roi_pos = self.ROI.pos() @@ -2688,145 +3437,271 @@ def set_autolevel(self,enabled): self.autoLevels = enabled print('set autolevel to ' + str(enabled)) + class NavigationViewer(QFrame): - def __init__(self, 
sample = 'glass slide', invertX = False, *args, **kwargs): + signal_coordinates_clicked = Signal(float, float) # Will emit x_mm, y_mm when clicked + signal_update_live_scan_grid = Signal(float, float) + signal_update_well_coordinates = Signal(bool) + + def __init__(self, objectivestore, sample = 'glass slide', invertX = False, *args, **kwargs): super().__init__(*args, **kwargs) self.setFrameStyle(QFrame.Panel | QFrame.Raised) - + self.sample = sample + self.objectiveStore = objectivestore + self.well_size_mm = WELL_SIZE_MM + self.well_spacing_mm = WELL_SPACING_MM + self.number_of_skip = NUMBER_OF_SKIP + self.a1_x_mm = A1_X_MM + self.a1_y_mm = A1_Y_MM + self.a1_x_pixel = A1_X_PIXEL + self.a1_y_pixel = A1_Y_PIXEL + self.location_update_threshold_mm = 0.2 + self.box_color = (255, 0, 0) + self.box_line_thickness = 2 + self.acquisition_size = Acquisition.CROP_HEIGHT + self.x_mm = None + self.y_mm = None + self.acquisition_started = False + self.image_paths = { + 'glass slide': 'images/slide carrier_828x662.png', + '4 glass slide': 'images/4 slide carrier_1509x1010.png', + '6 well plate': 'images/6 well plate_1509x1010.png', + '12 well plate': 'images/12 well plate_1509x1010.png', + '24 well plate': 'images/24 well plate_1509x1010.png', + '96 well plate': 'images/96 well plate_1509x1010.png', + '384 well plate': 'images/384 well plate_1509x1010.png', + '1536 well plate': 'images/1536 well plate_1509x1010.png' + } + + print("navigation viewer:", sample) + self.init_ui(invertX) + + self.load_background_image(self.image_paths.get(sample, 'images/slide carrier_828x662.png')) + self.create_layers() + self.update_display_properties(sample) + # self.update_display() + + def init_ui(self, invertX): # interpret image data as row-major instead of col-major pg.setConfigOptions(imageAxisOrder='row-major') self.graphics_widget = pg.GraphicsLayoutWidget() self.graphics_widget.setBackground("w") - self.graphics_widget.view = self.graphics_widget.addViewBox(invertX=invertX,invertY=True) 
- ## lock the aspect ratio so pixels are always square - self.graphics_widget.view.setAspectLocked(True) - ## Create image item - self.graphics_widget.img = pg.ImageItem(border='w') - self.graphics_widget.view.addItem(self.graphics_widget.img) + + self.view = self.graphics_widget.addViewBox(invertX=invertX, invertY=True) + self.view.setAspectLocked(True) self.grid = QVBoxLayout() self.grid.addWidget(self.graphics_widget) self.setLayout(self.grid) - - if sample == 'glass slide': - self.background_image = cv2.imread('images/slide carrier_828x662.png') - elif sample == '384 well plate': - self.background_image = cv2.imread('images/384 well plate_1509x1010.png') - elif sample == '96 well plate': - self.background_image = cv2.imread('images/96 well plate_1509x1010.png') - elif sample == '24 well plate': - self.background_image = cv2.imread('images/24 well plate_1509x1010.png') - elif sample == '12 well plate': - self.background_image = cv2.imread('images/12 well plate_1509x1010.png') - elif sample == '6 well plate': - self.background_image = cv2.imread('images/6 well plate_1509x1010.png') + # Connect double-click handler + self.view.scene().sigMouseClicked.connect(self.handle_mouse_click) + + def load_background_image(self, image_path): + self.view.clear() + self.background_image = cv2.imread(image_path) + if self.background_image is None: + #raise ValueError(f"Failed to load image from {image_path}") + self.background_image = cv2.imread(self.image_paths.get('glass slide')) - self.current_image = np.copy(self.background_image) - self.current_image_display = np.copy(self.background_image) - self.image_height = self.background_image.shape[0] - self.image_width = self.background_image.shape[1] + if len(self.background_image.shape) == 2: # Grayscale image + self.background_image = cv2.cvtColor(self.background_image, cv2.COLOR_GRAY2RGBA) + elif self.background_image.shape[2] == 3: # BGR image + self.background_image = cv2.cvtColor(self.background_image, cv2.COLOR_BGR2RGBA) + 
elif self.background_image.shape[2] == 4: # BGRA image + self.background_image = cv2.cvtColor(self.background_image, cv2.COLOR_BGRA2RGBA) - self.location_update_threshold_mm = 0.4 - self.sample = sample + self.background_image_copy = self.background_image.copy() + self.image_height, self.image_width = self.background_image.shape[:2] + self.background_item = pg.ImageItem(self.background_image) + self.view.addItem(self.background_item) + + def create_layers(self): + self.scan_overlay = np.zeros((self.image_height, self.image_width, 4), dtype=np.uint8) + self.fov_overlay = np.zeros((self.image_height, self.image_width, 4), dtype=np.uint8) + + self.scan_overlay_item = pg.ImageItem() + self.fov_overlay_item = pg.ImageItem() + + self.view.addItem(self.scan_overlay_item) + self.view.addItem(self.fov_overlay_item) + def update_display_properties(self, sample): if sample == 'glass slide': - self.origin_bottom_left_x = 200 - self.origin_bottom_left_y = 120 + self.location_update_threshold_mm = 0.2 self.mm_per_pixel = 0.1453 - self.fov_size_mm = 3000*1.85/(50/9)/1000 + self.origin_x_pixel = 200 + self.origin_y_pixel = 120 + elif sample == '4 glass slide': + self.location_update_threshold_mm = 0.2 + self.mm_per_pixel = 0.084665 + self.origin_x_pixel = 50 + self.origin_y_pixel = 0 + self.view.invertX(False) + self.view.invertY(True) else: self.location_update_threshold_mm = 0.05 self.mm_per_pixel = 0.084665 - self.fov_size_mm = 3000*1.85/(50/10)/1000 - self.origin_bottom_left_x = X_ORIGIN_384_WELLPLATE_PIXEL - (X_MM_384_WELLPLATE_UPPERLEFT)/self.mm_per_pixel - self.origin_bottom_left_y = Y_ORIGIN_384_WELLPLATE_PIXEL - (Y_MM_384_WELLPLATE_UPPERLEFT)/self.mm_per_pixel + self.origin_x_pixel = self.a1_x_pixel - (self.a1_x_mm)/self.mm_per_pixel + self.origin_y_pixel = self.a1_y_pixel - (self.a1_y_mm)/self.mm_per_pixel + self.view.invertX(False) + self.view.invertY(True) + self.update_fov_size() + + def update_fov_size(self): + pixel_size_um = self.objectiveStore.get_pixel_size() + 
self.fov_size_mm = self.acquisition_size * pixel_size_um / 1000 + + def on_objective_changed(self): + self.clear_overlay() + self.update_fov_size() + if self.x_mm is not None and self.y_mm is not None: + if 'glass slide'in self.sample: + self.signal_update_live_scan_grid.emit(self.x_mm, self.y_mm) + self.draw_current_fov(self.x_mm, self.y_mm) + + def on_acquisition_start(self, acquisition_started): + self.acquisition_started = acquisition_started + + def update_wellplate_settings(self, sample_format, a1_x_mm, a1_y_mm, a1_x_pixel, a1_y_pixel, well_size_mm, well_spacing_mm, number_of_skip, rows, cols): + if isinstance(sample_format, QVariant): + sample_format = sample_format.value() + + if sample_format == 'glass slide': + if IS_HCS: + sample = '4 glass slide' + else: + sample = 'glass slide' + else: + sample = sample_format - self.box_color = (255, 0, 0) - self.box_line_thickness = 2 + self.sample = sample + self.a1_x_mm = a1_x_mm + self.a1_y_mm = a1_y_mm + self.a1_x_pixel = a1_x_pixel + self.a1_y_pixel = a1_y_pixel + self.well_size_mm = well_size_mm + self.well_spacing_mm = well_spacing_mm + self.number_of_skip = number_of_skip + self.rows = rows + self.cols = cols + + # Try to find the image for the wellplate + image_path = self.image_paths.get(sample) + if image_path is None or not os.path.exists(image_path): + # Look for a custom wellplate image + custom_image_path = os.path.join('images', self.sample + '.png') + print(custom_image_path) + if os.path.exists(custom_image_path): + image_path = custom_image_path + else: + print(f"Warning: Image not found for {sample}. 
Using default image.") + image_path = self.image_paths.get('glass slide') # Use a default image - self.x_mm = None - self.y_mm = None + self.load_background_image(image_path) + self.create_layers() + self.update_display_properties(sample) + self.draw_current_fov(self.x_mm, self.y_mm) - self.update_display() + def update_current_location(self, x_mm=None, y_mm=None): + if x_mm is None and y_mm is None: + if self.x_mm is None and self.y_mm is None: + return + else: + self.draw_current_fov(self.x_mm, self.y_mm) - def update_current_location(self,x_mm,y_mm): - if self.x_mm != None and self.y_mm != None: + elif self.x_mm is not None and self.y_mm is not None: # update only when the displacement has exceeded certain value if abs(x_mm - self.x_mm) > self.location_update_threshold_mm or abs(y_mm - self.y_mm) > self.location_update_threshold_mm: - self.draw_current_fov(x_mm,y_mm) - self.update_display() + self.draw_current_fov(x_mm, y_mm) self.x_mm = x_mm self.y_mm = y_mm + # update_live_scan_grid + if 'glass slide' in self.sample and not self.acquisition_started: + self.signal_update_live_scan_grid.emit(x_mm, y_mm) else: - self.draw_current_fov(x_mm,y_mm) - self.update_display() + self.draw_current_fov(x_mm, y_mm) self.x_mm = x_mm self.y_mm = y_mm + # update_live_scan_grid + if 'glass slide' in self.sample and not self.acquisition_started: + self.signal_update_live_scan_grid.emit(x_mm, y_mm) - def draw_current_fov(self,x_mm,y_mm): - self.current_image_display = np.copy(self.current_image) + def get_FOV_pixel_coordinates(self, x_mm, y_mm): if self.sample == 'glass slide': - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - 
(self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) + current_FOV_top_left = ( + round(self.origin_x_pixel + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), + round(self.image_height - (self.origin_y_pixel + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel) + ) + current_FOV_bottom_right = ( + round(self.origin_x_pixel + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), + round(self.image_height - (self.origin_y_pixel + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel) + ) else: - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - cv2.rectangle(self.current_image_display, current_FOV_top_left, current_FOV_bottom_right, self.box_color, self.box_line_thickness) - - def update_display(self): - self.graphics_widget.img.setImage(self.current_image_display,autoLevels=False) + current_FOV_top_left = ( + round(self.origin_x_pixel + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), + round((self.origin_y_pixel + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel) + ) + current_FOV_bottom_right = ( + round(self.origin_x_pixel + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), + round((self.origin_y_pixel + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel) + ) + return current_FOV_top_left, current_FOV_bottom_right + + def draw_current_fov(self, x_mm, y_mm): + self.fov_overlay.fill(0) + current_FOV_top_left, current_FOV_bottom_right = self.get_FOV_pixel_coordinates(x_mm, y_mm) + cv2.rectangle(self.fov_overlay, 
current_FOV_top_left, current_FOV_bottom_right, (255, 0, 0, 255), self.box_line_thickness) + self.fov_overlay_item.setImage(self.fov_overlay) + + def register_fov(self, x_mm, y_mm): + color = (0, 0, 255, 255) # Blue RGBA + current_FOV_top_left, current_FOV_bottom_right = self.get_FOV_pixel_coordinates(x_mm, y_mm) + cv2.rectangle(self.background_image, current_FOV_top_left, current_FOV_bottom_right, color, self.box_line_thickness) + self.background_item.setImage(self.background_image) + + def register_fov_to_image(self, x_mm, y_mm): + color = (252, 174, 30, 128) # Yellow RGBA + current_FOV_top_left, current_FOV_bottom_right = self.get_FOV_pixel_coordinates(x_mm, y_mm) + cv2.rectangle(self.scan_overlay, current_FOV_top_left, current_FOV_bottom_right, color, self.box_line_thickness) + self.scan_overlay_item.setImage(self.scan_overlay) + + def deregister_fov_to_image(self, x_mm, y_mm): + current_FOV_top_left, current_FOV_bottom_right = self.get_FOV_pixel_coordinates(x_mm, y_mm) + cv2.rectangle(self.scan_overlay, current_FOV_top_left, current_FOV_bottom_right, (0, 0, 0, 0), self.box_line_thickness) + self.scan_overlay_item.setImage(self.scan_overlay) def clear_slide(self): - self.current_image = np.copy(self.background_image) - self.current_image_display = np.copy(self.background_image) - self.update_display() + self.background_image = self.background_image_copy.copy() + self.background_item.setImage(self.background_image) + self.draw_current_fov(self.x_mm, self.y_mm) + + def update_slide(self): + self.background_image = self.background_image_copy.copy() + self.background_item.setImage(self.background_image) + self.draw_current_fov(self.x_mm, self.y_mm) + self.signal_update_well_coordinates.emit(True) + + def clear_overlay(self): + self.scan_overlay.fill(0) + self.scan_overlay_item.setImage(self.scan_overlay) + + def handle_mouse_click(self, evt): + if not evt.double(): + return + try: + # Get mouse position in image coordinates (independent of zoom) + mouse_point = 
self.background_item.mapFromScene(evt.scenePos()) - def register_fov(self,x_mm,y_mm): - color = (0,0,255) - if self.sample == 'glass slide': - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - else: - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - cv2.rectangle(self.current_image, current_FOV_top_left, current_FOV_bottom_right, color, self.box_line_thickness) - - def register_fov_to_image(self,x_mm,y_mm): - color = (252,174,30) - if self.sample == 'glass slide': - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - else: - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + 
y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - cv2.rectangle(self.current_image, current_FOV_top_left, current_FOV_bottom_right, color, self.box_line_thickness) - - def deregister_fov_to_image(self,x_mm,y_mm): - color = (255,255,255) - if self.sample == 'glass slide': - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round(self.image_height - (self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - else: - current_FOV_top_left = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel - self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) - self.fov_size_mm/2/self.mm_per_pixel)) - current_FOV_bottom_right = (round(self.origin_bottom_left_x + x_mm/self.mm_per_pixel + self.fov_size_mm/2/self.mm_per_pixel), - round((self.origin_bottom_left_y + y_mm/self.mm_per_pixel) + self.fov_size_mm/2/self.mm_per_pixel)) - cv2.rectangle(self.current_image, current_FOV_top_left, current_FOV_bottom_right, color, self.box_line_thickness) + # Subtract origin offset before converting to mm + x_mm = (mouse_point.x() - self.origin_x_pixel) * self.mm_per_pixel + y_mm = (mouse_point.y() - self.origin_y_pixel) * self.mm_per_pixel + + self.signal_coordinates_clicked.emit(x_mm, y_mm) + + except Exception as e: + print(f"Error processing navigation click: {e}") + return class ImageArrayDisplayWindow(QMainWindow): @@ -2845,7 +3720,7 @@ def 
__init__(self, window_title=''): self.graphics_widget_1.view = self.graphics_widget_1.addViewBox() self.graphics_widget_1.view.setAspectLocked(True) self.graphics_widget_1.img = pg.ImageItem(border='w') - self.graphics_widget_1.view.addItem(self.graphics_widget_1.img) + self.graphics_widget_1.view.addItem(self.graphics_widget_1.img) self.graphics_widget_1.view.invertY() self.graphics_widget_2 = pg.GraphicsLayoutWidget() @@ -2873,7 +3748,7 @@ def __init__(self, window_title=''): layout.addWidget(self.graphics_widget_1, 0, 0) layout.addWidget(self.graphics_widget_2, 0, 1) layout.addWidget(self.graphics_widget_3, 1, 0) - layout.addWidget(self.graphics_widget_4, 1, 1) + layout.addWidget(self.graphics_widget_4, 1, 1) self.widget.setLayout(layout) self.setCentralWidget(self.widget) @@ -2899,7 +3774,7 @@ def __init__(self,filename="channel_configurations.xml"): self.config_filename = filename self.configurations = [] self.read_configurations() - + def save_configurations(self): self.write_configuration(self.config_filename) @@ -2909,15 +3784,17 @@ def write_configuration(self,filename): def read_configurations(self): if(os.path.isfile(self.config_filename)==False): utils_config.generate_default_configuration(self.config_filename) - self.config_xml_tree = ET.parse(self.config_filename) + print('genenrate default config files') + self.config_xml_tree = etree.parse(self.config_filename) self.config_xml_tree_root = self.config_xml_tree.getroot() self.num_configurations = 0 for mode in self.config_xml_tree_root.iter('mode'): - self.num_configurations = self.num_configurations + 1 + self.num_configurations += 1 self.configurations.append( Configuration( mode_id = mode.get('ID'), name = mode.get('Name'), + color = self.get_channel_color(mode.get('Name')), exposure_time = float(mode.get('ExposureTime')), analog_gain = float(mode.get('AnalogGain')), illumination_source = int(mode.get('IlluminationSource')), @@ -2925,7 +3802,8 @@ def read_configurations(self): camera_sn = 
mode.get('CameraSN'), z_offset = float(mode.get('ZOffset')), pixel_format = mode.get('PixelFormat'), - _pixel_format_options = mode.get('_PixelFormat_options') + _pixel_format_options = mode.get('_PixelFormat_options'), + emission_filter_position = int(mode.get('EmissionFilterPosition', 1)) ) ) @@ -2941,7 +3819,6 @@ def update_configuration_without_writing(self, configuration_id, attribute_name, mode_to_update.set(attribute_name,str(new_value)) def write_configuration_selected(self,selected_configurations,filename): # to be only used with a throwaway instance - # of this class for conf in self.configurations: self.update_configuration_without_writing(conf.id, "Selected", 0) for conf in selected_configurations: @@ -2950,7 +3827,72 @@ def write_configuration_selected(self,selected_configurations,filename): # to be for conf in selected_configurations: self.update_configuration_without_writing(conf.id, "Selected", 0) -class PlateReaderNavigationController(QObject): + def get_channel_color(self, channel): + channel_info = CHANNEL_COLORS_MAP.get(self.extract_wavelength(channel), {'hex': 0xFFFFFF, 'name': 'gray'}) + return channel_info['hex'] + + def extract_wavelength(self, name): + # Split the string and find the wavelength number immediately after "Fluorescence" + parts = name.split() + if 'Fluorescence' in parts: + index = parts.index('Fluorescence') + 1 + if index < len(parts): + return parts[index].split()[0] # Assuming 'Fluorescence 488 nm Ex' and taking '488' + for color in ['R', 'G', 'B']: + if color in parts or "full_" + color in parts: + return color + return None + + +class ContrastManager: + def __init__(self): + self.contrast_limits = {} + self.acquisition_dtype = None + + def update_limits(self, channel, min_val, max_val): + self.contrast_limits[channel] = (min_val, max_val) + + def get_limits(self, channel, dtype=None): + if dtype is not None: + if self.acquisition_dtype is None: + self.acquisition_dtype = dtype + elif self.acquisition_dtype != dtype: + 
self.scale_contrast_limits(dtype) + return self.contrast_limits.get(channel, self.get_default_limits()) + + def get_default_limits(self): + if self.acquisition_dtype is None: + return (0, 1) + elif np.issubdtype(self.acquisition_dtype, np.integer): + info = np.iinfo(self.acquisition_dtype) + return (info.min, info.max) + elif np.issubdtype(self.acquisition_dtype, np.floating): + return (0.0, 1.0) + else: + return (0, 1) + + def get_scaled_limits(self, channel, target_dtype): + min_val, max_val = self.get_limits(channel) + if self.acquisition_dtype == target_dtype: + return min_val, max_val + + source_info = np.iinfo(self.acquisition_dtype) + target_info = np.iinfo(target_dtype) + + scaled_min = (min_val - source_info.min) / (source_info.max - source_info.min) * (target_info.max - target_info.min) + target_info.min + scaled_max = (max_val - source_info.min) / (source_info.max - source_info.min) * (target_info.max - target_info.min) + target_info.min + + return scaled_min, scaled_max + + def scale_contrast_limits(self, target_dtype): + print(f"{self.acquisition_dtype} -> {target_dtype}") + for channel in self.contrast_limits.keys(): + self.contrast_limits[channel] = self.get_scaled_limits(channel, target_dtype) + + self.acquisition_dtype = target_dtype + + +class PlateReaderNavigationController(QObject): # Not implemented for Prior stage signal_homing_complete = Signal() signal_current_well = Signal(str) @@ -3065,22 +4007,54 @@ def home_x(self): def home_y(self): self.microcontroller.home_y() + class ScanCoordinates(object): def __init__(self): self.coordinates_mm = [] self.name = [] self.well_selector = None + self.format = WELLPLATE_FORMAT + self.a1_x_mm = A1_X_MM + self.a1_y_mm = A1_Y_MM + self.wellplate_offset_x_mm = WELLPLATE_OFFSET_X_mm + self.wellplate_offset_y_mm = WELLPLATE_OFFSET_Y_mm + self.well_spacing_mm = WELL_SPACING_MM + self.well_size_mm = WELL_SIZE_MM + + def _index_to_row(self,index): + index += 1 + row = "" + while index > 0: + index -= 1 + row = 
chr(index % 26 + ord('A')) + row + index //= 26 + return row def add_well_selector(self,well_selector): self.well_selector = well_selector + def update_wellplate_settings(self, format_, a1_x_mm, a1_y_mm, a1_x_pixel, a1_y_pixel, size_mm, spacing_mm, number_of_skip): + self.format = format_ + self.a1_x_mm = a1_x_mm + self.a1_y_mm = a1_y_mm + self.a1_x_pixel = a1_x_pixel + self.a1_y_pixel = a1_y_pixel + self.well_size_mm = size_mm + self.well_spacing_mm = spacing_mm + self.number_of_skip = number_of_skip + def get_selected_wells(self): # get selected wells from the widget + print("getting selected wells for acquisition") + if not self.well_selector or self.format == 'glass slide': + return False selected_wells = self.well_selector.get_selected_cells() selected_wells = np.array(selected_wells) # clear the previous selection self.coordinates_mm = [] self.name = [] + if len(selected_wells) == 0: + return False # if no well selected # populate the coordinates rows = np.unique(selected_wells[:,0]) _increasing = True @@ -3091,11 +4065,12 @@ def get_selected_wells(self): if _increasing==False: columns = np.flip(columns) for column in columns: - x_mm = X_MM_384_WELLPLATE_UPPERLEFT + WELL_SIZE_MM_384_WELLPLATE/2 - (A1_X_MM_384_WELLPLATE+WELL_SPACING_MM_384_WELLPLATE*NUMBER_OF_SKIP_384) + column*WELL_SPACING_MM + A1_X_MM + WELLPLATE_OFFSET_X_mm - y_mm = Y_MM_384_WELLPLATE_UPPERLEFT + WELL_SIZE_MM_384_WELLPLATE/2 - (A1_Y_MM_384_WELLPLATE+WELL_SPACING_MM_384_WELLPLATE*NUMBER_OF_SKIP_384) + row*WELL_SPACING_MM + A1_Y_MM + WELLPLATE_OFFSET_Y_mm + x_mm = self.a1_x_mm + column*self.well_spacing_mm + self.wellplate_offset_x_mm + y_mm = self.a1_y_mm + row*self.well_spacing_mm + self.wellplate_offset_y_mm self.coordinates_mm.append((x_mm,y_mm)) - self.name.append(chr(ord('A')+row)+str(column+1)) + self.name.append(self._index_to_row(row)+str(column+1)) _increasing = not _increasing + return len(selected_wells) # if wells selected class LaserAutofocusController(QObject): @@ -3121,7 
+4096,7 @@ def __init__(self,microcontroller,camera,liveController,navigationController,has self.has_two_interfaces = has_two_interfaces # e.g. air-glass and glass water, set to false when (1) using oil immersion (2) using 1 mm thick slide (3) using metal coated slide or Si wafer self.use_glass_top = use_glass_top self.spot_spacing_pixels = None # spacing between the spots from the two interfaces (unit: pixel) - + self.look_for_cache = look_for_cache self.image = None # for saving the focus camera image for debugging when centroid cannot be found @@ -3172,7 +4147,7 @@ def initialize_auto(self): # update camera settings self.camera.set_exposure_time(FOCUS_CAMERA_EXPOSURE_TIME_MS) self.camera.set_analog_gain(FOCUS_CAMERA_ANALOG_GAIN) - + # turn on the laser self.microcontroller.turn_on_AF_laser() self.wait_till_operation_is_completed() @@ -3191,7 +4166,7 @@ def initialize_auto(self): # set camera crop self.initialize_manual(x_offset, y_offset, LASER_AF_CROP_WIDTH, LASER_AF_CROP_HEIGHT, 1, x) - # turn on laser + # turn on laser self.microcontroller.turn_on_AF_laser() self.wait_till_operation_is_completed() @@ -3217,12 +4192,13 @@ def initialize_auto(self): self.microcontroller.turn_off_AF_laser() self.wait_till_operation_is_completed() - # calculate the conversion factor - self.pixel_to_um = 6.0/(x1-x0) - print('pixel to um conversion factor is ' + str(self.pixel_to_um) + ' um/pixel') - # for simulation if x1-x0 == 0: + # for simulation self.pixel_to_um = 0.4 + else: + # calculate the conversion factor + self.pixel_to_um = 6.0/(x1-x0) + print('pixel to um conversion factor is ' + str(self.pixel_to_um) + ' um/pixel') # set reference self.x_reference = x1 @@ -3255,7 +4231,6 @@ def initialize_auto(self): print(e) pass - def measure_displacement(self): # turn on the laser self.microcontroller.turn_on_AF_laser() @@ -3272,10 +4247,14 @@ def measure_displacement(self): def move_to_target(self,target_um): current_displacement_um = self.measure_displacement() - um_to_move = 
target_um - current_displacement_um - # limit the range of movement - um_to_move = min(um_to_move,200) - um_to_move = max(um_to_move,-200) + print("Laser AF displacement: ", current_displacement_um) + + if abs(current_displacement_um) > LASER_AF_RANGE: + print(f'Warning: Measured displacement ({current_displacement_um:.1f} μm) is unreasonably large, using previous z position') + um_to_move = 0 + else: + um_to_move = target_um - current_displacement_um + self.navigationController.move_z(um_to_move/1000) self.wait_till_operation_is_completed() # update the displacement measurement @@ -3355,7 +4334,7 @@ def _get_laser_spot_centroid(self): tmp_y = 0 for i in range(LASER_AF_AVERAGING_N): # send camera trigger - if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: self.camera.send_trigger() elif self.liveController.trigger_mode == TriggerMode.HARDWARE: # self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) @@ -3377,4 +4356,16 @@ def _get_laser_spot_centroid(self): def wait_till_operation_is_completed(self): while self.microcontroller.is_busy(): time.sleep(SLEEP_TIME_S) - + + def get_image(self): + # turn on the laser + self.microcontroller.turn_on_AF_laser() + self.wait_till_operation_is_completed() + # send trigger, grab image and display image + self.camera.send_trigger() + image = self.camera.read_frame() + self.image_to_display.emit(image) + # turn off the laser + self.microcontroller.turn_off_AF_laser() + self.wait_till_operation_is_completed() + return image diff --git a/software/control/core_PDAF.py b/software/control/core_PDAF.py index 055256027..86f75dfd3 100644 --- a/software/control/core_PDAF.py +++ b/software/control/core_PDAF.py @@ -149,7 +149,7 @@ def set_base_path(self,path): self.base_path = path def start_new_experiment(self,experiment_ID): # @@@ to do: change name to prepare_folder_for_new_experiment # generate 
unique experiment ID - self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d %H-%M-%-S.%f') + self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S.%f') self.recording_start_time = time.time() # create a new folder try: @@ -280,4 +280,4 @@ def _run_multipoint_single(self): self.navigationController.move_z_usteps(self.deltaZ_usteps) # move z back - self.navigationController.move_z_usteps(-self.deltaZ_usteps*(self.NZ-1)) \ No newline at end of file + self.navigationController.move_z_usteps(-self.deltaZ_usteps*(self.NZ-1)) diff --git a/software/control/core_platereader.py b/software/control/core_platereader.py index 21c3f7d29..7185b5ab6 100644 --- a/software/control/core_platereader.py +++ b/software/control/core_platereader.py @@ -269,7 +269,7 @@ def set_base_path(self,path): def start_new_experiment(self,experiment_ID): # @@@ to do: change name to prepare_folder_for_new_experiment # generate unique experiment ID - self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + self.experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') self.recording_start_time = time.time() # create a new folder try: diff --git a/software/control/core_usbspectrometer.py b/software/control/core_usbspectrometer.py index e08404fbd..44659bb8b 100644 --- a/software/control/core_usbspectrometer.py +++ b/software/control/core_usbspectrometer.py @@ -138,7 +138,7 @@ def set_recording_time_limit(self,time_limit): def start_new_experiment(self,experiment_ID,add_timestamp=True): if add_timestamp: # generate unique experiment ID - self.experiment_ID = experiment_ID + '_spectrum_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + self.experiment_ID = experiment_ID + '_spectrum_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') else: self.experiment_ID = experiment_ID self.recording_start_time = time.time() diff --git a/software/control/dcam.py b/software/control/dcam.py new 
file mode 100755 index 000000000..bdc06cbb8 --- /dev/null +++ b/software/control/dcam.py @@ -0,0 +1,748 @@ +"""Module to control DCAM on the console. +This imports dcamapi4 and implements functions and classes +Dcamapi and Dcam to make DCAM easier to use. +Dcamapi initializes DCAM only. +The declarations of classes and functions in this file +are subject to change without notice. +""" + +__date__ = '2021-06-30' +__copyright__ = 'Copyright (C) 2021-2024 Hamamatsu Photonics K.K.' + +from control.dcamapi4 import * +# DCAM-API v4 module + +import numpy as np +# pip install numpy +# allocated to receive the image data + + +# ==== DCAMAPI helper functions ==== + +def dcammisc_setupframe(hdcam, bufframe: DCAMBUF_FRAME): + """Setup DCAMBUF_FRAME instance. + Setup DCAMBUF_FRAME instance based on camera setting with hdcam. + + Args: + hdcam (c_void_p): DCAM handle. + bufframe (DCAMBUF_FRAME): Frame information. + + Returns: + DCAMERR: DCAMERR value. + """ + fValue = c_double() + idprop = DCAM_IDPROP.IMAGE_PIXELTYPE + err = dcamprop_getvalue(hdcam, idprop, byref(fValue)) + if not err.is_failed(): + bufframe.type = int(fValue.value) + + idprop = DCAM_IDPROP.IMAGE_WIDTH + err = dcamprop_getvalue(hdcam, idprop, byref(fValue)) + if not err.is_failed(): + bufframe.width = int(fValue.value) + + idprop = DCAM_IDPROP.IMAGE_HEIGHT + err = dcamprop_getvalue(hdcam, idprop, byref(fValue)) + if not err.is_failed(): + bufframe.height = int(fValue.value) + + idprop = DCAM_IDPROP.FRAMEBUNDLE_MODE + err = dcamprop_getvalue(hdcam, idprop, byref(fValue)) + if not err.is_failed() and int(fValue.value) == DCAMPROP.MODE.ON: + idprop = DCAM_IDPROP.FRAMEBUNDLE_ROWBYTES + err = dcamprop_getvalue(hdcam, idprop, byref(fValue)) + if not err.is_failed(): + bufframe.rowbytes = int(fValue.value) + else: + idprop = DCAM_IDPROP.IMAGE_ROWBYTES + err = dcamprop_getvalue(hdcam, idprop, byref(fValue)) + if not err.is_failed(): + bufframe.rowbytes = int(fValue.value) + + return err + + +def 
dcammisc_alloc_ndarray(frame: DCAMBUF_FRAME, framebundlenum=1): + """Allocate NumPy ndarray. + Allocate NumPy ndarray based on information of DCAMBUF_FRAME. + + Args: + frame (DCAMBUF_FRAME): Frame information. + framebundlenum (int): Frame Bundle number. + + Returns: + NumPy ndarray: NumPy ndarray buffer. + bool: False if failed to allocate NumPy ndarray buffer. + """ + height = frame.height * framebundlenum + + if frame.type == DCAM_PIXELTYPE.MONO16: + return np.zeros((height, frame.width), dtype='uint16') + + if frame.type == DCAM_PIXELTYPE.MONO8: + return np.zeros((height, frame.width), dtype='uint8') + + return False + + +# ==== declare Dcamapi class ==== + + +class Dcamapi: + # class instance + __lasterr = DCAMERR.SUCCESS # the last error from functions with dcamapi_ prefix. + __bInitialized = False # Once Dcamapi.init() is called, then True. Dcamapi.uninit() reset this. + __devicecount = 0 + + @classmethod + def __result(cls, errvalue): + """Keep last error code. + Internal use. Keep last error code. + """ + if errvalue < 0: + cls.__lasterr = errvalue + return False + + return True + + @classmethod + def lasterr(cls): + """Return last error code. + Return last error code of Dcamapi member functions. + """ + return cls.__lasterr + + @classmethod + def init(cls, *initparams): + """Initialize DCAM-API. + Initialize DCAM-API. + Do not call this when Dcam object exists because constructor of Dcam ececute this. + After calling close(), call this again if you need to resume measurement. + + Returns: + bool: True if initialize DCAM-API was succeeded. False if dcamapi_init() returned DCAMERR except SUCCESS. lasterr() returns the DCAMERR value. + """ + if cls.__bInitialized: + return cls.__result(DCAMERR.ALREADYINITIALIZED) # dcamapi_init() is called. New Error. 
+ + paraminit = DCAMAPI_INIT() + err = dcamapi_init(byref(paraminit)) + cls.__bInitialized = True + if cls.__result(err) is False: + return False + + cls.__devicecount = paraminit.iDeviceCount + return True, cls.__devicecount + + @classmethod + def uninit(cls): + """Uninitialize DCAM-API. + Uninitialize DCAM-API. + After using DCAM-API, call this function to close all resources. + + Returns: + bool: True if uninitialize DCAM-API was succeeded. + """ + if cls.__bInitialized: + dcamapi_uninit() + cls.__lasterr = DCAMERR.SUCCESS + cls.__bInitialized = False + cls.__devicecount = 0 + + return True + + @classmethod + def get_devicecount(cls): + """Return number of connected cameras. + Return number of connected cameras. + + Returns: + int: Number of connected cameras. + bool: False if not initialized. + """ + if not cls.__bInitialized: + return False + + return cls.__devicecount + +# ==== Dcam class ==== + + +class Dcam: + def __init__(self, iDevice=0): + self.__lasterr = DCAMERR.SUCCESS + self.__iDevice = iDevice + self.__hdcam = 0 + self.__hdcamwait = 0 + self.__bufframe = DCAMBUF_FRAME() + + def __repr__(self): + return 'Dcam()' + + def __result(self, errvalue): + """Keep last error code. + Internal use. Keep last error code. + """ + if errvalue < 0: + self.__lasterr = errvalue + return False + + return True + + def lasterr(self): + """Return last error code. + Return last error code of Dcam member functions. + """ + return self.__lasterr + + def is_opened(self): + """Check DCAM handle is opened. + Check DCAM handle is opened. + + Returns: + bool: True if DCAM handle is opened. False if DCAM handle is not opened. + """ + if self.__hdcam == 0: + return False + else: + return True + + def dev_open(self, index=-1): + """Get DCAM handle. + Get DCAM handle for controling camera. + After calling close(), call this again if you need to resume measurement. + + Args: + index (int): Device index. + + Returns: + bool: True if get DCAM handle was succeeded. 
False if dcamdev_open() returned DCAMERR except SUCCESS. lasterr() returns the DCAMERR value. + """ + if self.is_opened(): + return self.__result(DCAMERR.ALREADYOPENED) # instance is already opened. New Error. + + paramopen = DCAMDEV_OPEN() + if index >= 0: + paramopen.index = index + else: + paramopen.index = self.__iDevice + + ret = self.__result(dcamdev_open(byref(paramopen))) + if ret is False: + return False + + self.__hdcam = paramopen.hdcam + return True + + def dev_close(self): + """Close DCAM handle. + Close DCAM handle. + Call this if you need to close the current device. + + Returns: + bool: True if close DCAM handle was succeeded. + """ + if self.is_opened(): + self.__close_hdcamwait() + dcamdev_close(self.__hdcam) + self.__lasterr = DCAMERR.SUCCESS + self.__hdcam = 0 + + return True + + def dev_getstring(self, idstr: DCAM_IDSTR): + """Get string of device. + Get string of device. + + Args: + idstr (DCAM_IDSTR): String id. + + Returns: + string: Device information specified by DCAM_IDSTR. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if self.is_opened(): + hdcam = self.__hdcam + else: + hdcam = self.__iDevice + + paramdevstr = DCAMDEV_STRING() + paramdevstr.iString = idstr + paramdevstr.alloctext(256) + + ret = self.__result(dcamdev_getstring(hdcam, byref(paramdevstr))) + if ret is False: + return False + + return paramdevstr.text.decode() + + # dcamprop functions + + def prop_getattr(self, idprop: DCAM_IDPROP): + """Get property attribute. + Get property attribute. + + args: + idprop (DCAM_IDPROP): Property id. + + Returns: + DCAMPROP_ATTR: Attribute information of the property. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. 
+ + propattr = DCAMPROP_ATTR() + propattr.iProp = idprop + ret = self.__result(dcamprop_getattr(self.__hdcam, byref(propattr))) + if ret is False: + return False + + return propattr + + def prop_getvalue(self, idprop: DCAM_IDPROP): + """Get property value. + Get property value. + + args: + idprop (DCAM_IDPROP): Property id. + + Returns: + float: Property value of property id. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + cDouble = c_double() + ret = self.__result(dcamprop_getvalue(self.__hdcam, idprop, byref(cDouble))) + if ret is False: + return False + + return cDouble.value + + def prop_setvalue(self, idprop: DCAM_IDPROP, fValue): + """Set property value. + Set property value. + + args: + idprop (DCAM_IDPROP): Property id. + fValue (float): Setting value. + + Returns: + bool: True if set property value was succeeded. False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + ret = self.__result(dcamprop_setvalue(self.__hdcam, idprop, fValue)) + if ret is False: + return False + + return True + + def prop_setgetvalue(self, idprop: DCAM_IDPROP, fValue, option=0): + """Set and get property value. + Set and get property value. + + args: + idprop (DCAM_IDPROP): Property id. + fValue (float): Input value for setting and receive actual set value by ref. + + Returns: + float: Accurate value set in device. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. 
+ + cDouble = c_double(fValue) + cOption = c_int32(option) + ret = self.__result(dcamprop_setgetvalue(self.__hdcam, idprop, byref(cDouble), cOption)) + if ret is False: + return False + + return cDouble.value + + def prop_queryvalue(self, idprop: DCAM_IDPROP, fValue, option=0): + """Query property value. + Query property value. + + Args: + idprop (DCAM_IDPROP): Property id. + fValue (float): Value of property. + + Returns: + float: Property value specified by option. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + cDouble = c_double(fValue) + cOption = c_int32(option) + ret = self.__result(dcamprop_queryvalue(self.__hdcam, idprop, byref(cDouble), cOption)) + if ret is False: + return False + + return cDouble.value + + def prop_getnextid(self, idprop: DCAM_IDPROP): + """Get next property id. + Get next property id. + + Args: + idprop (DCAM_IDPROP): Property id. + + Returns: + DCAM_IDPROP: Next property id. + bool: False if no more property or error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + cIdprop = c_int32(idprop) + cOption = c_int32(0) # search next ID + + ret = self.__result(dcamprop_getnextid(self.__hdcam, byref(cIdprop), cOption)) + if ret is False: + return False + + return cIdprop.value + + def prop_getname(self, idprop: DCAM_IDPROP): + """Get name of property. + Get name of property. + + Args: + idprop (DCAM_IDPROP): Property id. + + Returns: + string: Caracter string of property id. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. 
+ + textbuf = create_string_buffer(256) + ret = self.__result(dcamprop_getname(self.__hdcam, idprop, textbuf, sizeof(textbuf))) + if ret is False: + return False + + return textbuf.value.decode() + + def prop_getvaluetext(self, idprop: DCAM_IDPROP, fValue): + """Get text of property value. + Get text of property value. + + Args: + idprop (DCAM_IDPROP): Property id. + fValue (float): Setting value. + + Returns: + string: Caracter string of property value. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + paramvaluetext = DCAMPROP_VALUETEXT() + paramvaluetext.iProp = idprop + paramvaluetext.value = fValue + paramvaluetext.alloctext(256) + + ret = self.__result(dcamprop_getvaluetext(self.__hdcam, byref(paramvaluetext))) + if ret is False: + return False + + return paramvaluetext.text.decode() + + # dcambuf functions + + def buf_alloc(self, nFrame): + """Alloc DCAM internal buffer. + Alloc DCAM internal buffer. + + Arg: + nFrame (int): Number of frames. + + Returns: + bool: True if buffer is prepared. False if buffer is not prepared. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + cFrame = c_int32(nFrame) + ret = self.__result(dcambuf_alloc(self.__hdcam, cFrame)) + if ret is False: + return False + + return self.__result(dcammisc_setupframe(self.__hdcam, self.__bufframe)) + + def buf_release(self): + """Release DCAM internal buffer. + Release DCAM internal buffer. + + Returns: + bool: True if release DCAM internal buffser was succeeded. False if error happens during releasing buffer. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. 
+ + cOption = c_int32(0) + return self.__result(dcambuf_release(self.__hdcam, cOption)) + + def buf_getframe(self, iFrame): + """Return DCAMBUF_FRAME instance. + Return DCAMBUF_FRAME instance with image data specified by iFrame. + + Arg: + iFrame (int): Index of target frame. + + Returns: + (aFrame, npBuf): aFrame is DCAMBUF_FRAME, npBuf is NumPy buffer. + bool: False if error happens. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + framebundlenum = 1 + + fValue = c_double() + err = dcamprop_getvalue(self.__hdcam, DCAM_IDPROP.FRAMEBUNDLE_MODE, byref(fValue)) + if not err.is_failed() and int(fValue.value) == DCAMPROP.MODE.ON: + err = dcamprop_getvalue(self.__hdcam, DCAM_IDPROP.FRAMEBUNDLE_NUMBER, byref(fValue)) + if not err.is_failed(): + framebundlenum = int(fValue.value) + else: + return False + + npBuf = dcammisc_alloc_ndarray(self.__bufframe, framebundlenum) + if npBuf is False: + return self.__result(DCAMERR.INVALIDPIXELTYPE) + + aFrame = DCAMBUF_FRAME() + aFrame.iFrame = iFrame + + aFrame.buf = npBuf.ctypes.data_as(c_void_p) + aFrame.rowbytes = self.__bufframe.rowbytes + aFrame.type = self.__bufframe.type + aFrame.width = self.__bufframe.width + aFrame.height = self.__bufframe.height + + ret = self.__result(dcambuf_copyframe(self.__hdcam, byref(aFrame))) + if ret is False: + return False + + return (aFrame, npBuf) + + def buf_getframedata(self, iFrame): + """Return NumPy buffer. + Return NumPy buffer of image data specified by iFrame. + + Arg: + iFrame (int): Index of target frame. + + Returns: + npBuf: NumPy buffer. + bool: False if error happens. lasterr() returns the DCAMERR value. + """ + ret = self.buf_getframe(iFrame) + if ret is False: + return False + + return ret[1] + + def buf_getlastframedata(self): + """Return NumPy buffer of last updated. + Return NumPy buffer of image data of last updated frame. + + Returns: + npBuf: NumPy buffer. 
+ bool: False if error happens. lasterr() returns the DCAMERR value. + """ + return self.buf_getframedata(-1) + + # dcamcap functions + + def cap_start(self, bSequence=True): + """Start capturing. + Start capturing. + + Arg: + bSequence (bool): False means SNAPSHOT, others means SEQUENCE. + + Returns: + bool: True if start capture. False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + if bSequence: + mode = DCAMCAP_START.SEQUENCE + else: + mode = DCAMCAP_START.SNAP + + return self.__result(dcamcap_start(self.__hdcam, mode)) + + def cap_snapshot(self): + """Capture snapshot. + Capture snapshot. Get the frames specified in buf_alloc(). + + Returns: + bool: True if start snapshot. False if error happened. lasterr() returns the DCAMERR value. + """ + return self.cap_start(False) + + def cap_stop(self): + """Stop capturing. + Stop capturing. + + Returns: + bool: True if Stop capture. False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + return self.__result(dcamcap_stop(self.__hdcam)) + + def cap_status(self): + """Get capture status. + Get capture status. + + Returns: + DCAMCAP_STATUS: Current capturing status. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + cStatus = c_int32() + ret = self.__result(dcamcap_status(self.__hdcam, byref(cStatus))) + if ret is False: + return False + + return cStatus.value + + def cap_transferinfo(self): + """Get transfer info. + Get transfer info. + + Returns: + DCAMCAP_TRANSFERINFO: Current image transfer status. + bool: False if error happened. lasterr() returns the DCAMERR value. 
+ """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + paramtransferinfo = DCAMCAP_TRANSFERINFO() + ret = self.__result(dcamcap_transferinfo(self.__hdcam, byref(paramtransferinfo))) + if ret is False: + return False + + return paramtransferinfo + + def cap_firetrigger(self): + """Fire software trigger. + Fire software trigger. + + Returns: + bool: True if firing trigger was succeeded. False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.is_opened(): + return self.__result(DCAMERR.INVALIDHANDLE) # instance is not opened yet. + + cOption = c_int32(0) + ret = self.__result(dcamcap_firetrigger(self.__hdcam, cOption)) + if ret is False: + return False + + return True + + + # dcamwait functions + + def __open_hdcamwait(self): + """Get DCAMWAIT handle. + Get DCAMWAIT handle. + + Returns: + bool: True if get DCAMWAIT handle was succeeded. False if error happened. lasterr() returns the DCAMERR value. + """ + if not self.__hdcamwait == 0: + return True + + paramwaitopen = DCAMWAIT_OPEN() + paramwaitopen.hdcam = self.__hdcam + ret = self.__result(dcamwait_open(byref(paramwaitopen))) + if ret is False: + return False + + if paramwaitopen.hwait == 0: + return self.__result(DCAMERR.INVALIDWAITHANDLE) + + self.__hdcamwait = paramwaitopen.hwait + return True + + def __close_hdcamwait(self): + """Close DCAMWAIT handle. + Close DCAMWAIT handle. + + Returns: + bool: True if close DCAMWAIT handle was succeeded. False if error happened. lasterr() returns the DCAMERR value. + """ + + if self.__hdcamwait == 0: + return True + + ret = self.__result(dcamwait_close(self.__hdcamwait)) + if ret is False: + return False + + self.__hdcamwait = 0 + return True + + def wait_event(self, eventmask: DCAMWAIT_CAPEVENT, timeout_millisec): + """Wait specified event. + Wait specified event. + + Arg: + eventmask (DCAMWAIT_CAPEVENT): Event mask to wait. + timeout_millisec (int): Timeout by milliseconds. 
+ + Returns: + DCAMWAIT_CAPEVENT: Happened event. + bool: False if error happened. lasterr() returns the DCAMERR value. + """ + ret = self.__open_hdcamwait() + if ret is False: + return False + + paramwaitstart = DCAMWAIT_START() + paramwaitstart.eventmask = eventmask + paramwaitstart.timeout = timeout_millisec + ret = self.__result(dcamwait_start(self.__hdcamwait, byref(paramwaitstart))) + if ret is False: + return False + + return paramwaitstart.eventhappened + + def wait_capevent_frameready(self, timeout_millisec): + """Wait DCAMWAIT_CAPEVENT.FRAMEREADY event. + Wait DCAMWAIT_CAPEVENT.FRAMEREADY event. + + Arg: + timeout_millisec (int): Timeout by milliseconds. + + Returns: + bool: True if wait capture. False if error happened. lasterr() returns the DCAMERR value. + """ + ret = self.wait_event(DCAMWAIT_CAPEVENT.FRAMEREADY, timeout_millisec) + if ret is False: + return False + + # ret is DCAMWAIT_CAPEVENT.FRAMEREADY + + return True + + diff --git a/software/control/dcamapi4.py b/software/control/dcamapi4.py new file mode 100755 index 000000000..8e5be9d5b --- /dev/null +++ b/software/control/dcamapi4.py @@ -0,0 +1,1369 @@ +# dcamapi4.py : Dec 16, 2022 +# +# Copyright (C) 2021-2022 Hamamatsu Photonics K.K.. All right reserved. + + +import platform +from enum import IntEnum +from ctypes import * + + +# ==== load shared library ==== + +# abosorb platform dependency + +__platform_system = platform.system() +if __platform_system == 'Windows': + __dll = windll.LoadLibrary('dcamapi.dll') +else: # Linux + __dll = cdll.LoadLibrary('/usr/local/lib/libdcamapi.so') + + +# ==== declare constants ==== + +class DCAMERR(IntEnum): + # status error + BUSY = -2147483391 # 0x80000101, API cannot process in busy state. + NOTREADY = -2147483389 # 0x80000103, API requires ready state. + NOTSTABLE = -2147483388 # 0x80000104, API requires stable or unstable state. + UNSTABLE = -2147483387 # 0x80000105, API does not support in unstable state. 
+ NOTBUSY = -2147483385 # 0x80000107, API requires busy state. + EXCLUDED = -2147483376 # 0x80000110, some resource is exclusive and already used + COOLINGTROUBLE = -2147482878 # 0x80000302, something happens near cooler + NOTRIGGER = -2147482877 # 0x80000303, no trigger when necessary. Some camera supports this error. + TEMPERATURE_TROUBLE = -2147482876 # 0x80000304, camera warns its temperature + TOOFREQUENTTRIGGER = -2147482875 # 0x80000305, input too frequent trigger. Some camera supports this error. + # wait error + ABORT = -2147483390 # 0x80000102, abort process + TIMEOUT = -2147483386 # 0x80000106, timeout + LOSTFRAME = -2147482879 # 0x80000301, frame data is lost + MISSINGFRAME_TROUBLE = -2147479802 # 0x80000f06, frame is lost but reason is low lever driver's bug + INVALIDIMAGE = -2147482847 # 0x80000321, hpk format data is invalid data + # initialization error + NORESOURCE = -2147483135 # 0x80000201, not enough resource except memory + NOMEMORY = -2147483133 # 0x80000203, not enough memory + NOMODULE = -2147483132 # 0x80000204, no sub module + NODRIVER = -2147483131 # 0x80000205, no driver + NOCAMERA = -2147483130 # 0x80000206, no camera + NOGRABBER = -2147483129 # 0x80000207, no grabber + NOCOMBINATION = -2147483128 # 0x80000208, no combination on registry + FAILOPEN = -2147479551 # 0x80001001, DEPRECATED + FRAMEGRABBER_NEEDS_FIRMWAREUPDATE = -2147479550 # 0x80001002, need to update frame grabber firmware to use the camera + INVALIDMODULE = -2147483119 # 0x80000211, dcam_init() found invalid module + INVALIDCOMMPORT = -2147483118 # 0x80000212, invalid serial port + FAILOPENBUS = -2130702335 # 0x81001001, the bus or driver are not available + FAILOPENCAMERA = -2113925119 # 0x82001001, camera report error during opening + DEVICEPROBLEM = -2113925118 # 0x82001002, initialization failed(for maico) + # calling error + INVALIDCAMERA = -2147481594 # 0x80000806, invalid camera + INVALIDHANDLE = -2147481593 # 0x80000807, invalid camera handle + INVALIDPARAM = 
-2147481592 # 0x80000808, invalid parameter + INVALIDVALUE = -2147481567 # 0x80000821, invalid property value + OUTOFRANGE = -2147481566 # 0x80000822, value is out of range + NOTWRITABLE = -2147481565 # 0x80000823, the property is not writable + NOTREADABLE = -2147481564 # 0x80000824, the property is not readable + INVALIDPROPERTYID = -2147481563 # 0x80000825, the property id is invalid + NEWAPIREQUIRED = -2147481562 # 0x80000826, old API cannot present the value because only new API need to be used + WRONGHANDSHAKE = -2147481561 # 0x80000827, this error happens DCAM get error code from camera unexpectedly + NOPROPERTY = -2147481560 # 0x80000828, there is no altenative or influence id, or no more property id + INVALIDCHANNEL = -2147481559 # 0x80000829, the property id specifies channel but channel is invalid + INVALIDVIEW = -2147481558 # 0x8000082a, the property id specifies channel but channel is invalid + INVALIDSUBARRAY = -2147481557 # 0x8000082b, the combination of subarray values are invalid. e.g. DCAM_IDPROP_SUBARRAYHPOS + DCAM_IDPROP_SUBARRAYHSIZE is greater than the number of horizontal pixel of sensor. + ACCESSDENY = -2147481556 # 0x8000082c, the property cannot access during this DCAM STATUS + NOVALUETEXT = -2147481555 # 0x8000082d, the property does not have value text + WRONGPROPERTYVALUE = -2147481554 # 0x8000082e, at least one property value is wrong + DISHARMONY = -2147481552 # 0x80000830, the paired camera does not have same parameter + FRAMEBUNDLESHOULDBEOFF = -2147481550 # 0x80000832, framebundle mode should be OFF under current property settings + INVALIDFRAMEINDEX = -2147481549 # 0x80000833, the frame index is invalid + INVALIDSESSIONINDEX = -2147481548 # 0x80000834, the session index is invalid + NOCORRECTIONDATA = -2147481544 # 0x80000838, not take the dark and shading correction data yet. + CHANNELDEPENDENTVALUE = -2147481543 # 0x80000839, each channel has own property value so can't return overall property value. 
+ VIEWDEPENDENTVALUE = -2147481542 # 0x8000083a, each view has own property value so can't return overall property value. + NODEVICEBUFFER = -2147481541 # 0x8000083b, the frame count is larger than device momory size on using device memory. + REQUIREDSNAP = -2147481540 # 0x8000083c, the capture mode is sequence on using device memory. + LESSSYSTEMMEMORY = -2147481537 # 0x8000083f, the sysmte memory size is too small. PC doesn't have enough memory or is limited memory by 32bit OS. + INVALID_SELECTEDLINES = -2147481534 # 0x80000842, the combination of selected lines values are invalid. e.g. DCAM_IDPROP_SELECTEDLINES_VPOS + DCAM_IDPROP_SELECTEDLINES_VSIZE is greater than the number of vertical lines of sensor. + NOTSUPPORT = -2147479805 # 0x80000f03, camera does not support the function or property with current settings + # camera or bus trouble + FAILREADCAMERA = -2097147902 # 0x83001002, failed to read data from camera + FAILWRITECAMERA = -2097147901 # 0x83001003, failed to write data to the camera + CONFLICTCOMMPORT = -2097147900 # 0x83001004, conflict the com port name user set + OPTICS_UNPLUGGED = -2097147899 # 0x83001005, Optics part is unplugged so please check it. 
+ FAILCALIBRATION = -2097147898 # 0x83001006, fail calibration + MISMATCH_CONFIGURATION = -2097147887 # 0x83001011, mismatch between camera output(connection) and frame grabber specs + # 0x84000100 - 0x840001FF, DCAMERR_INVALIDMEMBER_x + INVALIDMEMBER_3 = -2080374525 # 0x84000103, 3th member variable is invalid value + INVALIDMEMBER_5 = -2080374523 # 0x84000105, 5th member variable is invalid value + INVALIDMEMBER_7 = -2080374521 # 0x84000107, 7th member variable is invalid value + INVALIDMEMBER_8 = -2080374520 # 0x84000108, 7th member variable is invalid value + INVALIDMEMBER_9 = -2080374519 # 0x84000109, 9th member variable is invalid value + FAILEDOPENRECFILE = -2080370687 # 0x84001001, DCAMREC failed to open the file + INVALIDRECHANDLE = -2080370686 # 0x84001002, DCAMREC is invalid handle + FAILEDWRITEDATA = -2080370685 # 0x84001003, DCAMREC failed to write the data + FAILEDREADDATA = -2080370684 # 0x84001004, DCAMREC failed to read the data + NOWRECORDING = -2080370683 # 0x84001005, DCAMREC is recording data now + WRITEFULL = -2080370682 # 0x84001006, DCAMREC writes full frame of the session + ALREADYOCCUPIED = -2080370681 # 0x84001007, DCAMREC handle is already occupied by other HDCAM + TOOLARGEUSERDATASIZE = -2080370680 # 0x84001008, DCAMREC is set the large value to user data size + INVALIDWAITHANDLE = -2080366591 # 0x84002001, DCAMWAIT is invalid handle + NEWRUNTIMEREQUIRED = -2080366590 # 0x84002002, DCAM Module Version is older than the version that the camera requests + VERSIONMISMATCH = -2080366589 # 0x84002003, Camre returns the error on setting parameter to limit version + RUNAS_FACTORYMODE = -2080366588 # 0x84002004, Camera is running as a factory mode + IMAGE_UNKNOWNSIGNATURE = -2080362495 # 0x84003001, sigunature of image header is unknown or corrupted + IMAGE_NEWRUNTIMEREQUIRED = -2080362494 # 0x84003002, version of image header is newer than version that used DCAM supports + IMAGE_ERRORSTATUSEXIST = -2080362493 # 0x84003003, image header stands 
error status + IMAGE_HEADERCORRUPTED = -2080358396 # 0x84004004, image header value is strange + IMAGE_BROKENCONTENT = -2080358395 # 0x84004005, image content is corrupted + # calling error for DCAM-API 2.1.3 + UNKNOWNMSGID = -2147481599 # 0x80000801, unknown message id + UNKNOWNSTRID = -2147481598 # 0x80000802, unknown string id + UNKNOWNPARAMID = -2147481597 # 0x80000803, unkown parameter id + UNKNOWNBITSTYPE = -2147481596 # 0x80000804, unknown bitmap bits type + UNKNOWNDATATYPE = -2147481595 # 0x80000805, unknown frame data type + # internal error + NONE = 0 # 0, no error, nothing to have done + INSTALLATIONINPROGRESS = -2147479808 # 0x80000f00, installation progress + UNREACH = -2147479807 # 0x80000f01, internal error + UNLOADED = -2147479804 # 0x80000f04, calling after process terminated + THRUADAPTER = -2147479803 # 0x80000f05 + NOCONNECTION = -2147479801 # 0x80000f07, HDCAM lost connection to camera + NOTIMPLEMENT = -2147479806 # 0x80000f02, not yet implementation + DELAYEDFRAME = -2147479799 # 0x80000f09, the frame waiting re-load from hardware buffer with SNAPSHOT of DEVICEBUFFER MODE + DEVICEINITIALIZING = -1342177279 # 0xb0000001 + APIINIT_INITOPTIONBYTES = -1543438333 # 0xa4010003, DCAMAPI_INIT::initoptionbytes is invalid + APIINIT_INITOPTION = -1543438332 # 0xa4010004, DCAMAPI_INIT::initoption is invalid + INITOPTION_COLLISION_BASE = -1543389184 # 0xa401C000 + INITOPTION_COLLISION_MAX = -1543372801 # 0xa401FFFF + # Between DCAMERR_INITOPTION_COLLISION_BASE and DCAMERR_INITOPTION_COLLISION_MAX means there is collision with initoption in DCAMAPI_INIT. + # The value "(error code) - DCAMERR_INITOPTION_COLLISION_BASE" indicates the index which second INITOPTION group happens. + MISSPROP_TRIGGERSOURCE = -535822064 # 0xE0100110, the trigger mode is internal or syncreadout on using device memory. 
class DCAM_PIXELTYPE(IntEnum):
    """Pixel formats a frame can be delivered in."""

    NONE = 0  # no pixel type specified
    MONO8 = 1  # monochrome, 8 bits per pixel
    MONO16 = 2  # monochrome, 16 bits per pixel


class DCAMCAP_STATUS(IntEnum):
    """Capture status codes."""

    ERROR = 0
    BUSY = 1
    READY = 2
    STABLE = 3
    UNSTABLE = 4


class DCAMWAIT_CAPEVENT(IntEnum):
    """Capture event bit flags (combinable masks)."""

    TRANSFERRED = 1
    FRAMEREADY = 2
    CYCLEEND = 4
    EXPOSUREEND = 8
    STOPPED = 16


class DCAMWAIT_RECEVENT(IntEnum):
    """Recording event bit flags (combinable masks)."""

    STOPPED = 256
    WARNING = 512
    MISSED = 1024
    DISKFULL = 4096
    WRITEFAULT = 8192
    SKIPPED = 16384


class DCAMCAP_START(IntEnum):
    """Capture start modes."""

    SEQUENCE = -1  # run continuously
    SNAP = 0  # single pass through the buffer


class DCAM_IDSTR(IntEnum):
    """Identifiers for the device information strings."""

    BUS = 0x04000101
    CAMERAID = 0x04000102
    VENDOR = 0x04000103
    MODEL = 0x04000104
    CAMERAVERSION = 0x04000105
    DRIVERVERSION = 0x04000106
    MODULEVERSION = 0x04000107
    DCAMAPIVERSION = 0x04000108
    CAMERA_SERIESNAME = 0x0400012C
+ """ + APIVER__LATEST = 0x00000001 + APIVER__4_0 = 0x00000400 + ENDMARK = 0x00000000 + + +class DCAM_CODEPAGE(IntEnum): + SHIFT_JIS = 932 # Shift JIS + UTF16_LE = 1200 # UTF-16 (Little Endian) + UTF16_BE = 1201 # UTF-16 (Big Endian) + UTF7 = 65000 # UTF-7 translation + UTF8 = 65001 # UTF-8 translation + NONE = 0 + + +class DCAM_IDPROP(IntEnum): + # 0x00000000 - 0x00100000, reserved + # Group: TIMING + TRIGGERSOURCE = 1048848 # 0x00100110, R/W, mode, "TRIGGER SOURCE" + TRIGGERACTIVE = 1048864 # 0x00100120, R/W, mode, "TRIGGER ACTIVE" + TRIGGER_MODE = 1049104 # 0x00100210, R/W, mode, "TRIGGER MODE" + TRIGGERPOLARITY = 1049120 # 0x00100220, R/W, mode, "TRIGGER POLARITY" + TRIGGER_CONNECTOR = 1049136 # 0x00100230, R/W, mode, "TRIGGER CONNECTOR" + TRIGGERTIMES = 1049152 # 0x00100240, R/W, long, "TRIGGER TIMES" + # 0x00100250 is reserved + TRIGGERDELAY = 1049184 # 0x00100260, R/W, sec, "TRIGGER DELAY" + INTERNALTRIGGER_HANDLING = 1049200 # 0x00100270 + TRIGGERMULTIFRAME_COUNT = 1049216 # 0x00100280 + SYNCREADOUT_SYSTEMBLANK = 1049232 # 0x00100290, R/W, mode, "SYNC READOUT SYSTEM BLANK" + TRIGGERENABLE_ACTIVE = 1049616 # 0x00100410, R/W, mode, "TRIGGER ENABLE ACTIVE" + TRIGGERENABLE_POLARITY = 1049632 # 0x00100420 + TRIGGERENABLE_SOURCE = 1049648 # 0x00100430, R/W, mode, "TRIGGER ENABLE SOURCE" + TRIGGERENABLE_BURSTTIMES = 1049664 # 0x00100440, R/W, mode, "TRIGGER ENABLE BURST TIMES" + TRIGGERNUMBER_FORFIRSTIMAGE = 1050640 # 0x00100810, R/O, long, "TRIGGER NUMBER FOR FIRST IMAGE" + TRIGGERNUMBER_FORNEXTIMAGE = 1050656 # 0x00100820, R/O, long, "TRIGGER NUMBER FOR NEXT IMAGE" + NUMBEROF_OUTPUTTRIGGERCONNECTOR = 1835024 # 0x001C0010 + OUTPUTTRIGGER_CHANNELSYNC = 1835056 # 0x001C0030, R/W, mode, "OUTPUT TRIGGER CHANNEL SYNC" + OUTPUTTRIGGER_PROGRAMABLESTART = 1835088 # 0x001C0050, R/W, mode, "OUTPUT TRIGGER PROGRAMABLE START" + OUTPUTTRIGGER_SOURCE = 1835280 # 0x001C0110, R/W, mode, "OUTPUT TRIGGER SOURCE" + OUTPUTTRIGGER_POLARITY = 1835296 # 0x001C0120, R/W, mode, "OUTPUT 
TRIGGER POLARITY" + OUTPUTTRIGGER_ACTIVE = 1835312 # 0x001C0130, R/W, mode, "OUTPUT TRIGGER ACTIVE" + OUTPUTTRIGGER_DELAY = 1835328 # 0x001C0140, R/W, sec, "OUTPUT TRIGGER DELAY" + OUTPUTTRIGGER_PERIOD = 1835344 # 0x001C0150, R/W, sec, "OUTPUT TRIGGER PERIOD" + OUTPUTTRIGGER_KIND = 1835360 # 0x001C0160, R/W, mode, "OUTPUT TRIGGER KIND" + OUTPUTTRIGGER_BASESENSOR = 1835376 # 0x001C0170, R/W, mode, "OUTPUT TRIGGER BASE SENSOR" + OUTPUTTRIGGER_PREHSYNCCOUNT = 1835408 # 0x001C0190, R/W, mode, "OUTPUT TRIGGER PRE HSYNC COUNT" + # - 0x001C10FF for 16 output trigger connector, reserved + _OUTPUTTRIGGER = 256 # 0x00000100, the offset of ID for Nth OUTPUT TRIGGER parameter + MASTERPULSE_MODE = 1966112 # 0x001E0020, R/W, mode, "MASTER PULSE MODE" + MASTERPULSE_TRIGGERSOURCE = 1966128 # 0x001E0030, R/W, mode, "MASTER PULSE TRIGGER SOURCE" + MASTERPULSE_INTERVAL = 1966144 # 0x001E0040, R/W, sec, "MASTER PULSE INTERVAL" + MASTERPULSE_BURSTTIMES = 1966160 # 0x001E0050, R/W, long, "MASTER PULSE BURST TIMES" + # Group: FEATURE + # exposure period + EXPOSURETIME = 2031888 # 0x001F0110, R/W, sec, "EXPOSURE TIME" + EXPOSURETIME_CONTROL = 2031920 # 0x001F0130, R/W, mode, "EXPOSURE TIME CONTROL" + TRIGGER_FIRSTEXPOSURE = 2032128 # 0x001F0200, R/W, mode, "TRIGGER FIRST EXPOSURE" + TRIGGER_GLOBALEXPOSURE = 2032384 # 0x001F0300, R/W, mode, "TRIGGER GLOBAL EXPOSURE" + FIRSTTRIGGER_BEHAVIOR = 2032400 # 0x001F0310, R/W, mode, "FIRST TRIGGER BEHAVIOR" + MULTIFRAME_EXPOSURE = 2035712 # 0x001F1000, R/W, sec, "MULTI FRAME EXPOSURE TIME" + # - 0x001F1FFF for 256 MULTI FRAME + _MULTIFRAME = 16 # 0x00000010, the offset of ID for Nth MULTIFRAME + # anti-blooming + LIGHTMODE = 2097424 # 0x00200110, R/W, mode, "LIGHT MODE" + # 0x00200120 is reserved + # sensitivity + SENSITIVITYMODE = 2097680 # 0x00200210, R/W, mode, "SENSITIVITY MODE" + SENSITIVITY = 2097696 # 0x00200220, R/W, long, "SENSITIVITY" + DIRECTEMGAIN_MODE = 2097744 # 0x00200250, R/W, mode, "DIRECT EM GAIN MODE" + EMGAINWARNING_STATUS = 
2097760 # 0x00200260 + EMGAINWARNING_LEVEL = 2097776 # 0x00200270, R/W, long, "EM GAIN WARNING LEVEL" + EMGAINWARNING_ALARM = 2097792 # 0x00200280, R/W, mode, "EM GAIN WARNING ALARM" + EMGAINPROTECT_MODE = 2097808 # 0x00200290, R/W, mode, "EM GAIN PROTECT MODE" + EMGAINPROTECT_AFTERFRAMES = 2097824 # 0x002002A0, R/W, long, "EM GAIN PROTECT AFTER FRAMES" + MEASURED_SENSITIVITY = 2097840 # 0x002002B0, R/O, real, "MEASURED SENSITIVITY" + PHOTONIMAGINGMODE = 2097904 # 0x002002F0, R/W, mode, "PHOTON IMAGING MODE" + # sensor cooler + SENSORTEMPERATURE = 2097936 # 0x00200310, R/O, celsius,"SENSOR TEMPERATURE" + SENSORCOOLER = 2097952 # 0x00200320, R/W, mode, "SENSOR COOLER" + SENSORTEMPERATURETARGET = 2097968 # 0x00200330, R/W, celsius,"SENSOR TEMPERATURE TARGET" + SENSORCOOLERSTATUS = 2097984 # 0x00200340, R/O, mode, "SENSOR COOLER STATUS" + SENSORCOOLERFAN = 2098000 # 0x00200350, R/W, mode, "SENSOR COOLER FAN" + SENSORTEMPERATURE_AVE = 2098016 # 0x00200360, R/O, celsius,"SENSOR TEMPERATURE AVE" + SENSORTEMPERATURE_MIN = 2098032 # 0x00200370, R/O, celsius,"SENSOR TEMPERATURE MIN" + SENSORTEMPERATURE_MAX = 2098048 # 0x00200380, R/O, celsius,"SENSOR TEMPERATURE MAX" + SENSORTEMPERATURE_STATUS = 2098064 # 0x00200390, R/O, mode, "SENSOR TEMPERATURE STATUS" + SENSORTEMPERATURE_PROTECT = 2098176 # 0x00200400, R/W, mode, "SENSOR TEMPERATURE MODE" + # mechanical shutter + MECHANICALSHUTTER = 2098192 # 0x00200410, R/W, mode, "MECHANICAL SHUTTER" + # contrast enhance + CONTRASTGAIN = 3146016 # 0x00300120, R/W, long, "CONTRAST GAIN" + CONTRASTOFFSET = 3146032 # 0x00300130, R/W, long, "CONTRAST OFFSET" + # 0x00300140 is reserved + HIGHDYNAMICRANGE_MODE = 3146064 # 0x00300150, R/W, mode, "HIGH DYNAMIC RANGE MODE" + DIRECTGAIN_MODE = 3146080 # 0x00300160, R/W, mode, "DIRECT GAIN MODE" + REALTIMEGAINCORRECT_MODE = 3146096 # 0x00300170, R/W, mode, "REALTIME GAIN CORRECT MODE" + REALTIMEGAINCORRECT_LEVEL = 3146112 # 0x00300180, R/W, mode, "REALTIME GAIN CORRECT LEVEL" + 
REALTIMEGAINCORRECT_INTERVAL = 3146128 # 0x00300190, R/W, mode, "REALTIME GAIN CORRECT INTERVAL" + NUMBEROF_REALTIMEGAINCORRECTREGION = 3146144 # 0x003001A0 + # color features + VIVIDCOLOR = 3146240 # 0x00300200, R/W, mode, "VIVID COLOR" + WHITEBALANCEMODE = 3146256 # 0x00300210, R/W, mode, "WHITEBALANCE MODE" + WHITEBALANCETEMPERATURE = 3146272 # 0x00300220, R/W, color-temp., "WHITEBALANCE TEMPERATURE" + WHITEBALANCEUSERPRESET = 3146288 # 0x00300230, R/W, long, "WHITEBALANCE USER PRESET" + # 0x00300310 is reserved + REALTIMEGAINCORRECTREGION_HPOS = 3149824 # 0x00301000, R/W, long, "REALTIME GAIN CORRECT REGION HPOS" + REALTIMEGAINCORRECTREGION_HSIZE = 3153920 # 0x00302000, R/W, long, "REALTIME GAIN CORRECT REGION HSIZE" + _REALTIMEGAINCORRECTIONREGION = 16 # 0x00000010, the offset of ID for Nth REALTIME GAIN CORRECT REGION parameter + # Group: ALU + # ALU + INTERFRAMEALU_ENABLE = 3670032 # 0x00380010, R/W, mode, "INTERFRAME ALU ENABLE" + RECURSIVEFILTER = 3670288 # 0x00380110, R/W, mode, "RECURSIVE FILTER" + RECURSIVEFILTERFRAMES = 3670304 # 0x00380120 + SPOTNOISEREDUCER = 3670320 # 0x00380130, R/W, mode, "SPOT NOISE REDUCER" + SUBTRACT = 3670544 # 0x00380210, R/W, mode, "SUBTRACT" + SUBTRACTIMAGEMEMORY = 3670560 # 0x00380220, R/W, mode, "SUBTRACT IMAGE MEMORY" + STORESUBTRACTIMAGETOMEMORY = 3670576 # 0x00380230, W/O, mode, "STORE SUBTRACT IMAGE TO MEMORY" + SUBTRACTOFFSET = 3670592 # 0x00380240, R/W, long "SUBTRACT OFFSET" + DARKCALIB_STABLEMAXINTENSITY = 3670608 # 0x00380250, R/W, long, "DARKCALIB STABLE MAX INTENSITY" + SUBTRACT_DATASTATUS = 3670768 # 0x003802F0, R/W mode, "SUBTRACT DATA STATUS" + SHADINGCALIB_DATASTATUS = 3670784 # 0x00380300, R/W mode, "SHADING CALIB DATA STATUS" + SHADINGCORRECTION = 3670800 # 0x00380310, R/W, mode, "SHADING CORRECTION" + SHADINGCALIBDATAMEMORY = 3670816 # 0x00380320, R/W, mode, "SHADING CALIB DATA MEMORY" + STORESHADINGCALIBDATATOMEMORY = 3670832 # 0x00380330, W/O, mode, "STORE SHADING DATA TO MEMORY" + SHADINGCALIB_METHOD 
= 3670848 # 0x00380340, R/W, mode, "SHADING CALIB METHOD" + SHADINGCALIB_TARGET = 3670864 # 0x00380350, R/W, long, "SHADING CALIB TARGET" + SHADINGCALIB_STABLEMININTENSITY = 3670880 # 0x00380360, R/W, long, "SHADING CALIB STABLE MIN INTENSITY" + SHADINGCALIB_SAMPLES = 3670896 # 0x00380370, R/W, long, "SHADING CALIB SAMPLES" + SHADINGCALIB_STABLESAMPLES = 3670912 # 0x00380380, R/W, long, "SHADING CALIB STABLE SAMPLES" + SHADINGCALIB_STABLEMAXERRORPERCENT = 3670928 # 0x00380390, R/W, long, "SHADING CALIB STABLE MAX ERROR PERCENT" + FRAMEAVERAGINGMODE = 3670944 # 0x003803A0, R/W, mode, "FRAME AVERAGING MODE" + FRAMEAVERAGINGFRAMES = 3670960 # 0x003803B0 + DARKCALIB_STABLESAMPLES = 3670976 # 0x003803C0, R/W, long, "DARKCALIB STABLE SAMPLES" + DARKCALIB_SAMPLES = 3670992 # 0x003803D0, R/W, long, "DARKCALIB SAMPLES" + DARKCALIB_TARGET = 3671008 # 0x003803E0, R/W, long, "DARKCALIB TARGET" + CAPTUREMODE = 3671056 # 0x00380410, R/W, mode, "CAPTURE MODE" + LINEAVERAGING = 3671120 # 0x00380450, R/W, long, "LINE AVERAGING" + IMAGEFILTER = 3671136 # 0x00380460, R/W, mode, "IMAGE FILTER" + INTENSITYLUT_MODE = 3671312 # 0x00380510, R/W, mode, "INTENSITY LUT MODE" + INTENSITYLUT_PAGE = 3671328 # 0x00380520, R/W, long, "INTENSITY LUT PAGE" + INTENSITYLUT_WHITECLIP = 3671344 # 0x00380530, R/W, long, "INTENSITY LUT WHITE CLIP" + INTENSITYLUT_BLACKCLIP = 3671360 # 0x00380540, R/W, long, "INTENSITY LUT BLACK CLIP" + INTENSITY_GAMMA = 3671392 # 0x00380560, R/W, real, "INTENSITY GAMMA" + SENSORGAPCORRECT_MODE = 3671584 # 0x00380620, R/W, long, "SENSOR GAP CORRECT MODE" + ADVANCEDEDGEENHANCEMENT_MODE = 3671600 # 0x00380630, R/W, mode, "ADVANCED EDGE ENHANCEMENT MODE" + ADVANCEDEDGEENHANCEMENT_LEVEL = 3671616 # 0x00380640, R/W, long, "ADVANCED EDGE ENHANCEMENT LEVEL" + SHADINGCALIB_TARGETMIN = 3671680 # 0x00380680, R/W, long, "SHADING CALIB TARGET MIN" + # TAP CALIBRATION + TAPGAINCALIB_METHOD = 3673872 # 0x00380F10, R/W, mode, "TAP GAIN CALIB METHOD" + TAPCALIB_BASEDATAMEMORY = 3673888 # 
0x00380F20 + STORETAPCALIBDATATOMEMORY = 3673904 # 0x00380F30 + TAPCALIBDATAMEMORY = 3673920 # 0x00380F40, W/O, mode, "TAP CALIB DATA MEMORY" + NUMBEROF_TAPCALIB = 3674096 # 0x00380FF0, R/W, long, "NUMBER OF TAP CALIB" + TAPCALIB_GAIN = 3674112 # 0x00381000, R/W, mode, "TAP CALIB GAIN" + TAPCALIB_OFFSET = 3678208 # 0x00382000, R/W, mode, "TAP CALIB OFFSET" + _TAPCALIB = 16 # 0x00000010, the offset of ID for Nth TAPCALIB + # Group: READOUT + # readout speed + READOUTSPEED = 4194576 # 0x00400110, R/W, long, "READOUT SPEED" + # 0x00400120 is reserved + READOUT_DIRECTION = 4194608 # 0x00400130, R/W, mode, "READOUT DIRECTION" + READOUT_UNIT = 4194624 # 0x00400140, R/O, mode, "READOUT UNIT" + SHUTTER_MODE = 4194640 # 0x00400150, R/W, mode, "SHUTTER MODE" + # sensor mode + SENSORMODE = 4194832 # 0x00400210, R/W, mode, "SENSOR MODE" + SENSORMODE_LINEBUNDLEHEIGHT = 4194896 # 0x00400250, R/W, long, "SENSOR MODE LINE BUNDLEHEIGHT" + SENSORMODE_PANORAMICSTARTV = 4194944 # 0x00400280, R/W, long, "SENSOR MODE PANORAMIC START V" + SENSORMODE_TDISTAGE = 4194960 # 0x00400290, R/W, long, "SENSOR MODE TDI STAGE" + # other readout mode + CCDMODE = 4195088 # 0x00400310, R/W, mode, "CCD MODE" + EMCCD_CALIBRATIONMODE = 4195104 # 0x00400320, R/W, mode, "EM CCD CALIBRATION MODE" + CMOSMODE = 4195152 # 0x00400350, R/W, mode, "CMOS MODE" + MULTILINESENSOR_READOUTMODE = 4195200 # 0x00400380, R/W, mode, "MULTI LINE SENSOR READOUT MODE" + MULTILINESENSOR_TOP = 4195216 # 0x00400390, R/W, long, "MULTI LINE SENSOR TOP" + MULTILINESENSOR_HEIGHT = 4195232 # 0x004003A0, R/W, long, "MULTI LINE SENSOR HEIGHT" + # output mode + OUTPUT_INTENSITY = 4195344 # 0x00400410, R/W, mode, "OUTPUT INTENSITY" + OUTPUTDATA_OPERATION = 4195392 # 0x00400440, R/W, mode, "OUTPUT DATA OPERATION" + TESTPATTERN_KIND = 4195600 # 0x00400510, R/W, mode, "TEST PATTERN KIND" + TESTPATTERN_OPTION = 4195616 # 0x00400520, R/W, long, "TEST PATTERN OPTION" + EXTRACTION_MODE = 4195872 # 0x00400620 + # Group: ROI + # binning and 
subarray + BINNING = 4198672 # 0x00401110, R/W, mode, "BINNING" + BINNING_INDEPENDENT = 4198688 # 0x00401120, R/W, mode, "BINNING INDEPENDENT" + BINNING_HORZ = 4198704 # 0x00401130, R/W, long, "BINNING HORZ" + BINNING_VERT = 4198720 # 0x00401140, R/W, long, "BINNING VERT" + SUBARRAYHPOS = 4202768 # 0x00402110, R/W, long, "SUBARRAY HPOS" + SUBARRAYHSIZE = 4202784 # 0x00402120, R/W, long, "SUBARRAY HSIZE" + SUBARRAYVPOS = 4202800 # 0x00402130, R/W, long, "SUBARRAY VPOS" + SUBARRAYVSIZE = 4202816 # 0x00402140, R/W, long, "SUBARRAY VSIZE" + SUBARRAYMODE = 4202832 # 0x00402150, R/W, mode, "SUBARRAY MODE" + DIGITALBINNING_METHOD = 4202848 # 0x00402160, R/W, mode, "DIGITALBINNING METHOD" + DIGITALBINNING_HORZ = 4202864 # 0x00402170, R/W, long, "DIGITALBINNING HORZ" + DIGITALBINNING_VERT = 4202880 # 0x00402180, R/W, long, "DIGITALBINNING VERT" + # Group: TIMING + # synchronous timing + TIMING_READOUTTIME = 4206608 # 0x00403010, R/O, sec, "TIMING READOUT TIME" + TIMING_CYCLICTRIGGERPERIOD = 4206624 # 0x00403020, R/O, sec, "TIMING CYCLIC TRIGGER PERIOD" + TIMING_MINTRIGGERBLANKING = 4206640 # 0x00403030, R/O, sec, "TIMING MINIMUM TRIGGER BLANKING" + # 0x00403040 is reserved + TIMING_MINTRIGGERINTERVAL = 4206672 # 0x00403050, R/O, sec, "TIMING MINIMUM TRIGGER INTERVAL" + TIMING_EXPOSURE = 4206688 # 0x00403060, R/O, mode, "TIMING EXPOSURE" + TIMING_INVALIDEXPOSUREPERIOD = 4206704 # 0x00403070, R/O, sec, "INVALID EXPOSURE PERIOD" + TIMING_FRAMESKIPNUMBER = 4206720 # 0x00403080, R/W, long, "TIMING FRAME SKIP NUMBER" + TIMING_GLOBALEXPOSUREDELAY = 4206736 # 0x00403090, R/O, sec, "TIMING GLOBAL EXPOSURE DELAY" + INTERNALFRAMERATE = 4208656 # 0x00403810, R/W, 1/sec, "INTERNAL FRAME RATE" + INTERNAL_FRAMEINTERVAL = 4208672 # 0x00403820, R/W, sec, "INTERNAL FRAME INTERVAL" + INTERNALLINERATE = 4208688 # 0x00403830, R/W, 1/sec, "INTERNAL LINE RATE" + INTERNALLINESPEED = 4208704 # 0x00403840, R/W, m/sec, "INTERNAL LINE SPEEED" + INTERNAL_LINEINTERVAL = 4208720 # 0x00403850, R/W, sec, 
"INTERNAL LINE INTERVAL" + INTERNALLINERATE_CONTROL = 4208752 # 0x00403870, R/W, mode, "INTERNAL LINE RATE CONTROL" + # system information + TIMESTAMP_PRODUCER = 4262416 # 0x00410A10, R/O, mode, "TIME STAMP PRODUCER" + FRAMESTAMP_PRODUCER = 4262432 # 0x00410A20, R/O, mode, "FRAME STAMP PRODUCER" + TRANSFERINFO_FRAMECOUNT = 4262672 # 0x00410B10, R/O, long, "TRANSFER INFO FRAME COUNT" + TRANSFERINFO_LOSTCOUNT = 4262673 # 0x00410B11, R/O, long, "TRANSFER INFO LOST COUNT" + # Group: READOUT + # image information + # 0x00420110 is reserved + COLORTYPE = 4325664 # 0x00420120, R/W, mode, "COLORTYPE" + BITSPERCHANNEL = 4325680 # 0x00420130, R/W, long, "BIT PER CHANNEL" + # 0x00420140 is reserved + # 0x00420150 is reserved + NUMBEROF_CHANNEL = 4325760 # 0x00420180, R/O, long, "NUMBER OF CHANNEL" + ACTIVE_CHANNELINDEX = 4325776 # 0x00420190, R/W, mode, "ACTIVE CHANNEL INDEX" + NUMBEROF_VIEW = 4325824 # 0x004201C0, R/O, long, "NUMBER OF VIEW" + ACTIVE_VIEWINDEX = 4325840 # 0x004201D0, R/W, mode, "ACTIVE VIEW INDEX" + IMAGE_WIDTH = 4325904 # 0x00420210, R/O, long, "IMAGE WIDTH" + IMAGE_HEIGHT = 4325920 # 0x00420220, R/O, long, "IMAGE HEIGHT" + IMAGE_ROWBYTES = 4325936 # 0x00420230, R/O, long, "IMAGE ROWBYTES" + IMAGE_FRAMEBYTES = 4325952 # 0x00420240, R/O, long, "IMAGE FRAMEBYTES" + IMAGE_TOPOFFSETBYTES = 4325968 # 0x00420250 + IMAGE_PIXELTYPE = 4326000 # 0x00420270, R/W, DCAM_PIXELTYPE, "IMAGE PIXEL TYPE" + IMAGE_CAMERASTAMP = 4326144 # 0x00420300, R/W, long, "IMAGE CAMERA STAMP" + RECORDFIXEDBYTES_PERFILE = 4326416 # 0x00420410, R/O, long "RECORD FIXED BYTES PER FILE" + RECORDFIXEDBYTES_PERSESSION = 4326432 # 0x00420420 + RECORDFIXEDBYTES_PERFRAME = 4326448 # 0x00420430, R/O, long "RECORD FIXED BYTES PER FRAME" + # frame bundle + FRAMEBUNDLE_MODE = 4329488 # 0x00421010, R/W, mode, "FRAMEBUNDLE MODE" + FRAMEBUNDLE_NUMBER = 4329504 # 0x00421020, R/W, long, "FRAMEBUNDLE NUMBER" + FRAMEBUNDLE_ROWBYTES = 4329520 # 0x00421030, R/O, long, "FRAMEBUNDLE ROWBYTES" + 
FRAMEBUNDLE_FRAMESTEPBYTES = 4329536 # 0x00421040, R/O, long, "FRAMEBUNDLE FRAME STEP BYTES" + # partial area + NUMBEROF_PARTIALAREA = 4390928 # 0x00430010 + PARTIALAREA_HPOS = 4395008 # 0x00431000, R/W, long, "PARTIAL AREA HPOS" + PARTIALAREA_HSIZE = 4399104 # 0x00432000, R/W, long, "PARTIAL AREA HSIZE" + PARTIALAREA_VPOS = 4403200 # 0x00433000, R/W, long, "PARTIAL AREA VPOS" + PARTIALAREA_VSIZE = 4407296 # 0x00434000, R/W, long, "PARTIAL AREA VSIZE" + _PARTIALAREA = 16 # 0x00000010, the offset of ID for Nth PARTIAL AREA + # multi line + NUMBEROF_MULTILINE = 4517904 # 0x0044F010, R/W, long, "NUMBER OF MULTI LINE" + MULTILINE_VPOS = 4521984 # 0x00450000, R/W, long, "MULTI LINE VPOS" + MULTILINE_VSIZE = 4587520 # 0x00460000, R/W, long, "MULTI LINE VSIZE" + _MULTILINE = 16 # 0x00000010, the offset of ID for Nth MULTI LINE + # defect + DEFECTCORRECT_MODE = 4653072 # 0x00470010, R/W, mode, "DEFECT CORRECT MODE" + NUMBEROF_DEFECTCORRECT = 4653088 # 0x00470020, R/W, long, "NUMBER OF DEFECT CORRECT" + HOTPIXELCORRECT_LEVEL = 4653104 # 0x00470030, R/W, mode, "HOT PIXEL CORRECT LEVEL" + DEFECTCORRECT_HPOS = 4657152 # 0x00471000, R/W, long, "DEFECT CORRECT HPOS" + DEFECTCORRECT_METHOD = 4665344 # 0x00473000, R/W, mode, "DEFECT CORRECT METHOD" + # - 0x0047FFFF for 256 DEFECT + _DEFECTCORRECT = 16 # 0x00000010, the offset of ID for Nth DEFECT + # Group: device buffer countrol + DEVICEBUFFER_MODE = 4784128 # 0x00490000, R/W, mode, "DEVICE BUFFER MODE" + DEVICEBUFFER_FRAMECOUNTMAX = 4784160 # 0x00490020, R/O, long, "DEVICE BUFFER FRAME COUNT MAX" + # Group: CALIBREGION + CALIBREGION_MODE = 4203536 # 0x00402410, R/W, mode, "CALIBRATE REGION MODE" + NUMBEROF_CALIBREGION = 4203552 # 0x00402420 + CALIBREGION_HPOS = 4915200 # 0x004B0000, R/W, long, "CALIBRATE REGION HPOS" + CALIBREGION_HSIZE = 4919296 # 0x004B1000, R/W, long, "CALIBRATE REGION HSIZE" + # - 0x0048FFFF for 256 REGIONs at least + _CALIBREGION = 16 # 0x00000010, the offset of ID for Nth REGION + # Group: MASKREGION + 
MASKREGION_MODE = 4203792 # 0x00402510, R/W, mode, "MASK REGION MODE" + NUMBEROF_MASKREGION = 4203808 # 0x00402520, R/W, long, "NUMBER OF MASK REGION" + MASKREGION_HPOS = 4980736 # 0x004C0000, R/W, long, "MASK REGION HPOS" + MASKREGION_HSIZE = 4984832 # 0x004C1000, R/W, long, "MASK REGION HSIZE" + # - 0x0048FFFF for 256 REGIONs at least + _MASKREGION = 16 # 0x00000010, the offset of ID for Nth REGION + # Group: Camera Status + CAMERASTATUS_INTENSITY = 5050640 # 0x004D1110, R/O, mode, "CAMERASTATUS INTENSITY" + CAMERASTATUS_INPUTTRIGGER = 5050656 # 0x004D1120 + CAMERASTATUS_CALIBRATION = 5050672 # 0x004D1130, R/O, mode, "CAMERASTATUS CALIBRATION" + # Group: Back Focus Position + BACKFOCUSPOS_TARGET = 8405008 # 0x00804010, R/W, micro-meter,"BACK FOCUS POSITION TARGET" + BACKFOCUSPOS_CURRENT = 8405024 # 0x00804020, R/O, micro-meter,"BACK FOCUS POSITION CURRENT" + BACKFOCUSPOS_LOADFROMMEMORY = 8405072 # 0x00804050 + BACKFOCUSPOS_STORETOMEMORY = 8405088 # 0x00804060, W/O, long, "BACK FOCUS POSITION STORE TO MEMORY" + # Group: MAICO + CONFOCAL_SCANMODE = 9502736 # 0x00910010, R/W, mode, "CONFOCAL SCAN MODE" + CONFOCAL_SCANLINES = 9502752 # 0x00910020, R/W, long, "CONFOCAL SCANLINES" + CONFOCAL_ZOOM = 9502768 # 0x00910030, R/W, long, "CONFOCAL ZOOM" + SUBUNIT_IMAGEWIDTH = 9502944 # 0x009100e0, R/O, long, "SUBUNIT IMAGE WIDTH + NUMBEROF_SUBUNIT = 9502960 # 0x009100f0, R/O, long, "NUMBER OF SUBUNIT" + SUBUNIT_CONTROL = 9502976 # 0x00910100, R/W, mode, "SUBUNIT CONTROL" + SUBUNIT_LASERPOWER = 9503232 # 0x00910200, R/W, long, "SUBUNIT LASERPOWER" + SUBUNIT_PMTGAIN = 9503488 # 0x00910300, R/W, real, "SUBUNIT PMTGAIN" + SUBUNIT_PINHOLESIZE = 9503744 # 0x00910400, R/O, long, "SUBUNIT PINHOLE SIZE" + SUBUNIT_WAVELENGTH = 9504000 # 0x00910500, R/O, long, "SUBUNIT WAVELENGTH" + SUBUNIT_TOPOFFSETBYTES = 9504256 # 0x00910600, R/O, long, "SUBUNIT TOP OFFSET BYTES" + _SUBUNIT = 16 # 0x00000010, the offset of ID for Nth Subunit parameter + # Group: SYSTEM + # system property + 
class DCAMAPI_INIT(Structure):
    """Argument record for API initialization.

    ``size`` must be set to ``sizeof(DCAMAPI_INIT)`` before the call;
    ``iDeviceCount`` is filled in by the driver (out parameter).
    """

    _pack_ = 8
    _fields_ = [
        ("size", c_int32),
        ("iDeviceCount", c_int32),  # out
        ("reserved", c_int32),
        ("initoptionbytes", c_int32),  # byte length of the initoption array
        ("initoption", POINTER(c_int32)),
        ("guid", c_void_p),  # const DCAM_GUID*
    ]

    def __init__(self):
        self.size = sizeof(DCAMAPI_INIT)


class DCAMDEV_OPEN(Structure):
    """Argument record for opening a device by ``index``; ``hdcam`` is out."""

    _pack_ = 8
    _fields_ = [
        ("size", c_int32),
        ("index", c_int32),
        ("hdcam", c_void_p),  # out
    ]

    def __init__(self):
        self.size = sizeof(DCAMDEV_OPEN)
        self.index = 0


class DCAMDEV_STRING(Structure):
    """Argument record for fetching a device string identified by ``iString``."""

    _pack_ = 8
    _fields_ = [
        ("size", c_int32),
        ("iString", c_int32),
        ("text", c_char_p),
        ("textbytes", c_int32),
    ]

    def __init__(self):
        self.size = sizeof(DCAMDEV_STRING)

    def alloctext(self, maxlen):
        """Attach a writable buffer of ``maxlen`` bytes for the driver to fill.

        The buffer is kept on ``self._textbuf`` so it stays alive as long as
        the struct does (``text`` only stores the raw address).
        """
        self._textbuf = create_string_buffer(maxlen)
        self.text = addressof(self._textbuf)
        self.textbytes = sizeof(self._textbuf)


class DCAM_PROP:
    """Namespace for property attribute flags and property types."""

    class ATTR(IntEnum):
        """Attribute flags (bit masks tested against DCAMPROP_ATTR.attribute)."""

        # BUGFIX: was -2147483647 (bit pattern 0x80000001), contradicting its
        # own comment; 0x80000000 as a signed 32-bit value is -2147483648.
        # The old value also matched bit 0, corrupting mask tests.
        HASRANGE = -2147483648  # 0x80000000
        HASSTEP = 0x40000000
        HASDEFAULT = 0x20000000
        HASVALUETEXT = 0x10000000
        HASCHANNEL = 0x08000000
        AUTOROUNDING = 0x00800000
        STEPPING_INCONSISTENT = 0x00400000
        DATASTREAM = 0x00200000
        HASRATIO = 0x00100000
        VOLATILE = 0x00080000
        WRITABLE = 0x00020000
        READABLE = 0x00010000
        HASVIEW = 0x00008000
        _SYSTEM = 0x00004000
        ACCESSREADY = 0x00002000
        ACCESSBUSY = 0x00001000
        ADVANCED = 0x00000800
        EFFECTIVE = 0x00000200

    class TYPE(IntEnum):
        """Property value types; MASK extracts the type bits."""

        NONE = 0
        MODE = 1
        LONG = 2
        REAL = 3
        MASK = 15

    class ATTR2(IntEnum):
        """Secondary attribute flags (bit masks for DCAMPROP_ATTR.attribute2)."""

        ARRAYBASE = 134217728  # 0x08000000
        ARRAYELEMENT = 67108864  # 0x04000000
        REAL32 = 33554432  # 0x02000000
        INITIALIZEIMPROPER = 1  # 0x00000001
        CHANNELSEPARATEDDATA = 262144  # 0x00040000, Channel 0 value is total of each channels.
+ """ + ARRAYBASE = 134217728 # 0x08000000 + ARRAYELEMENT = 67108864 # 0x04000000 + REAL32 = 33554432 # 0x02000000 + INITIALIZEIMPROPER = 1 # 0x00000001 + CHANNELSEPARATEDDATA = 262144 # 0x00040000, Channel 0 value is total of each channels. + + + +class DCAMPROP_OPTION(IntEnum): + """ + options + """ + PRIOR = -16777215 # 0xFF000000 + NEXT = 0x01000000 + NEAREST = 0x80000000 + SUPPORT = 0x00000000 + UPDATED = 0x00000001 + VOLATILE = 0x00000002 + ARRAYELEMENT = 0x00000004 + NONE = 0x00000000 + + +class DCAMPROP_UNIT(IntEnum): + """ + Unit of value + """ + SECOND = 1 + CELSIUS = 2 + KELVIN = 3 + METERPERSECOND = 4 + PERSECOND = 5 + DEGREE = 6 + MICROMETER = 7 + NONE = 0 + +# property values + + +class DCAMPROP: + + class SENSORMODE(IntEnum): + AREA = 1 + LINE = 3 + TDI = 4 + TDI_EXTENDED = 10 + PROGRESSIVE = 12 + SPLITVIEW = 14 + DUALLIGHTSHEET = 16 + PHOTONNUMBERRESOLVING = 18 + WHOLELINES = 19 + + class SHUTTER_MODE(IntEnum): + GLOBAL = 1 + ROLLING = 2 + + class READOUTSPEED(IntEnum): + SLOWEST = 1 + FASTEST = 0x7FFFFFFF + + class READOUT_DIRECTION(IntEnum): + FORWARD = 1 + BACKWARD = 2 + BYTRIGGER = 3 + DIVERGE = 5 + FORWARDBIDIRECTION = 6 + REVERSEBIDIRECTION = 7 + + class READOUT_UNIT(IntEnum): + FRAME = 2 + BUNDLEDLINE = 3 + BUNDLEDFRAME = 4 + + class CCDMODE(IntEnum): + NORMALCCD = 1 + EMCCD = 2 + + class CMOSMODE(IntEnum): + NORMAL = 1 + NONDESTRUCTIVE = 2 + + class MULTILINESENSOR_READOUTMODE(IntEnum): + SYNCACCUMULATE = 1 + SYNCAVERAGE = 2 + + class OUTPUT_INTENSITY(IntEnum): + NORMAL = 1 + TESTPATTERN = 2 + + class OUTPUTDATA_OPERATION(IntEnum): + RAW = 1 + ALIGNED = 2 + + class TESTPATTERN_KIND(IntEnum): + FLAT = 2 + IFLAT = 3 + HORZGRADATION = 4 + IHORZGRADATION = 5 + VERTGRADATION = 6 + IVERTGRADATION = 7 + LINE = 8 + ILINE = 9 + DIAGONAL = 10 + IDIAGONAL = 11 + FRAMECOUNT = 12 + + class DIGITALBINNING_METHOD(IntEnum): + MINIMUM = 1 + MAXIMUM = 2 + ODD = 3 + EVEN = 4 + SUM = 5 + AVERAGE = 6 + + class TRIGGERSOURCE(IntEnum): + INTERNAL = 1 + EXTERNAL = 
2 + SOFTWARE = 3 + MASTERPULSE = 4 + + class TRIGGERACTIVE(IntEnum): + EDGE = 1 + LEVEL = 2 + SYNCREADOUT = 3 + POINT = 4 + + class BUS_SPEED(IntEnum): + SLOWEST = 1 + FASTEST = 0x7FFFFFFF + + class TRIGGER_MODE(IntEnum): + NORMAL = 1 + PIV = 3 + START = 6 + + class TRIGGERPOLARITY(IntEnum): + NEGATIVE = 1 + POSITIVE = 2 + + class TRIGGER_CONNECTOR(IntEnum): + INTERFACE = 1 + BNC = 2 + MULTI = 3 + + class INTERNALTRIGGER_HANDLING(IntEnum): + SHORTEREXPOSURETIME = 1 + FASTERFRAMERATE = 2 + ABANDONWRONGFRAME = 3 + BURSTMODE = 4 + INDIVIDUALEXPOSURE = 7 + + class SYNCREADOUT_SYSTEMBLANK(IntEnum): + STANDARD = 1 + MINIMUM = 2 + + class TRIGGERENABLE_ACTIVE(IntEnum): + DENY = 1 + ALWAYS = 2 + LEVEL = 3 + START = 4 + BURST = 6 + + class TRIGGERENABLE_SOURCE(IntEnum): + MULTI = 7 + SMA = 8 + + class TRIGGERENABLE_POLARITY(IntEnum): + NEGATIVE = 1 + POSITIVE = 2 + INTERLOCK = 3 + + class OUTPUTTRIGGER_CHANNELSYNC(IntEnum): + _1CHANNEL = 1 + _2CHANNELS = 2 + _3CHANNELS = 3 + + class OUTPUTTRIGGER_PROGRAMABLESTART(IntEnum): + FIRSTEXPOSURE = 1 + FIRSTREADOUT = 2 + + class OUTPUTTRIGGER_SOURCE(IntEnum): + EXPOSURE = 1 + READOUTEND = 2 + VSYNC = 3 + HSYNC = 4 + TRIGGER = 6 + + class OUTPUTTRIGGER_POLARITY(IntEnum): + NEGATIVE = 1 + POSITIVE = 2 + + class OUTPUTTRIGGER_ACTIVE(IntEnum): + EDGE = 1 + LEVEL = 2 + + class OUTPUTTRIGGER_KIND(IntEnum): + LOW = 1 + GLOBALEXPOSURE = 2 + PROGRAMABLE = 3 + TRIGGERREADY = 4 + HIGH = 5 + ANYROWEXPOSURE = 6 + + class OUTPUTTRIGGER_BASESENSOR(IntEnum): + VIEW1 = 1 + VIEW2 = 2 + ANYVIEW = 15 + ALLVIEWS = 16 + + class EXPOSURETIME_CONTROL(IntEnum): + OFF = 1 + NORMAL = 2 + + class TRIGGER_FIRSTEXPOSURE(IntEnum): + NEW = 1 + CURRENT = 2 + + class TRIGGER_GLOBALEXPOSURE(IntEnum): + NONE = 1 + ALWAYS = 2 + DELAYED = 3 + EMULATE = 4 + GLOBALRESET = 5 + + class FIRSTTRIGGER_BEHAVIOR(IntEnum): + STARTEXPOSURE = 1 + STARTREADOUT = 2 + + class MASTERPULSE_MODE(IntEnum): + CONTINUOUS = 1 + START = 2 + BURST = 3 + + class 
MASTERPULSE_TRIGGERSOURCE(IntEnum): + EXTERNAL = 1 + SOFTWARE = 2 + + class MECHANICALSHUTTER(IntEnum): + AUTO = 1 + CLOSE = 2 + OPEN = 3 + + class LIGHTMODE(IntEnum): + LOWLIGHT = 1 + HIGHLIGHT = 2 + + class SENSITIVITYMODE(IntEnum): + OFF = 1 + ON = 2 + INTERLOCK = 3 + + class EMGAINWARNING_STATUS(IntEnum): + NORMAL = 1 + WARNING = 2 + PROTECTED = 3 + + class PHOTONIMAGINGMODE(IntEnum): + _0 = 0 + _1 = 1 + _2 = 2 + _3 = 3 + + class SENSORCOOLER(IntEnum): + OFF = 1 + ON = 2 + MAX = 4 + + class SENSORTEMPERATURE_STATUS(IntEnum): + NORMAL = 0 + WARNING = 1 + PROTECTION = 2 + + class SENSORCOOLERSTATUS(IntEnum): + ERROR4 = -4 + ERROR3 = -3 + ERROR2 = -2 + ERROR1 = -1 + NONE = 0 + OFF = 1 + READY = 2 + BUSY = 3 + ALWAYS = 4 + WARNING = 5 + + class REALTIMEGAINCORRECT_LEVEL(IntEnum): + _1 = 1 + _2 = 2 + _3 = 3 + _4 = 4 + _5 = 5 + + class WHITEBALANCEMODE(IntEnum): + FLAT = 1 + AUTO = 2 + TEMPERATURE = 3 + USERPRESET = 4 + + class DARKCALIB_TARGET(IntEnum): + ALL = 1 + ANALOG = 2 + + class SHADINGCALIB_METHOD(IntEnum): + AVERAGE = 1 + MAXIMUM = 2 + USETARGET = 3 + + class CAPTUREMODE(IntEnum): + NORMAL = 1 + DARKCALIB = 2 + SHADINGCALIB = 3 + TAPGAINCALIB = 4 + BACKFOCUSCALIB = 5 + + class IMAGEFILTER(IntEnum): + THROUGH = 0 + PATTERN_1 = 1 + + class INTERFRAMEALU_ENABLE(IntEnum): + OFF = 1 + TRIGGERSOURCE_ALL = 2 + TRIGGERSOURCE_INTERNAL = 3 + + class SHADINGCALIB_DATASTATUS(IntEnum): + NONE = 1 + FORWARD = 2 + BACKWARD = 3 + BOTH = 4 + + class TAPGAINCALIB_METHOD(IntEnum): + AVE = 1 + MAX = 2 + MIN = 3 + + class RECURSIVEFILTERFRAMES(IntEnum): + _2 = 2 + _4 = 4 + _8 = 8 + _16 = 16 + _32 = 32 + _64 = 64 + + class INTENSITYLUT_MODE(IntEnum): + THROUGH = 1 + PAGE = 2 + CLIP = 3 + + class BINNING(IntEnum): + _1 = 1 + _2 = 2 + _4 = 4 + _8 = 8 + _16 = 16 + _1_2 = 102 + _2_4 = 204 + + class COLORTYPE(IntEnum): + BW = 0x00000001 + RGB = 0x00000002 + BGR = 0x00000003 + + class BITSPERCHANNEL(IntEnum): + _8 = 8 + _10 = 10 + _12 = 12 + _14 = 14 + _16 = 16 + + class 
DEFECTCORRECT_MODE(IntEnum): + OFF = 1 + ON = 2 + + class DEFECTCORRECT_METHOD(IntEnum): + CEILING = 3 + PREVIOUS = 4 + NEXT = 5 + + class HOTPIXELCORRECT_LEVEL(IntEnum): + STANDARD = 1 + MINIMUM = 2 + AGGRESSIVE = 3 + + class DEVICEBUFFER_MODE(IntEnum): + THRU = 1 + SNAPSHOT = 2 + SNAPSHOTEX = 6 + + class INTERNALLINERATE_CONTROL(IntEnum): + SYNC_EXPOSURETIME = 1 + PRIORITIZE_LINERATE = 2 + PRIORITIZE_EXPOSURETIME = 3 + + class SYSTEM_ALIVE(IntEnum): + OFFLINE = 1 + ONLINE = 2 + ERROR = 3 + + class TIMESTAMP_MODE(IntEnum): + NONE = 1 + LINEBEFORELEFT = 2 + LINEOVERWRITELEFT = 3 + AREABEFORELEFT = 4 + AREAOVERWRITELEFT = 5 + + class TIMING_EXPOSURE(IntEnum): + AFTERREADOUT = 1 + OVERLAPREADOUT = 2 + ROLLING = 3 + ALWAYS = 4 + TDI = 5 + + class TIMESTAMP_PRODUCER(IntEnum): + NONE = 1 + DCAMMODULE = 2 + KERNELDRIVER = 3 + CAPTUREDEVICE = 4 + IMAGINGDEVICE = 5 + + class FRAMESTAMP_PRODUCER(IntEnum): + NONE = 1 + DCAMMODULE = 2 + KERNELDRIVER = 3 + CAPTUREDEVICE = 4 + IMAGINGDEVICE = 5 + + class CAMERASTATUS_INTENSITY(IntEnum): + GOOD = 1 + TOODARK = 2 + TOOBRIGHT = 3 + UNCARE = 4 + EMGAIN_PROTECTION = 5 + INCONSISTENT_OPTICS = 6 + NODATA = 7 + + class CAMERASTATUS_INPUTTRIGGER(IntEnum): + GOOD = 1 + NONE = 2 + TOOFREQUENT = 3 + + class CAMERASTATUS_CALIBRATION(IntEnum): + DONE = 1 + NOTYET = 2 + NOTRIGGER = 3 + TOOFREQUENTTRIGGER = 4 + OUTOFADJUSTABLERANGE = 5 + UNSUITABLETABLE = 6 + TOODARK = 7 + TOOBRIGHT = 8 + NOTDETECTOBJECT = 9 + + class CONFOCAL_SCANMODE(IntEnum): + SIMULTANEOUS = 1 + SEQUENTIAL = 2 + + class SUBUNIT_CONTROL(IntEnum): + NOTINSTALLED = 0 + OFF = 1 + ON = 2 + + class SUBUNIT_PINHOLESIZE(IntEnum): + ERROR = 1 + SMALL = 2 + MEDIUM = 3 + LARGE = 4 + + class MODE(IntEnum): + OFF = 1 + ON = 2 + +class DCAMPROP_ATTR(Structure): + _pack_ = 8 + _fields_ = [ + ('cbSize', c_int32), + ('iProp', c_int32), + ('option', c_int32), + ('iReserved1', c_int32), + ('attribute', c_int32), + ('iGroup', c_int32), + ('iUnit', c_int32), + ('attribute2', c_int32), + 
('valuemin', c_double), + ('valuemax', c_double), + ('valuestep', c_double), + ('valuedefault', c_double), + ('nMaxChannel', c_int32), + ('iReserved3', c_int32), + ('nMaxView', c_int32), + ('iProp_NumberOfElement', c_int32), + ('iProp_ArrayBase', c_int32), + ('iPropStep_Element', c_int32) + ] + + def __init__(self): + self.cbSize = sizeof(DCAMPROP_ATTR) + + def is_effective(self): + return True if self.attribute & DCAM_PROP.ATTR.EFFECTIVE else False + + def is_writable(self): + return True if self.attribute & DCAM_PROP.ATTR.WRITABLE else False + + def is_readable(self): + return True if self.attribute & DCAM_PROP.ATTR.READABLE else False + + def is_volatile(self): + return True if self.attribute & DCAM_PROP.ATTR.VOLATILE else False + + def is_accessready(self): + return True if self.attribute & DCAM_PROP.ATTR.ACCESSREADY else False + + def is_accessbusy(self): + return True if self.attribute & DCAM_PROP.ATTR.ACCESSBUSY else False + + def is_datastream(self): + return True if self.attribute & DCAM_PROP.ATTR.DATASTREAM else False + + def is_autorounding(self): + return True if self.attribute & DCAM_PROP.ATTR.AUTOROUNDING else False + + def is_stepping_inconsistent(self): + return True if self.attribute & DCAM_PROP.ATTR.STEPPING_INCONSISTENT else False + + def is_hasview(self): + return True if self.attribute & DCAM_PROP.ATTR.HASVIEW else False + + def is_haschannel(self): + return True if self.attribute & DCAM_PROP.ATTR.HASCHANNEL else False + + +class DCAMPROP_VALUETEXT(Structure): + _pack_ = 8 + _fields_ = [ + ('cbSize', c_int32), + ('iProp', c_int32), + ('value', c_double), + ('text', c_char_p), + ('textbytes', c_int32) + ] + + def __init__(self): + self.cbSize = sizeof(DCAMPROP_VALUETEXT) + + def alloctext(self, maxlen): + self._textbuf = create_string_buffer(maxlen) + self.text = addressof(self._textbuf) + self.textbytes = sizeof(self._textbuf) + + +class DCAM_TIMESTAMP(Structure): + _pack_ = 8 + _fields_ = [ + ('sec', c_uint32), + ('microsec', c_int32) + ] + + 
def __init__(self): + self.sec = 0 + self.microsec = 0 + + +class DCAMCAP_TRANSFERINFO(Structure): + _pack_ = 8 + _fields_ = [ + ('size', c_int32), + ('iKind', c_int32), + ('nNewestFrameIndex', c_int32), + ('nFrameCount', c_int32) + ] + + def __init__(self): + self.size = sizeof(DCAMCAP_TRANSFERINFO) + self.iKind = 0 + self.nNewestFrameIndex = -1 + self.nFrameCount = 0 + + +class DCAMBUF_FRAME(Structure): + _pack_ = 8 + _fields_ = [ + ('size', c_int32), + ('iKind', c_int32), + ('option', c_int32), + ('iFrame', c_int32), + ('buf', c_void_p), + ('rowbytes', c_int32), + ('type', c_int32), # DCAM_PIXELTYPE + ('width', c_int32), + ('height', c_int32), + ('left', c_int32), + ('top', c_int32), + ('timestamp', DCAM_TIMESTAMP), + ('framestamp', c_int32), + ('camerastamp', c_int32) + ] + + def __init__(self): + self.size = sizeof(DCAMBUF_FRAME) + self.iKind = 0 + self.option = 0 + self.iFrame = 0 + self.buf = 0 + self.rowbytes = 0 + self.type = DCAM_PIXELTYPE.MONO16 + self.width = 0 + self.height = 0 + self.left = 0 + self.top = 0 + self.timestamp = DCAM_TIMESTAMP() + self.framestamp = 0 + self.camerastamp = 0 + + +class DCAMWAIT_OPEN(Structure): + _pack_ = 8 + _fields_ = [ + ('size', c_int32), + ('supportevent', c_int32), # out + ('hwait', c_void_p), # out + ('hdcam', c_void_p) + ] + + def __init__(self): + self.size = sizeof(DCAMWAIT_OPEN) + + +class DCAMWAIT_START(Structure): + _pack_ = 8 + _fields_ = [ + ('size', c_int32), + ('eventhappened', c_int32), # out + ('eventmask', c_int32), + ('timeout', c_int32) + ] + + def __init__(self): + self.size = sizeof(DCAMWAIT_START) + + +if __platform_system == 'Windows': + class DCAMREC_OPEN(Structure): # DCAMREC_OPENW + _pack_ = 8 + _fields_ = [ + ('size', c_int32), # [in] size of this structure. 
+ ('reserved', c_int32), # [in] + ('hrec', c_void_p), # [out] + ('path', c_wchar_p), # [in] wchar_t* + ('ext', c_wchar_p), # [in] wchar_t* + ('maxframepersession', c_int32), # [in] + ('userdatasize', c_int32), # [in] + ('userdatasize_session', c_int32), # [in] + ('userdatasize_file', c_int32), # [in] + ('usertextsize', c_int32), # [in] + ('usertextsize_session', c_int32), # [in] + ('usertextsize_file', c_int32), # [in] + ] + + def __init__(self): + self.size = sizeof(DCAMREC_OPEN) + # wtextbuf = create_unicode_buffer(256) + # self.path = addressof(wtextbuf) + self.ext = 0 # 'dcimg'.encode('UTF-16') + self.maxframepersession = 0 + self.userdatasize_file = 0 + self.usertextsize_file = 0 + self.userdatasize_session = 0 + self.usertextsize_session = 0 + self.userdatasize = 0 + self.usertextsize = 0 + + def setpath(self, filepath): + self.path = filepath + + +# ==== assign aliases ==== + +dcamapi_init = __dll.dcamapi_init +dcamapi_init.argtypes = [POINTER(DCAMAPI_INIT)] +dcamapi_init.restype = DCAMERR +dcamapi_uninit = __dll.dcamapi_uninit +dcamapi_uninit.restype = DCAMERR +dcamdev_open = __dll.dcamdev_open +dcamdev_open.argtypes = [POINTER(DCAMDEV_OPEN)] +dcamdev_open.restype = DCAMERR +dcamdev_close = __dll.dcamdev_close +dcamdev_close.argtypes = [c_void_p] +dcamdev_close.restype = DCAMERR +dcamdev_getstring = __dll.dcamdev_getstring +dcamdev_getstring.argtypes = [c_void_p, POINTER(DCAMDEV_STRING)] +dcamdev_getstring.restype = DCAMERR +dcamprop_getattr = __dll.dcamprop_getattr +dcamprop_getattr.argtypes = [c_void_p, POINTER(DCAMPROP_ATTR)] +dcamprop_getattr.restype = DCAMERR +dcamprop_getvalue = __dll.dcamprop_getvalue +dcamprop_getvalue.argtypes = [c_void_p, c_int32, POINTER(c_double)] +dcamprop_getvalue.restype = DCAMERR +dcamprop_setvalue = __dll.dcamprop_setvalue +dcamprop_setvalue.argtypes = [c_void_p, c_int32, c_double] +dcamprop_setvalue.restype = DCAMERR +dcamprop_setgetvalue = __dll.dcamprop_setgetvalue +dcamprop_setgetvalue.argtypes = [c_void_p, c_int32, 
POINTER(c_double), c_int32] +dcamprop_setgetvalue.restype = DCAMERR +dcamprop_queryvalue = __dll.dcamprop_queryvalue +dcamprop_queryvalue.argtypes = [c_void_p, c_int32, POINTER(c_double), c_int32] +dcamprop_queryvalue.restype = DCAMERR +dcamprop_getnextid = __dll.dcamprop_getnextid +dcamprop_getnextid.argtypes = [c_void_p, POINTER(c_int32), c_int32] +dcamprop_getnextid.restype = DCAMERR +dcamprop_getname = __dll.dcamprop_getname +dcamprop_getname.argtypes = [c_void_p, c_int32, c_char_p, c_int32] +dcamprop_getname.restype = DCAMERR +dcamprop_getvaluetext = __dll.dcamprop_getvaluetext +dcamprop_getvaluetext.argtypes = [c_void_p, POINTER(DCAMPROP_VALUETEXT)] +dcamprop_getvaluetext.restype = DCAMERR +dcambuf_alloc = __dll.dcambuf_alloc +dcambuf_alloc.argtypes = [c_void_p, c_int32] +dcambuf_alloc.restype = DCAMERR +dcambuf_release = __dll.dcambuf_release +dcambuf_release.argtypes = [c_void_p, c_int32] +dcambuf_release.restype = DCAMERR +dcambuf_lockframe = __dll.dcambuf_lockframe +dcambuf_lockframe.argtypes = [c_void_p, POINTER(DCAMBUF_FRAME)] +dcambuf_lockframe.restype = DCAMERR +dcambuf_copyframe = __dll.dcambuf_copyframe +dcambuf_copyframe.argtypes = [c_void_p, POINTER(DCAMBUF_FRAME)] +dcambuf_copyframe.restype = DCAMERR +dcamcap_start = __dll.dcamcap_start +dcamcap_start.argtypes = [c_void_p, c_int32] +dcamcap_start.restype = DCAMERR +dcamcap_stop = __dll.dcamcap_stop +dcamcap_stop.argtypes = [c_void_p] +dcamcap_stop.restype = DCAMERR +dcamcap_status = __dll.dcamcap_status +dcamcap_status.argtypes = [c_void_p, POINTER(c_int32)] +dcamcap_status.restype = DCAMERR +dcamcap_transferinfo = __dll.dcamcap_transferinfo +dcamcap_transferinfo.argtypes = [c_void_p, POINTER(DCAMCAP_TRANSFERINFO)] +dcamcap_transferinfo.restype = DCAMERR +dcamcap_firetrigger = __dll.dcamcap_firetrigger +dcamcap_firetrigger.argtypes = [c_void_p, c_int32] +dcamcap_firetrigger.restype = DCAMERR + +if __platform_system == 'Windows': + dcamcap_record = __dll.dcamcap_record + dcamcap_record.argtypes = 
[c_void_p, c_void_p] + dcamcap_record.restype = DCAMERR + +dcamwait_open = __dll.dcamwait_open +dcamwait_open.argtypes = [POINTER(DCAMWAIT_OPEN)] +dcamwait_open.restype = DCAMERR +dcamwait_close = __dll.dcamwait_close +dcamwait_close.argtypes = [c_void_p] +dcamwait_close.restype = DCAMERR +dcamwait_start = __dll.dcamwait_start +dcamwait_start.argtypes = [c_void_p, POINTER(DCAMWAIT_START)] +dcamwait_start.restype = DCAMERR +dcamwait_abort = __dll.dcamwait_abort +dcamwait_abort.argtypes = [c_void_p] +dcamwait_abort.restype = DCAMERR + +if __platform_system == 'Windows': + dcamrec_open = __dll.dcamrec_openW + dcamrec_open.argtypes = [POINTER(DCAMREC_OPEN)] # DCAMREC_OPENW + dcamrec_open.restype = DCAMERR + + dcamrec_close = __dll.dcamrec_close + dcamrec_close.argtypes = [c_void_p] + dcamrec_close.restype = DCAMERR + diff --git a/software/control/gui.py b/software/control/gui.py index fe932fa0c..249aacb9a 100644 --- a/software/control/gui.py +++ b/software/control/gui.py @@ -14,6 +14,7 @@ import control.core as core import control.microcontroller as microcontroller from control._def import * +import squid.logging import pyqtgraph.dockarea as dock SINGLE_WINDOW = True # set to False if use separate windows for display and control @@ -26,6 +27,8 @@ class OctopiGUI(QMainWindow): def __init__(self, is_simulation = False, *args, **kwargs): super().__init__(*args, **kwargs) + self.log = squid.logging.get_logger(self.__class__.__name__) + # load window if ENABLE_TRACKING: self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True,autoLevels=AUTOLEVEL_DEFAULT_SETTING) @@ -44,7 +47,7 @@ def __init__(self, is_simulation = False, *args, **kwargs): # load objects if is_simulation: self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() + self.microcontroller = microcontroller.Microcontroller(existing_serial=microcontroller.SimSerial()) else: try: 
self.camera = camera.Camera(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) @@ -52,11 +55,11 @@ def __init__(self, is_simulation = False, *args, **kwargs): except: self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) self.camera.open() - print('! camera not detected, using simulated camera !') + self.log.error("camera not detected, using simulated camera") try: self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION) except: - print('! Microcontroller not detected, using simulated microcontroller !') + self.log.error("Microcontroller not detected, using simulated microcontroller") self.microcontroller = microcontroller.Microcontroller_Simulation() # reset the MCU @@ -67,11 +70,13 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.objectiveStore = core.ObjectiveStore() self.configurationManager = core.ConfigurationManager('./channel_configurations.xml') + self.objectiveStore = core.ObjectiveStore(parent=self) # todo: add widget to select/save objective save self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) + self.navigationController = core.NavigationController(self.microcontroller,self.objectiveStore) self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) + self.scanCoordinates = core.ScanCoordinates() + self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager,scanCoordinates=self.scanCoordinates,parent=self) if 
ENABLE_TRACKING: self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) self.imageSaver = core.ImageSaver(image_format=Acquisition.IMAGE_FORMAT) @@ -98,12 +103,15 @@ def __init__(self, is_simulation = False, *args, **kwargs): if ENABLE_TRACKING: self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) + self.navigationViewer = core.NavigationViewer(self.objectiveStore, sample=str(6)+' well plate') + self.multiPointWidget2 = widgets.MultiPointWidget2(self.navigationController,self.navigationViewer,self.multipointController,self.configurationManager,scanCoordinates=None) self.recordTabWidget = QTabWidget() if ENABLE_TRACKING: self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") + self.recordTabWidget.addTab(self.multiPointWidget2, "Flexible Multipoint") self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") + # self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") # layout widgets layout = QVBoxLayout() @@ -178,6 +186,21 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.liveControlWidget.update_camera_settings() self.liveControlWidget.signal_autoLevelSetting.connect(self.imageDisplayWindow.set_autolevel) + self.multiPointWidget2.signal_acquisition_started.connect(self.navigationWidget.toggle_navigation_controls) + + if USE_NAPARI_FOR_MULTIPOINT: + self.napariMultiChannelWidget = widgets.NapariMultiChannelWidget(self.objectiveStore) + self.imageDisplayTabs.addTab(self.napariMultiChannelWidget, "Multichannel Acquisition") + 
self.multiPointWidget.signal_acquisition_channels.connect(self.napariMultiChannelWidget.initChannels) + self.multiPointWidget.signal_acquisition_shape.connect(self.napariMultiChannelWidget.initLayersShape) + if ENABLE_FLEXIBLE_MULTIPOINT: + self.multiPointWidget2.signal_acquisition_channels.connect(self.napariMultiChannelWidget.initChannels) + self.multiPointWidget2.signal_acquisition_shape.connect(self.napariMultiChannelWidget.initLayersShape) + + self.multipointController.napari_layers_init.connect(self.napariMultiChannelWidget.initLayers) + self.multipointController.napari_layers_update.connect(self.napariMultiChannelWidget.updateLayers) + + def closeEvent(self, event): event.accept() # self.softwareTriggerGenerator.stop() @@@ => diff --git a/software/control/gui_2cameras_async.py b/software/control/gui_2cameras_async.py deleted file mode 100644 index 2ebe8689b..000000000 --- a/software/control/gui_2cameras_async.py +++ /dev/null @@ -1,139 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy -from pathlib import Path - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.microcontroller = microcontroller.Microcontroller_Simulation() - self.navigationController = core.NavigationController(self.microcontroller) - - self.camera_1 = camera.Camera_Simulation(sn='FW0190110139') # tracking - self.camera_2 = camera.Camera_Simulation(sn='FU0190090030') # fluorescence - - self.configurationManager_1 = core.ConfigurationManager(filename=str(Path.home()) + "/configurations_tracking.xml") - self.configurationManager_2 = 
core.ConfigurationManager(filename=str(Path.home()) + "/configurations_fluorescence.xml") - - self.streamHandler_1 = core.StreamHandler() - self.liveController_1 = core.LiveController(self.camera_1,self.microcontroller,self.configurationManager_1,control_illumination=False) - #self.autofocusControlle_1 = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - #self.multipointController_1 = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) - self.imageSaver_1 = core.ImageSaver() - - self.streamHandler_2 = core.StreamHandler() - self.liveController_2 = core.LiveController(self.camera_2,self.microcontroller,self.configurationManager_2,control_illumination=True) - self.autofocusController_2 = core.AutoFocusController(self.camera_2,self.navigationController,self.liveController_2) - self.multipointController_2 = core.MultiPointController(self.camera_2,self.navigationController,self.liveController_2,self.autofocusController_2,self.configurationManager_2) - self.imageSaver_2 = core.ImageSaver() - - self.trackingController = core.TrackingController(self.microcontroller,self.navigationController) - - # open the camera - # camera start streaming - self.camera_1.open() - self.camera_1.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_1.set_callback(self.streamHandler_1.on_new_frame) - self.camera_1.enable_callback() - - self.camera_2.open() - self.camera_2.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_2.set_callback(self.streamHandler_2.on_new_frame) - self.camera_2.enable_callback() - - # load widgets - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - - self.cameraSettingWidget_1 = widgets.CameraSettingsWidget(self.camera_1,self.liveController_1) - self.liveControlWidget_1 = 
widgets.LiveControlWidget(self.streamHandler_1,self.liveController_1,self.configurationManager_1) - self.recordingControlWidget_1 = widgets.RecordingWidget(self.streamHandler_1,self.imageSaver_1) - #self.trackingControlWidget = widgets.TrackingControllerWidget(self.streamHandler_1,self.trackingController) - - self.cameraSettingWidget_2 = widgets.CameraSettingsWidget(self.camera_2,self.liveController_2) - self.liveControlWidget_2 = widgets.LiveControlWidget(self.streamHandler_2,self.liveController_2,self.configurationManager_2) - #self.recordingControlWidget_2 = widgets.RecordingWidget(self.streamHandler_2,self.imageSaver_2) - self.multiPointWidget_2 = widgets.MultiPointWidget(self.multipointController_2,self.configurationManager_2) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget_1,0,0) - layout.addWidget(self.liveControlWidget_1,1,0) - layout.addWidget(self.navigationWidget,2,0) - #layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordingControlWidget_1,4,0) - - layout.addWidget(self.cameraSettingWidget_2,5,0) - layout.addWidget(self.liveControlWidget_2,6,0) - #layout.addWidget(self.recordingControlWidget_2,7,0) - layout.addWidget(self.multiPointWidget_2,8,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow_1 = core.ImageDisplayWindow('Tracking') - self.imageDisplayWindow_1.show() - self.imageDisplayWindow_2 = core.ImageDisplayWindow('Fluorescence') - self.imageDisplayWindow_2.show() - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow('Multi-channel') - self.imageArrayDisplayWindow.show() - - # make connections - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - 
self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - - self.streamHandler_1.signal_new_frame_received.connect(self.liveController_1.on_new_frame) - self.streamHandler_1.image_to_display.connect(self.imageDisplayWindow_1.display_image) - self.streamHandler_1.packet_image_to_write.connect(self.imageSaver_1.enqueue) - #self.streamHandler_1.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - - self.liveControlWidget_1.signal_newExposureTime.connect(self.cameraSettingWidget_1.set_exposure_time) - self.liveControlWidget_1.signal_newAnalogGain.connect(self.cameraSettingWidget_1.set_analog_gain) - self.liveControlWidget_1.update_camera_settings() - - self.streamHandler_2.signal_new_frame_received.connect(self.liveController_2.on_new_frame) - self.streamHandler_2.image_to_display.connect(self.imageDisplayWindow_2.display_image) - self.streamHandler_2.packet_image_to_write.connect(self.imageSaver_2.enqueue) - - self.liveControlWidget_2.signal_newExposureTime.connect(self.cameraSettingWidget_2.set_exposure_time) - self.liveControlWidget_2.signal_newAnalogGain.connect(self.cameraSettingWidget_2.set_analog_gain) - self.liveControlWidget_2.update_camera_settings() - - self.multipointController_2.image_to_display.connect(self.imageDisplayWindow_2.display_image) - self.multipointController_2.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.multipointController_2.signal_current_configuration.connect(self.liveControlWidget_2.set_microscope_mode) - - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController_1.stop_live() - self.camera_1.close() - self.imageSaver_1.close() - self.imageDisplayWindow_1.close() - self.liveController_2.stop_live() - self.camera_2.close() - self.imageSaver_2.close() - self.imageDisplayWindow_2.close() - self.imageArrayDisplayWindow.close() diff --git a/software/control/gui_2cameras_async_focus_tracking.py 
b/software/control/gui_2cameras_async_focus_tracking.py deleted file mode 100644 index 7ee0741fb..000000000 --- a/software/control/gui_2cameras_async_focus_tracking.py +++ /dev/null @@ -1,124 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.core_PDAF as core_PDAF -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera_1 = camera.Camera_Simulation(sn='FW0190110139') - self.camera_2 = camera.Camera_Simulation(sn='FU0190090030') - self.microcontroller = microcontroller.Microcontroller_Simulation() - - self.PDAFController = core_PDAF.PDAFController() - - self.streamHandler_1 = core.StreamHandler() - self.streamHandler_2 = core.StreamHandler() - self.liveController_1 = core.LiveController(self.camera_1,self.microcontroller) - self.liveController_2 = core.LiveController(self.camera_2,self.microcontroller) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera_1,self.navigationController,self.liveController_1) - self.trackingController = core.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver_1 = core.ImageSaver() - self.imageSaver_2 = core.ImageSaver() - self.imageDisplay_1 = core.ImageDisplay() - self.imageDisplay_2 = core.ImageDisplay() - - # open the cameras - self.camera_1.open() - self.camera_1.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_1.set_callback(self.streamHandler_1.on_new_frame) - self.camera_1.enable_callback() - - 
self.camera_2.open() - self.camera_2.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_2.set_callback(self.streamHandler_2.on_new_frame) - self.camera_2.enable_callback() - - # load widgets - self.cameraSettingWidget_1 = widgets.CameraSettingsWidget(self.camera_1,self.liveController_1) - self.liveControlWidget_1 = widgets.LiveControlWidget(self.streamHandler_1,self.liveController_1) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget_1 = widgets.RecordingWidget(self.streamHandler_1,self.imageSaver_1) - self.trackingControlWidget = widgets.TrackingControllerWidget(self.streamHandler_1,self.trackingController) - - self.cameraSettingWidget_2 = widgets.CameraSettingsWidget(self.camera_2,self.liveController_2) - self.liveControlWidget_2 = widgets.LiveControlWidget(self.streamHandler_2,self.liveController_2) - self.recordingControlWidget_2 = widgets.RecordingWidget(self.streamHandler_2,self.imageSaver_2) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - # layout.addWidget(self.cameraSettingWidget_1,0,0) - layout.addWidget(self.liveControlWidget_1,1,0) - # layout.addWidget(self.navigationWidget,2,0) - # layout.addWidget(self.autofocusWidget,3,0) - # layout.addWidget(self.recordingControlWidget_1,4,0) - - # layout.addWidget(self.cameraSettingWidget_2,5,0) - layout.addWidget(self.liveControlWidget_2,6,0) - # layout.addWidget(self.recordingControlWidget_2,7,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow_1 = core.ImageDisplayWindow() - self.imageDisplayWindow_1.show() - self.imageDisplayWindow_2 = core.ImageDisplayWindow() - self.imageDisplayWindow_2.show() - - # make connections - 
self.streamHandler_1.signal_new_frame_received.connect(self.liveController_1.on_new_frame) - self.streamHandler_1.image_to_display.connect(self.imageDisplay_1.enqueue) - self.streamHandler_1.packet_image_to_write.connect(self.imageSaver_1.enqueue) - self.streamHandler_1.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay_1.image_to_display.connect(self.imageDisplayWindow_1.display_image) # may connect streamHandler directly to imageDisplayWindow - - self.streamHandler_2.signal_new_frame_received.connect(self.liveController_2.on_new_frame) - self.streamHandler_2.image_to_display.connect(self.imageDisplay_2.enqueue) - self.streamHandler_2.packet_image_to_write.connect(self.imageSaver_2.enqueue) - self.imageDisplay_2.image_to_display.connect(self.imageDisplayWindow_2.display_image) # may connect streamHandler directly to imageDisplayWindow - - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow_1.display_image) - - self.streamHandler_1.image_to_display.connect(self.PDAFController.register_image_from_camera_1) - self.streamHandler_2.image_to_display.connect(self.PDAFController.register_image_from_camera_2) - - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController_1.stop_live() - self.camera_1.close() - self.imageSaver_1.close() - self.imageDisplay_1.close() - self.imageDisplayWindow_1.close() - self.liveController_2.stop_live() - self.camera_2.close() - self.imageSaver_2.close() - self.imageDisplay_2.close() - self.imageDisplayWindow_2.close() diff --git a/software/control/gui_2cameras_daheng_tis.py b/software/control/gui_2cameras_daheng_tis.py deleted file mode 100644 index 150fdacc6..000000000 --- 
a/software/control/gui_2cameras_daheng_tis.py +++ /dev/null @@ -1,127 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.camera_TIS as camera_tis -import control.core as core -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera_1 = camera.Camera() - self.camera_2 = camera_tis.Camera(sn=48910098) - self.microcontroller = microcontroller.Microcontroller_Simulation() - - self.streamHandler_1 = core.StreamHandler() - self.streamHandler_2 = core.StreamHandler() - self.liveController_1 = core.LiveController(self.camera_1,self.microcontroller) - self.liveController_2 = core.LiveController(self.camera_2,self.microcontroller) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera_1,self.navigationController,self.liveController_1) - self.trackingController = core.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver_1 = core.ImageSaver() - self.imageSaver_2 = core.ImageSaver() - self.imageDisplay_1 = core.ImageDisplay() - self.imageDisplay_2 = core.ImageDisplay() - - ''' - # thread - self.thread_multiPoint = QThread() - self.thread_multiPoint.start() - self.multipointController.moveToThread(self.thread_multiPoint) - ''' - - # open the camera - # camera start streaming - self.camera_1.open() - self.camera_1.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_1.set_callback(self.streamHandler_1.on_new_frame) - self.camera_1.enable_callback() - - self.camera_2.open() - 
self.camera_2.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_2.set_callback(self.streamHandler_2.on_new_frame) - self.camera_2.enable_callback() - - # load widgets - self.cameraSettingWidget_1 = widgets.CameraSettingsWidget(self.camera_1,self.liveController_1) - self.liveControlWidget_1 = widgets.LiveControlWidget(self.streamHandler_1,self.liveController_1) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget_1 = widgets.RecordingWidget(self.streamHandler_1,self.imageSaver_1) - self.trackingControlWidget = widgets.TrackingControllerWidget(self.streamHandler_1,self.trackingController) - - self.cameraSettingWidget_2 = widgets.CameraSettingsWidget(self.camera_2,self.liveController_2) - self.liveControlWidget_2 = widgets.LiveControlWidget(self.streamHandler_2,self.liveController_2) - self.recordingControlWidget_2 = widgets.RecordingWidget(self.streamHandler_2,self.imageSaver_2) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget_1,0,0) - layout.addWidget(self.liveControlWidget_1,1,0) - layout.addWidget(self.navigationWidget,2,0) - layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordingControlWidget_1,4,0) - - layout.addWidget(self.cameraSettingWidget_2,5,0) - layout.addWidget(self.liveControlWidget_2,6,0) - layout.addWidget(self.recordingControlWidget_2,7,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow_1 = core.ImageDisplayWindow() - self.imageDisplayWindow_1.show() - self.imageDisplayWindow_2 = core.ImageDisplayWindow() - self.imageDisplayWindow_2.show() - - # make connections - 
self.streamHandler_1.signal_new_frame_received.connect(self.liveController_1.on_new_frame) - self.streamHandler_1.image_to_display.connect(self.imageDisplay_1.enqueue) - self.streamHandler_1.packet_image_to_write.connect(self.imageSaver_1.enqueue) - self.streamHandler_1.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay_1.image_to_display.connect(self.imageDisplayWindow_1.display_image) # may connect streamHandler directly to imageDisplayWindow - - self.streamHandler_2.signal_new_frame_received.connect(self.liveController_2.on_new_frame) - self.streamHandler_2.image_to_display.connect(self.imageDisplay_2.enqueue) - self.streamHandler_2.packet_image_to_write.connect(self.imageSaver_2.enqueue) - self.imageDisplay_2.image_to_display.connect(self.imageDisplayWindow_2.display_image) # may connect streamHandler directly to imageDisplayWindow - - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow_1.display_image) - - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController_1.stop_live() - self.camera_1.close() - self.imageSaver_1.close() - self.imageDisplay_1.close() - self.imageDisplayWindow_1.close() - self.liveController_2.stop_live() - self.camera_2.close() - self.imageSaver_2.close() - self.imageDisplay_2.close() - self.imageDisplayWindow_2.close() diff --git a/software/control/gui_2cameras_sync.py b/software/control/gui_2cameras_sync.py deleted file mode 100644 index e6bde8fe8..000000000 --- a/software/control/gui_2cameras_sync.py +++ /dev/null @@ -1,164 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from 
qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller -import control.microcontroller2 as microcontroller2 -from control._def import * - -import pyqtgraph.dockarea as dock -SINGLE_WINDOW = True # set to False if use separate windows for display and control - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - channels = ['ch 1','ch 2'] - self.channels = channels - - self.imageDisplayWindow = {} - for i in range(len(channels)): - self.imageDisplayWindow[channels[i]] = core.ImageDisplayWindow(draw_crosshairs=True) - - # load objects - self.camera = {} - if is_simulation: - for i in range(len(channels)): - self.camera[channels[i]] = camera.Camera_Simulation(sn=CAMERA_SN[channels[i]],is_global_shutter=True,rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - self.microcontroller2 = microcontroller2.Microcontroller2_Simulation() - else: - for i in range(len(channels)): - self.camera[channels[i]] = camera.Camera(sn=CAMERA_SN[channels[i]],is_global_shutter=True,rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - self.microcontroller2 = microcontroller2.Microcontroller2() - - # open the camera - for i in range(len(channels)): - self.camera[channels[i]].open() - self.camera[channels[i]].set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - - # configure the actuators - self.microcontroller.configure_actuators() - - # navigation controller and widget - self.navigationController = core.NavigationController(self.microcontroller) - self.navigationWidget = 
widgets.NavigationWidget(self.navigationController) - - self.configurationManager = {} - self.streamHandler = {} - self.liveController = {} - self.imageSaver = {} - - self.cameraSettingWidget = {} - self.liveControlWidget = {} - self.cameraTabWidget = QTabWidget() - - for i in range(len(channels)): - # controllers - self.configurationManager[channels[i]] = core.ConfigurationManager(filename=str(Path.home()) + "/configurations_" + channels[i] + ".xml") - self.streamHandler[channels[i]] = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) - self.liveController[channels[i]] = core.LiveController(self.camera[channels[i]],self.microcontroller,self.configurationManager[channels[i]],use_internal_timer_for_hardware_trigger=False) - self.imageSaver[channels[i]] = core.ImageSaver(image_format=Acquisition.IMAGE_FORMAT) - # widgets - self.cameraSettingWidget[channels[i]] = widgets.CameraSettingsWidget(self.camera[channels[i]],include_gain_exposure_time=False) - self.liveControlWidget[channels[i]] = widgets.LiveControlWidget(self.streamHandler[channels[i]],self.liveController[channels[i]],self.configurationManager[channels[i]]) - # self.recordingControlWidget[channels[i]] = widgets.RecordingWidget(self.streamHandler[channels[i]],self.imageSaver[channels[i]]) - self.cameraTabWidget.addTab(self.liveControlWidget[channels[i]], channels[i]) - # self.liveControlWidget[channels[i]].setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) - # self.liveControlWidget[channels[i]].resize(self.liveControlWidget[channels[i]].minimumSizeHint()) - # self.liveControlWidget[channels[i]].adjustSize() - self.cameraTabWidget.resize(self.cameraTabWidget.minimumSizeHint()) - self.cameraTabWidget.adjustSize() - - # self.recordTabWidget = QTabWidget() - # for i in range(len(channels)): - # self.recordTabWidget.addTab(self.recordingControlWidget[channels[i]], "Simple Recording") - self.multiCameraRecordingWidget = 
widgets.MultiCameraRecordingWidget(self.streamHandler,self.imageSaver,self.channels) - - # trigger control - self.triggerControlWidget = widgets.TriggerControlWidget(self.microcontroller2) - - # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() - # layout.addWidget(self.cameraSettingWidget) - layout.addWidget(self.cameraTabWidget) - layout.addWidget(self.triggerControlWidget) - layout.addWidget(self.multiCameraRecordingWidget) - # layout.addWidget(self.navigationWidget) - # layout.addWidget(self.recordTabWidget) - layout.addStretch() - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - # self.centralWidget.setFixedSize(self.centralWidget.minimumSize()) - # self.centralWidget.setFixedWidth(self.centralWidget.minimumWidth()) - # self.centralWidget.setMaximumWidth(self.centralWidget.minimumWidth()) - self.centralWidget.setFixedWidth(self.centralWidget.minimumSizeHint().width()) - - dock_display = {} - for i in range(len(channels)): - dock_display[channels[i]] = dock.Dock('Image Display ' + channels[i] , autoOrientation = False) - dock_display[channels[i]].showTitleBar() - dock_display[channels[i]].addWidget(self.imageDisplayWindow[channels[i]].widget) - dock_display[channels[i]].setStretch(x=100,y=None) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - # dock_controlPanel.showTitleBar() - dock_controlPanel.addWidget(self.centralWidget) - dock_controlPanel.setStretch(x=1,y=None) - dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) - main_dockArea = dock.DockArea() - for i in range(len(channels)): - if i == 0: - main_dockArea.addDock(dock_display[channels[i]]) - else: - main_dockArea.addDock(dock_display[channels[i]],'right') - main_dockArea.addDock(dock_controlPanel,'right') - self.setCentralWidget(main_dockArea) - desktopWidget = QDesktopWidget() - height_min = 0.9*desktopWidget.height() - width_min = 0.96*desktopWidget.width() - 
self.setMinimumSize(width_min,height_min) - - # make connections - for i in range(len(channels)): - self.streamHandler[channels[i]].signal_new_frame_received.connect(self.liveController[channels[i]].on_new_frame) - self.streamHandler[channels[i]].image_to_display.connect(self.imageDisplayWindow[channels[i]].display_image) - self.streamHandler[channels[i]].packet_image_to_write.connect(self.imageSaver[channels[i]].enqueue) - self.liveControlWidget[channels[i]].signal_newExposureTime.connect(self.cameraSettingWidget[channels[i]].set_exposure_time) - self.liveControlWidget[channels[i]].signal_newAnalogGain.connect(self.cameraSettingWidget[channels[i]].set_analog_gain) - self.liveControlWidget[channels[i]].update_camera_settings() - self.triggerControlWidget.signal_toggle_live.connect(self.liveControlWidget[channels[i]].btn_live.setChecked) - self.triggerControlWidget.signal_toggle_live.connect(self.liveControlWidget[channels[i]].toggle_live) - self.triggerControlWidget.signal_trigger_mode.connect(self.liveControlWidget[channels[i]].set_trigger_mode) - self.triggerControlWidget.signal_trigger_fps.connect(self.liveControlWidget[channels[i]].entry_triggerFPS.setValue) - self.camera[channels[i]].set_callback(self.streamHandler[channels[i]].on_new_frame) - self.camera[channels[i]].enable_callback() - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - for i in range(len(self.channels)): - self.liveController[self.channels[i]].stop_live() - self.camera[self.channels[i]].close() - self.imageSaver[self.channels[i]].close() - self.microcontroller.close() diff --git a/software/control/gui_6060.py b/software/control/gui_6060.py deleted file mode 100644 index 
ed79252ed..000000000 --- a/software/control/gui_6060.py +++ /dev/null @@ -1,280 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller -from control._def import * - -import pyqtgraph.dockarea as dock -import time - -SINGLE_WINDOW = True # set to False if use separate windows for display and control - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.objectiveStore = core.ObjectiveStore() - self.objectivesWidget = widgets.ObjectivesWidget(self.objectiveStore) - - # load window - if ENABLE_TRACKING: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageDisplayWindow.show_ROI_selector() - else: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - # self.imageDisplayWindow.show() - # self.imageArrayDisplayWindow.show() - - # image display windows - self.imageDisplayTabs = QTabWidget() - self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") - self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") - - # load objects - if is_simulation: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - try: - self.camera = camera.Camera(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.camera.open() - except: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - 
self.camera.open() - print('! camera not detected, using simulated camera !') - try: - self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION) - except: - print('! Microcontroller not detected, using simulated microcontroller !') - self.microcontroller = microcontroller.Microcontroller_Simulation() - - # reset the MCU - self.microcontroller.reset() - - # reinitialize motor drivers and DAC (in particular for V2.1 driver board where PG is not functional) - self.microcontroller.initialize_drivers() - - # configure the actuators - self.microcontroller.configure_actuators() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller, parent=self) - self.slidePositionController = core.SlidePositionController(self.navigationController,self.liveController) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager,parent=self) - if ENABLE_TRACKING: - self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - self.navigationViewer = core.NavigationViewer() - - # retract the objective - self.navigationController.home_z() - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('z homing timeout, the program will exit') - exit() - print('objective 
retracted') - - # homing - self.navigationController.set_x_limit_pos_mm(100) - self.navigationController.set_x_limit_neg_mm(-100) - self.navigationController.set_y_limit_pos_mm(100) - self.navigationController.set_y_limit_neg_mm(-100) - print('start homing') - self.navigationController.home_y() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('y homing timeout, the program will exit') - exit() - self.navigationController.home_x() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('x homing timeout, the program will exit') - exit() - print('homing finished') - - # set software limit - self.navigationController.set_x_limit_pos_mm(SOFTWARE_POS_LIMIT.X_POSITIVE) - self.navigationController.set_x_limit_neg_mm(SOFTWARE_POS_LIMIT.X_NEGATIVE) - self.navigationController.set_y_limit_pos_mm(SOFTWARE_POS_LIMIT.Y_POSITIVE) - self.navigationController.set_y_limit_neg_mm(SOFTWARE_POS_LIMIT.Y_NEGATIVE) - - # move to center - self.navigationController.move_x(SLIDE_POSITION.SCANNING_X_MM) - while self.microcontroller.is_busy(): - time.sleep(0.005) - self.navigationController.move_y(SLIDE_POSITION.SCANNING_Y_MM) - while self.microcontroller.is_busy(): - time.sleep(0.005) - - # raise the objective - self.navigationController.move_z(DEFAULT_Z_POS_MM) - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 5: - print('z return timeout, the program will exit') - exit() - - # set software limit - self.navigationController.set_x_limit_pos_mm(SOFTWARE_POS_LIMIT.X_POSITIVE) - self.navigationController.set_x_limit_neg_mm(SOFTWARE_POS_LIMIT.X_NEGATIVE) - self.navigationController.set_y_limit_pos_mm(SOFTWARE_POS_LIMIT.Y_POSITIVE) - self.navigationController.set_y_limit_neg_mm(SOFTWARE_POS_LIMIT.Y_NEGATIVE) - self.navigationController.set_z_limit_pos_mm(SOFTWARE_POS_LIMIT.Z_POSITIVE) 
- - # open the camera - # camera start streaming - # self.camera.set_reverse_x(CAMERA_REVERSE_X) # these are not implemented for the cameras in use - # self.camera.set_reverse_y(CAMERA_REVERSE_Y) # these are not implemented for the cameras in use - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=False) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager,show_display_options=True) - self.navigationWidget = widgets.NavigationWidget(self.navigationController,self.slidePositionController,widget_configuration='malaria') - self.dacControlWidget = widgets.DACControWidget(self.microcontroller) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - if ENABLE_TRACKING: - self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - self.multiPointWidget2 = widgets.MultiPointWidget2(self.navigationController,self.navigationViewer,self.multipointController,self.configurationManager) - - self.recordTabWidget = QTabWidget() - if ENABLE_TRACKING: - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - self.recordTabWidget.addTab(self.multiPointWidget2, "Flexible Multipoint") - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - - # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() - 
layout.addWidget(self.cameraSettingWidget) - #self.objectivesWidget.setFixedHeight(100) - layout.addWidget(self.liveControlWidget) - layout.addWidget(self.navigationWidget) - if SHOW_DAC_CONTROL: - layout.addWidget(self.dacControlWidget) - layout.addWidget(self.autofocusWidget) - layout.addWidget(self.recordTabWidget) - layout.addWidget(self.navigationViewer) - layout.addWidget(self.objectivesWidget) - layout.addStretch() - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - # self.centralWidget.setFixedSize(self.centralWidget.minimumSize()) - # self.centralWidget.setFixedWidth(self.centralWidget.minimumWidth()) - # self.centralWidget.setMaximumWidth(self.centralWidget.minimumWidth()) - self.centralWidget.setFixedWidth(self.centralWidget.minimumSizeHint().width()) - - if SINGLE_WINDOW: - dock_display = dock.Dock('Image Display', autoOrientation = False) - dock_display.showTitleBar() - dock_display.addWidget(self.imageDisplayTabs) - dock_display.setStretch(x=100,y=None) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - # dock_controlPanel.showTitleBar() - dock_controlPanel.addWidget(self.centralWidget) - dock_controlPanel.setStretch(x=1,y=None) - dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) - main_dockArea = dock.DockArea() - main_dockArea.addDock(dock_display) - main_dockArea.addDock(dock_controlPanel,'right') - self.setCentralWidget(main_dockArea) - desktopWidget = QDesktopWidget() - height_min = 0.9*desktopWidget.height() - width_min = 0.96*desktopWidget.width() - self.setMinimumSize(int(width_min),int(height_min)) - else: - self.setCentralWidget(self.centralWidget) - self.tabbedImageDisplayWindow = QMainWindow() - self.tabbedImageDisplayWindow.setCentralWidget(self.imageDisplayTabs) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() & 
~Qt.WindowCloseButtonHint) - desktopWidget = QDesktopWidget() - width = 0.96*desktopWidget.height() - height = width - self.tabbedImageDisplayWindow.setFixedSize(width,height) - self.tabbedImageDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - # self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(lambda x:self.navigationWidget.label_Xpos.setText("{:.2f}".format(x))) - self.navigationController.yPos.connect(lambda x:self.navigationWidget.label_Ypos.setText("{:.2f}".format(x))) - self.navigationController.zPos.connect(lambda x:self.navigationWidget.label_Zpos.setText("{:.2f}".format(x))) - if ENABLE_TRACKING: - self.navigationController.signal_joystick_button_pressed.connect(self.trackingControlWidget.slot_joystick_button_pressed) - else: - self.navigationController.signal_joystick_button_pressed.connect(self.autofocusController.autofocus) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - 
self.slidePositionController.signal_slide_loading_position_reached.connect(self.navigationWidget.slot_slide_loading_position_reached) - self.slidePositionController.signal_slide_loading_position_reached.connect(self.multiPointWidget.disable_the_start_aquisition_button) - self.slidePositionController.signal_slide_scanning_position_reached.connect(self.navigationWidget.slot_slide_scanning_position_reached) - self.slidePositionController.signal_slide_scanning_position_reached.connect(self.multiPointWidget.enable_the_start_aquisition_button) - self.slidePositionController.signal_clear_slide.connect(self.navigationViewer.clear_slide) - - self.navigationController.xyPos.connect(self.navigationViewer.update_current_location) - self.multipointController.signal_register_current_fov.connect(self.navigationViewer.register_fov) - - self.imageDisplayWindow.image_click_coordinates.connect(self.navigationController.move_from_click) - self.navigationController.move_to_cached_position() - - def closeEvent(self, event): - self.navigationController.cache_current_position() - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - if not SINGLE_WINDOW: - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() - self.tabbedImageDisplayWindow.close() - self.microcontroller.close() diff --git a/software/control/gui_PDAF_calibration.py b/software/control/gui_PDAF_calibration.py deleted file mode 100644 index 69a8dbba4..000000000 --- a/software/control/gui_PDAF_calibration.py +++ /dev/null @@ -1,130 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy -from pathlib import Path - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import 
control.core as core -import control.core_PDAF as core_PDAF -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - if is_simulation: - self.microcontroller = microcontroller.Microcontroller_Simulation() - self.camera_1 = camera.Camera_Simulation(sn='FW0200050063') # tracking - self.camera_2 = camera.Camera_Simulation(sn='FW0200050068') # fluorescence - else: - self.microcontroller = microcontroller.Microcontroller() - self.camera_1 = camera.Camera(sn='FW0200050063') # tracking - self.camera_2 = camera.Camera(sn='FW0200050068') # fluorescence - - self.navigationController = core.NavigationController(self.microcontroller) - self.configurationManager = core.ConfigurationManager(filename=str(Path.home()) + "/configurations_PDAF.xml") - - self.streamHandler_1 = core.StreamHandler() - self.liveController_1 = core.LiveController(self.camera_1,self.microcontroller,self.configurationManager,control_illumination=False) - self.imageSaver_1 = core.ImageSaver() - - self.streamHandler_2 = core.StreamHandler() - self.liveController_2 = core.LiveController(self.camera_2,self.microcontroller,self.configurationManager,control_illumination=True) - self.imageSaver_2 = core.ImageSaver() - - self.twoCamerasPDAFCalibrationController = core_PDAF.TwoCamerasPDAFCalibrationController(self.camera_1,self.camera_2,self.navigationController,self.liveController_1,self.liveController_2,self.configurationManager) - - # open the camera - # camera start streaming - self.camera_1.open() - self.camera_1.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_1.set_callback(self.streamHandler_1.on_new_frame) - self.camera_1.enable_callback() - - self.camera_2.open() - self.camera_2.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - 
self.camera_2.set_callback(self.streamHandler_2.on_new_frame) - self.camera_2.enable_callback() - - # load widgets - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.cameraSettingWidget_1 = widgets.CameraSettingsWidget(self.camera_1,self.liveController_1) - self.liveControlWidget_1 = widgets.LiveControlWidget(self.streamHandler_1,self.liveController_1,self.configurationManager) - self.cameraSettingWidget_2 = widgets.CameraSettingsWidget(self.camera_2,self.liveController_2) - self.liveControlWidget_2 = widgets.LiveControlWidget(self.streamHandler_2,self.liveController_2,self.configurationManager) - - self.PDAFCalibrationWidget = widgets.MultiPointWidget(self.twoCamerasPDAFCalibrationController,self.configurationManager) - - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget_1,0,0) - layout.addWidget(self.liveControlWidget_1,1,0) - layout.addWidget(self.cameraSettingWidget_2,0,1) - layout.addWidget(self.liveControlWidget_2,1,1) - - layout.addWidget(self.navigationWidget,7,0) - layout.addWidget(self.PDAFCalibrationWidget,7,1) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow_1 = core.ImageDisplayWindow('camera 1') - self.imageDisplayWindow_1.show() - self.imageDisplayWindow_2 = core.ImageDisplayWindow('camera 2') - self.imageDisplayWindow_2.show() - - # make connections - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - - self.streamHandler_1.signal_new_frame_received.connect(self.liveController_1.on_new_frame) - self.streamHandler_1.image_to_display.connect(self.imageDisplayWindow_1.display_image) - 
self.streamHandler_1.packet_image_to_write.connect(self.imageSaver_1.enqueue) - #self.streamHandler_1.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - - self.liveControlWidget_1.signal_newExposureTime.connect(self.cameraSettingWidget_1.set_exposure_time) - self.liveControlWidget_1.signal_newAnalogGain.connect(self.cameraSettingWidget_1.set_analog_gain) - self.liveControlWidget_1.update_camera_settings() - - self.streamHandler_2.signal_new_frame_received.connect(self.liveController_2.on_new_frame) - self.streamHandler_2.image_to_display.connect(self.imageDisplayWindow_2.display_image) - self.streamHandler_2.packet_image_to_write.connect(self.imageSaver_2.enqueue) - - self.liveControlWidget_2.signal_newExposureTime.connect(self.cameraSettingWidget_2.set_exposure_time) - self.liveControlWidget_2.signal_newAnalogGain.connect(self.cameraSettingWidget_2.set_analog_gain) - self.liveControlWidget_2.update_camera_settings() - - self.twoCamerasPDAFCalibrationController.image_to_display_camera1.connect(self.imageDisplayWindow_1.display_image) - self.twoCamerasPDAFCalibrationController.image_to_display_camera2.connect(self.imageDisplayWindow_1.display_image) - self.twoCamerasPDAFCalibrationController.signal_current_configuration.connect(self.liveControlWidget_1.set_microscope_mode) - self.twoCamerasPDAFCalibrationController.signal_current_configuration.connect(self.liveControlWidget_2.set_microscope_mode) - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController_1.stop_live() - self.camera_1.close() - self.imageSaver_1.close() - self.imageDisplayWindow_1.close() - self.liveController_2.stop_live() - self.camera_2.close() - self.imageSaver_2.close() - self.imageDisplayWindow_2.close() diff --git a/software/control/gui_PDAF_demo.py b/software/control/gui_PDAF_demo.py deleted file mode 100644 index 6aa43d5e2..000000000 --- a/software/control/gui_PDAF_demo.py +++ /dev/null @@ -1,136 +0,0 @@ -# 
set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy -from pathlib import Path - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.core_PDAF as core_PDAF -import control.microcontroller as microcontroller - -class Internal_States(): - def __init__(self): - self.w = 500 - self.h = 500 - self.x = 1500 - self.y = 1500 - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation=False,*args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - if is_simulation: - self.microcontroller = microcontroller.Microcontroller_Simulation() - self.camera_1 = camera.Camera_Simulation(sn='FW0200050063') # tracking - self.camera_2 = camera.Camera_Simulation(sn='FW0200050068') # fluorescence - else: - self.microcontroller = microcontroller.Microcontroller() - self.camera_1 = camera.Camera(sn='FW0200050063') # tracking - self.camera_2 = camera.Camera(sn='FW0200050068') # fluorescence - - self.internal_states = Internal_States() - - self.navigationController = core.NavigationController(self.microcontroller) - self.PDAFController = core_PDAF.PDAFController(self.internal_states) - - self.configurationManager = core.ConfigurationManager(filename=str(Path.home()) + "/configurations_PDAF.xml") - - self.streamHandler_1 = core.StreamHandler() - self.liveController_1 = core.LiveController(self.camera_1,self.microcontroller,self.configurationManager,control_illumination=False) - self.imageSaver_1 = core.ImageSaver() - - self.streamHandler_2 = core.StreamHandler() - self.liveController_2 = core.LiveController(self.camera_2,self.microcontroller,self.configurationManager,control_illumination=True) - self.imageSaver_2 = core.ImageSaver() - - self.twoCamerasPDAFCalibrationController = 
core_PDAF.TwoCamerasPDAFCalibrationController(self.camera_1,self.camera_2,self.navigationController,self.liveController_1,self.liveController_2,self.configurationManager) - - # open the camera - # camera start streaming - self.camera_1.open() - self.camera_1.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_1.set_callback(self.streamHandler_1.on_new_frame) - self.camera_1.enable_callback() - - self.camera_2.open() - self.camera_2.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_2.set_callback(self.streamHandler_2.on_new_frame) - self.camera_2.enable_callback() - - # load widgets - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.cameraSettingWidget_1 = widgets.CameraSettingsWidget(self.camera_1,self.liveController_1) - self.liveControlWidget_1 = widgets.LiveControlWidget(self.streamHandler_1,self.liveController_1,self.configurationManager) - self.cameraSettingWidget_2 = widgets.CameraSettingsWidget(self.camera_2,self.liveController_2) - self.liveControlWidget_2 = widgets.LiveControlWidget(self.streamHandler_2,self.liveController_2,self.configurationManager) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget_1,0,0) - layout.addWidget(self.liveControlWidget_1,1,0) - layout.addWidget(self.cameraSettingWidget_2,0,1) - layout.addWidget(self.liveControlWidget_2,1,1) - - layout.addWidget(self.navigationWidget,7,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow_1 = core.ImageDisplayWindow('camera 1') - self.imageDisplayWindow_1.show() - self.imageDisplayWindow_2 = core.ImageDisplayWindow('camera 2') - self.imageDisplayWindow_2.show() - - # make connections - 
self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - - self.streamHandler_1.signal_new_frame_received.connect(self.liveController_1.on_new_frame) - self.streamHandler_1.image_to_display.connect(self.imageDisplayWindow_1.display_image) - self.streamHandler_1.packet_image_to_write.connect(self.imageSaver_1.enqueue) - #self.streamHandler_1.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - - self.liveControlWidget_1.signal_newExposureTime.connect(self.cameraSettingWidget_1.set_exposure_time) - self.liveControlWidget_1.signal_newAnalogGain.connect(self.cameraSettingWidget_1.set_analog_gain) - self.liveControlWidget_1.update_camera_settings() - - self.streamHandler_2.signal_new_frame_received.connect(self.liveController_2.on_new_frame) - self.streamHandler_2.image_to_display.connect(self.imageDisplayWindow_2.display_image) - self.streamHandler_2.packet_image_to_write.connect(self.imageSaver_2.enqueue) - - self.liveControlWidget_2.signal_newExposureTime.connect(self.cameraSettingWidget_2.set_exposure_time) - self.liveControlWidget_2.signal_newAnalogGain.connect(self.cameraSettingWidget_2.set_analog_gain) - self.liveControlWidget_2.update_camera_settings() - - self.streamHandler_1.image_to_display.connect(self.PDAFController.register_image_from_camera_1) - self.streamHandler_2.image_to_display.connect(self.PDAFController.register_image_from_camera_2) - - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController_1.stop_live() - self.camera_1.close() - self.imageSaver_1.close() - self.imageDisplayWindow_1.close() - self.liveController_2.stop_live() - self.camera_2.close() - self.imageSaver_2.close() - self.imageDisplayWindow_2.close() diff --git a/software/control/gui_camera_only.py 
b/software/control/gui_camera_only.py deleted file mode 100644 index 4da6a9fd4..000000000 --- a/software/control/gui_camera_only.py +++ /dev/null @@ -1,78 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera = camera.Camera() - self.microcontroller = microcontroller.Microcontroller_Simulation() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.recordingControlWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - 
self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow() - self.imageDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.imageDisplayWindow.close() \ No newline at end of file diff --git a/software/control/gui_camera_only_tiscamera.py b/software/control/gui_camera_only_tiscamera.py deleted file mode 100644 index ac364f335..000000000 --- a/software/control/gui_camera_only_tiscamera.py +++ /dev/null @@ -1,78 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera_TIS as camera -import control.core as core -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera = camera.Camera(sn='39810456',width=3072,height=2048,framerate=60,color=True) - self.microcontroller = 
microcontroller.Microcontroller_Simulation() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.recordingControlWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow() - self.imageDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - 
self.liveControlWidget.update_camera_settings() - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.imageDisplayWindow.close() diff --git a/software/control/gui_displacement_measurement.py b/software/control/gui_displacement_measurement.py deleted file mode 100644 index ce064d028..000000000 --- a/software/control/gui_displacement_measurement.py +++ /dev/null @@ -1,196 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.core_displacement_measurement as core_displacement_measurement -import control.microcontroller as microcontroller -from control._def import * - -import pyqtgraph.dockarea as dock -SINGLE_WINDOW = True # set to False if use separate windows for display and control - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load window - if ENABLE_TRACKING: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageDisplayWindow.show_ROI_selector() - else: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - # self.imageDisplayWindow.show() - # self.imageArrayDisplayWindow.show() - - # image display windows - self.imageDisplayTabs = QTabWidget() - self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") - self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") - - # load objects - if is_simulation: - self.camera = 
camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - self.camera = camera.Camera(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - try: - self.microcontroller = microcontroller.Microcontroller() - except: - print('! Microcontroller not detected, using simulated microcontroller !') - self.microcontroller = microcontroller.Microcontroller_Simulation() - - # configure the actuators - self.microcontroller.configure_actuators() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) - if ENABLE_TRACKING: - self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) - self.imageSaver = core.ImageSaver(image_format=Acquisition.IMAGE_FORMAT) - self.imageDisplay = core.ImageDisplay() - self.displacementMeasurementController = core_displacement_measurement.DisplacementMeasurementController() - - # open the camera - # camera start streaming - self.camera.open() - # self.camera.set_reverse_x(CAMERA_REVERSE_X) # these are not implemented for the cameras in use - # self.camera.set_reverse_y(CAMERA_REVERSE_Y) # these are not implemented for the cameras in use - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - 
self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - if ENABLE_STROBE_OUTPUT: - self.camera.set_line3_to_exposure_active() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=False) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.dacControlWidget = widgets.DACControWidget(self.microcontroller) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - if ENABLE_TRACKING: - self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - - self.recordTabWidget = QTabWidget() - if ENABLE_TRACKING: - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - - self.waveformDisplay = widgets.WaveformDisplay(N=1000) - self.displacementMeasurementWidget = widgets.DisplacementMeasurementWidget(self.displacementMeasurementController,self.waveformDisplay) - - # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget) - layout.addWidget(self.liveControlWidget) - layout.addWidget(self.navigationWidget) - if SHOW_DAC_CONTROL: - layout.addWidget(self.dacControlWidget) - layout.addWidget(self.autofocusWidget) - layout.addWidget(self.recordTabWidget) - layout.addStretch() - layout.addWidget(self.displacementMeasurementWidget) - - # transfer the layout to the central widget - 
self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - # self.centralWidget.setFixedSize(self.centralWidget.minimumSize()) - # self.centralWidget.setFixedWidth(self.centralWidget.minimumWidth()) - # self.centralWidget.setMaximumWidth(self.centralWidget.minimumWidth()) - self.centralWidget.setFixedWidth(self.centralWidget.minimumSizeHint().width()) - - if SINGLE_WINDOW: - dock_display = dock.Dock('Image Display', autoOrientation = False) - dock_display.showTitleBar() - dock_display.addWidget(self.imageDisplayTabs) - dock_display.setStretch(x=100,y=60) - dock_waveform = dock.Dock('Displacement Measurement', autoOrientation = False) - dock_waveform.showTitleBar() - dock_waveform.addWidget(self.waveformDisplay) - dock_waveform.setStretch(x=100,y=40) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - # dock_controlPanel.showTitleBar() - dock_controlPanel.addWidget(self.centralWidget) - dock_controlPanel.setStretch(x=1,y=None) - dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) - main_dockArea = dock.DockArea() - main_dockArea.addDock(dock_display) - main_dockArea.addDock(dock_waveform,'bottom') - main_dockArea.addDock(dock_controlPanel,'right') - self.setCentralWidget(main_dockArea) - desktopWidget = QDesktopWidget() - height_min = 0.9*desktopWidget.height() - width_min = 0.96*desktopWidget.width() - self.setMinimumSize(width_min,height_min) - else: - self.setCentralWidget(self.centralWidget) - self.tabbedImageDisplayWindow = QMainWindow() - self.tabbedImageDisplayWindow.setCentralWidget(self.imageDisplayTabs) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint) - desktopWidget = QDesktopWidget() - width = 0.96*desktopWidget.height() - height = width - self.tabbedImageDisplayWindow.setFixedSize(width,height) - self.tabbedImageDisplayWindow.show() - - # make connections - 
self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - # self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - if ENABLE_TRACKING: - self.navigationController.signal_joystick_button_pressed.connect(self.trackingControlWidget.slot_joystick_button_pressed) - else: - self.navigationController.signal_joystick_button_pressed.connect(self.autofocusController.autofocus) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - # displacement measurement - self.streamHandler.image_to_display.connect(self.displacementMeasurementController.update_measurement) - self.displacementMeasurementController.signal_plots.connect(self.waveformDisplay.plot) - self.displacementMeasurementController.signal_readings.connect(self.displacementMeasurementWidget.display_readings) - - def closeEvent(self, event): - 
event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - if not SINGLE_WINDOW: - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() - self.tabbedImageDisplayWindow.close() - self.microcontroller.close() diff --git a/software/control/gui_hcs.py b/software/control/gui_hcs.py index 5ecb5ee8d..fdda3e4df 100644 --- a/software/control/gui_hcs.py +++ b/software/control/gui_hcs.py @@ -1,7 +1,9 @@ # set QT_API environment variable -import os +import os os.environ["QT_API"] = "pyqt5" -import qtpy +import serial +import time +from typing import Optional # qt libraries from qtpy.QtCore import * @@ -12,18 +14,41 @@ # app specific libraries import control.widgets as widgets +import pyqtgraph.dockarea as dock +import squid.logging +import control.microscope + +log = squid.logging.get_logger(__name__) if CAMERA_TYPE == "Toupcam": try: import control.camera_toupcam as camera except: - print("Problem importing Toupcam, defaulting to default camera") + log.warning("Problem importing Toupcam, defaulting to default camera") import control.camera as camera elif CAMERA_TYPE == "FLIR": try: import control.camera_flir as camera except: - print("Problem importing FLIR camera, defaulting to default camera") + log.warning("Problem importing FLIR camera, defaulting to default camera") + import control.camera as camera +elif CAMERA_TYPE == "Hamamatsu": + try: + import control.camera_hamamatsu as camera + except: + log.warning("Problem importing Hamamatsu camera, defaulting to default camera") + import control.camera as camera +elif CAMERA_TYPE == "iDS": + try: + import control.camera_ids as camera + except: + log.warning("Problem importing iDS camera, defaulting to default camera") + import control.camera as camera +elif CAMERA_TYPE == "Tucsen": + try: + import control.camera_tucsen as camera + except: + log.warning("Problem 
importing Tucsen camera, defaulting to default camera") import control.camera as camera else: import control.camera as camera @@ -32,450 +57,656 @@ try: import control.camera_toupcam as camera_fc except: - print("Problem importing Toupcam for focus, defaulting to default camera") + log.warning("Problem importing Toupcam for focus, defaulting to default camera") import control.camera as camera_fc elif FOCUS_CAMERA_TYPE == "FLIR": try: import control.camera_flir as camera_fc except: - print("Problem importing FLIR camera for focus, defaulting to default camera") + log.warning("Problem importing FLIR camera for focus, defaulting to default camera") import control.camera as camera_fc else: import control.camera as camera_fc - +if USE_PRIOR_STAGE: + from control.stage_prior import PriorStage + from control.navigation_prior import NavigationController_PriorStage import control.core as core import control.microcontroller as microcontroller - import control.serial_peripherals as serial_peripherals +if ENABLE_STITCHER: + import control.stitcher as stitcher + if SUPPORT_LASER_AUTOFOCUS: import control.core_displacement_measurement as core_displacement_measurement -import pyqtgraph.dockarea as dock -import time - SINGLE_WINDOW = True # set to False if use separate windows for display and control -class OctopiGUI(QMainWindow): - # variables +class HighContentScreeningGui(QMainWindow): fps_software_trigger = 100 - def __init__(self, is_simulation = False, *args, **kwargs): + def __init__(self, is_simulation=False, live_only_mode=False, *args, **kwargs): super().__init__(*args, **kwargs) + self.microcontroller: Optional[microcontroller.Microcontroller] = None - # load window - if ENABLE_TRACKING: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageDisplayWindow.show_ROI_selector() - else: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True,show_LUT=True,autoLevels=True) - self.imageArrayDisplayWindow = 
core.ImageArrayDisplayWindow() - # self.imageDisplayWindow.show() - # self.imageArrayDisplayWindow.show() + self.log = squid.logging.get_logger(self.__class__.__name__) + self.live_only_mode = live_only_mode or LIVE_ONLY_MODE + self.performance_mode = False + self.napari_connections = {} - # image display windows - self.imageDisplayTabs = QTabWidget() - self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") - self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") - - self.objectiveStore = core.ObjectiveStore() + self.loadObjects(is_simulation) - # load objects - if is_simulation: - if ENABLE_SPINNING_DISK_CONFOCAL: - self.xlight = serial_peripherals.XLight_Simulation() - if SUPPORT_LASER_AUTOFOCUS: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.camera_focus = camera_fc.Camera_Simulation() - else: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - if ENABLE_SPINNING_DISK_CONFOCAL: - self.xlight = serial_peripherals.XLight() - try: - if SUPPORT_LASER_AUTOFOCUS: - sn_camera_main = camera.get_sn_by_model(MAIN_CAMERA_MODEL) - sn_camera_focus = camera_fc.get_sn_by_model(FOCUS_CAMERA_MODEL) - self.camera = camera.Camera(sn=sn_camera_main,rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.camera.open() - self.camera_focus = camera_fc.Camera(sn=sn_camera_focus) - self.camera_focus.open() - else: - self.camera = camera.Camera(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.camera.open() - except: - if SUPPORT_LASER_AUTOFOCUS: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.camera.open() - self.camera_focus = camera.Camera_Simulation() - self.camera_focus.open() - else: - self.camera = 
camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.camera.open() - print('! camera not detected, using simulated camera !') - self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION) + self.setupHardware() - # reset the MCU - self.microcontroller.reset() + self.loadWidgets() - # reinitialize motor drivers and DAC (in particular for V2.1 driver board where PG is not functional) - self.microcontroller.initialize_drivers() + self.setupLayout() - # configure the actuators - self.microcontroller.configure_actuators() + self.makeConnections() - self.configurationManager = core.ConfigurationManager(filename='./channel_configurations.xml') + self.microscope = control.microscope.Microscope(self) + + # Move to cached position + if HOMING_ENABLED_X and HOMING_ENABLED_Y and HOMING_ENABLED_Z: + self.navigationController.move_to_cached_position() + self.waitForMicrocontroller() + if ENABLE_WELLPLATE_MULTIPOINT: + self.wellplateMultiPointWidget.init_z() + self.flexibleMultiPointWidget.init_z() + + # Create the menu bar + menubar = self.menuBar() + settings_menu = menubar.addMenu("Settings") + if SUPPORT_SCIMICROSCOPY_LED_ARRAY: + led_matrix_action = QAction("LED Matrix", self) + led_matrix_action.triggered.connect(self.openLedMatrixSettings) + settings_menu.addAction(led_matrix_action) + def loadObjects(self, is_simulation): + if is_simulation: + self.loadSimulationObjects() + else: + try: + self.loadHardwareObjects() + except Exception: + self.log.error("---- !! ERROR CONNECTING TO HARDWARE !! 
----", stack_info=True, exc_info=True) + raise + + # Common object initialization + self.objectiveStore = core.ObjectiveStore(parent=self) + self.configurationManager = core.ConfigurationManager(filename='./channel_configurations.xml') + self.contrastManager = core.ContrastManager() self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller, parent=self) - self.slidePositionController = core.SlidePositionController(self.navigationController,self.liveController,is_for_wellplate=True) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) + self.liveController = core.LiveController(self.camera, self.microcontroller, self.configurationManager, parent=self) + if USE_PRIOR_STAGE: + self.navigationController = NavigationController_PriorStage(self.priorstage, self.microcontroller, self.objectiveStore, parent=self) + else: + self.navigationController = core.NavigationController(self.microcontroller, self.objectiveStore, parent=self) + self.slidePositionController = core.SlidePositionController(self.navigationController, self.liveController, is_for_wellplate=True) + self.autofocusController = core.AutoFocusController(self.camera, self.navigationController, self.liveController) self.scanCoordinates = core.ScanCoordinates() - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager,scanCoordinates=self.scanCoordinates,parent=self) - if ENABLE_TRACKING: - self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) + self.multipointController = 
core.MultiPointController(self.camera, self.navigationController, self.liveController, self.autofocusController, self.configurationManager, scanCoordinates=self.scanCoordinates, parent=self) self.imageSaver = core.ImageSaver() self.imageDisplay = core.ImageDisplay() - self.navigationViewer = core.NavigationViewer(sample=str(WELLPLATE_FORMAT)+' well plate') - ''' - if HOMING_ENABLED_Z: - # retract the objective - self.navigationController.home_z() - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('z homing timeout, the program will exit') - exit() - print('objective retracted') - - if HOMING_ENABLED_Z and HOMING_ENABLED_X and HOMING_ENABLED_Y: - # self.navigationController.set_x_limit_pos_mm(100) - # self.navigationController.set_x_limit_neg_mm(-100) - # self.navigationController.set_y_limit_pos_mm(100) - # self.navigationController.set_y_limit_neg_mm(-100) - # self.navigationController.home_xy() - # for the new design, need to home y before home x; x also needs to be at > + 10 mm when homing y - self.navigationController.move_x(12) - while self.microcontroller.is_busy(): # to do, add a blocking option move_x() - time.sleep(0.005) - - self.navigationController.home_y() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('y homing timeout, the program will exit') - exit() - - self.navigationController.home_x() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('x homing timeout, the program will exit') - exit() - - print('xy homing completed') - - # move to (20 mm, 20 mm) - self.navigationController.move_x(20) - while self.microcontroller.is_busy(): - time.sleep(0.005) - self.navigationController.move_y(20) - while self.microcontroller.is_busy(): - time.sleep(0.005) + if ENABLE_TRACKING: + self.trackingController = 
core.TrackingController(self.camera, self.microcontroller, self.navigationController, self.configurationManager, self.liveController, self.autofocusController, self.imageDisplayWindow) + if WELLPLATE_FORMAT == 'glass slide': + self.navigationViewer = core.NavigationViewer(self.objectiveStore, sample='4 glass slide') + else: + self.navigationViewer = core.NavigationViewer(self.objectiveStore, sample=WELLPLATE_FORMAT) + + if SUPPORT_LASER_AUTOFOCUS: + self.configurationManager_focus_camera = core.ConfigurationManager(filename='./focus_camera_configurations.xml') + self.streamHandler_focus_camera = core.StreamHandler() + self.liveController_focus_camera = core.LiveController(self.camera_focus,self.microcontroller,self.configurationManager_focus_camera, self, control_illumination=False,for_displacement_measurement=True) + self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager,scanCoordinates=self.scanCoordinates,parent=self) + self.imageDisplayWindow_focus = core.ImageDisplayWindow(draw_crosshairs=True, show_LUT=False, autoLevels=False) + self.displacementMeasurementController = core_displacement_measurement.DisplacementMeasurementController() + self.laserAutofocusController = core.LaserAutofocusController(self.microcontroller,self.camera_focus,self.liveController_focus_camera,self.navigationController,has_two_interfaces=HAS_TWO_INTERFACES,use_glass_top=USE_GLASS_TOP,look_for_cache=False) + + def loadSimulationObjects(self): + self.log.debug("Loading simulated hardware objects...") + # Initialize simulation objects + if ENABLE_SPINNING_DISK_CONFOCAL: + self.xlight = serial_peripherals.XLight_Simulation() + if ENABLE_NL5: + import control.NL5 as NL5 + self.nl5 = NL5.NL5_Simulation() + if ENABLE_CELLX: + self.cellx = serial_peripherals.CellX_Simulation() + if SUPPORT_LASER_AUTOFOCUS: + self.camera_focus = camera_fc.Camera_Simulation() + if USE_LDI_SERIAL_CONTROL: + 
self.ldi = serial_peripherals.LDI_Simulation() + self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE, flip_image=FLIP_IMAGE) + self.camera.set_pixel_format(DEFAULT_PIXEL_FORMAT) + if USE_ZABER_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel = serial_peripherals.FilterController_Simulation(115200, 8, serial.PARITY_NONE, serial.STOPBITS_ONE) + if USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel = serial_peripherals.Optospin_Simulation(SN=None) + self.microcontroller = microcontroller.Microcontroller(existing_serial=microcontroller.SimSerial(),is_simulation=True) + + def loadHardwareObjects(self): + # Initialize hardware objects + if ENABLE_SPINNING_DISK_CONFOCAL: + try: + self.xlight = serial_peripherals.XLight(XLIGHT_SERIAL_NUMBER, XLIGHT_SLEEP_TIME_FOR_WHEEL) + except Exception: + self.log.error("Error initializing Spinning Disk Confocal") + raise + + if ENABLE_NL5: + try: + import control.NL5 as NL5 + self.nl5 = NL5.NL5() + except Exception: + self.log.error("Error initializing NL5") + raise + + if ENABLE_CELLX: + try: + self.cellx = serial_peripherals.CellX(CELLX_SN) + for channel in [1,2,3,4]: + self.cellx.set_modulation(channel, CELLX_MODULATION) + self.cellx.turn_on(channel) + except Exception: + self.log.error("Error initializing CellX") + raise + + if USE_LDI_SERIAL_CONTROL: + try: + self.ldi = serial_peripherals.LDI() + self.ldi.run() + self.ldi.set_intensity_mode(LDI_INTENSITY_MODE) + self.ldi.set_shutter_mode(LDI_SHUTTER_MODE) + except Exception: + self.log.error("Error initializing LDI") + raise + + if SUPPORT_LASER_AUTOFOCUS: + try: + sn_camera_focus = camera_fc.get_sn_by_model(FOCUS_CAMERA_MODEL) + self.camera_focus = camera_fc.Camera(sn=sn_camera_focus) + self.camera_focus.open() + self.camera_focus.set_pixel_format('MONO8') + except Exception: + self.log.error(f"Error initializing Laser Autofocus Camera") + raise + + try: + sn_camera_main = camera.get_sn_by_model(MAIN_CAMERA_MODEL) + self.camera = 
camera.Camera(sn=sn_camera_main, rotate_image_angle=ROTATE_IMAGE_ANGLE, flip_image=FLIP_IMAGE) + self.camera.open() + self.camera.set_pixel_format(DEFAULT_PIXEL_FORMAT) + except Exception: + self.log.error("Error initializing Main Camera") + raise + + if USE_ZABER_EMISSION_FILTER_WHEEL: + try: + self.emission_filter_wheel = serial_peripherals.FilterController(FILTER_CONTROLLER_SERIAL_NUMBER, 115200, 8, serial.PARITY_NONE, serial.STOPBITS_ONE) + except Exception: + self.log.error("Error initializing Zaber Emission Filter Wheel") + raise + + if USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + try: + self.emission_filter_wheel = serial_peripherals.Optospin(SN=FILTER_CONTROLLER_SERIAL_NUMBER) + except Exception: + self.log.error("Error initializing Optospin Emission Filter Wheel") + raise + + if USE_PRIOR_STAGE: + try: + self.priorstage = PriorStage(PRIOR_STAGE_SN, parent=self) + except Exception: + self.log.error("Error initializing Prior Stage") + raise + + try: + self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION, sn=CONTROLLER_SN) + except Exception: + self.log.error(f"Error initializing Microcontroller") + raise + + def setupHardware(self): + # Setup hardware components + if USE_ZABER_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel.start_homing() + if USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel.set_speed(OPTOSPIN_EMISSION_FILTER_WHEEL_SPEED_HZ) + + if not self.microcontroller: + raise ValueError("Microcontroller must be none-None for hardware setup.") + + try: + self.microcontroller.reset() + time.sleep(0.5) + self.microcontroller.initialize_drivers() + time.sleep(0.5) + self.microcontroller.configure_actuators() + + if HAS_ENCODER_X: + self.navigationController.set_axis_PID_arguments(0, PID_P_X, PID_I_X, PID_D_X) + self.navigationController.configure_encoder(0, (SCREW_PITCH_X_MM * 1000) / ENCODER_RESOLUTION_UM_X, ENCODER_FLIP_DIR_X) + self.navigationController.set_pid_control_enable(0, ENABLE_PID_X) + if HAS_ENCODER_Y: 
+ self.navigationController.set_axis_PID_arguments(1, PID_P_Y, PID_I_Y, PID_D_Y) + self.navigationController.configure_encoder(1, (SCREW_PITCH_Y_MM * 1000) / ENCODER_RESOLUTION_UM_Y, ENCODER_FLIP_DIR_Y) + self.navigationController.set_pid_control_enable(1, ENABLE_PID_Y) + if HAS_ENCODER_Z: + self.navigationController.set_axis_PID_arguments(2, PID_P_Z, PID_I_Z, PID_D_Z) + self.navigationController.configure_encoder(2, (SCREW_PITCH_Z_MM * 1000) / ENCODER_RESOLUTION_UM_Z, ENCODER_FLIP_DIR_Z) + self.navigationController.set_pid_control_enable(2, ENABLE_PID_Z) + time.sleep(0.5) self.navigationController.set_x_limit_pos_mm(SOFTWARE_POS_LIMIT.X_POSITIVE) self.navigationController.set_x_limit_neg_mm(SOFTWARE_POS_LIMIT.X_NEGATIVE) self.navigationController.set_y_limit_pos_mm(SOFTWARE_POS_LIMIT.Y_POSITIVE) self.navigationController.set_y_limit_neg_mm(SOFTWARE_POS_LIMIT.Y_NEGATIVE) self.navigationController.set_z_limit_pos_mm(SOFTWARE_POS_LIMIT.Z_POSITIVE) - - if HOMING_ENABLED_Z: - # move the objective back - self.navigationController.move_z(DEFAULT_Z_POS_MM) - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 5: - print('z return timeout, the program will exit') - exit() - ''' - - # retract the objective - self.navigationController.home_z() - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('z homing timeout, the program will exit') - exit() - print('objective retracted') - self.navigationController.set_z_limit_pos_mm(SOFTWARE_POS_LIMIT.Z_POSITIVE) - - # home XY, set zero and set software limit - print('home xy') - timestamp_start = time.time() - # x needs to be at > + 20 mm when homing y - self.navigationController.move_x(20) # to-do: add blocking code - while self.microcontroller.is_busy(): - time.sleep(0.005) - # home y - self.navigationController.home_y() - t0 = time.time() - while 
self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('y homing timeout, the program will exit') - exit() - self.navigationController.zero_y() - # home x - self.navigationController.home_x() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('y homing timeout, the program will exit') - exit() - self.navigationController.zero_x() - self.slidePositionController.homing_done = True - - # move to scanning position - self.navigationController.move_x(20) - while self.microcontroller.is_busy(): - time.sleep(0.005) - self.navigationController.move_y(20) - while self.microcontroller.is_busy(): - time.sleep(0.005) - - # move z - self.navigationController.move_z_to(DEFAULT_Z_POS_MM) - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 5: - print('z return timeout, the program will exit') - exit() - - # open the camera - # camera start streaming - # self.camera.set_reverse_x(CAMERA_REVERSE_X) # these are not implemented for the cameras in use - # self.camera.set_reverse_y(CAMERA_REVERSE_Y) # these are not implemented for the cameras in use - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() + self.navigationController.set_z_limit_neg_mm(SOFTWARE_POS_LIMIT.Z_NEGATIVE) + + if HOMING_ENABLED_Z: + self.navigationController.home_z() + self.waitForMicrocontroller(10, 'z homing timeout') + if HOMING_ENABLED_X and HOMING_ENABLED_Y: + self.navigationController.move_x(20) + self.waitForMicrocontroller() + self.navigationController.home_y() + self.waitForMicrocontroller(10, 'y homing timeout') + self.navigationController.zero_y() + self.navigationController.home_x() + self.waitForMicrocontroller(10, 'x homing timeout') + self.navigationController.zero_x() + self.slidePositionController.homing_done = True + if USE_ZABER_EMISSION_FILTER_WHEEL: + 
self.emission_filter_wheel.wait_for_homing_complete() + if HOMING_ENABLED_X and HOMING_ENABLED_Y: + self.navigationController.move_x(20) + self.waitForMicrocontroller() + self.navigationController.move_y(20) + self.waitForMicrocontroller() + + if ENABLE_OBJECTIVE_PIEZO: + OUTPUT_GAINS.CHANNEL7_GAIN = (OBJECTIVE_PIEZO_CONTROL_VOLTAGE_RANGE == 5) + div = 1 if OUTPUT_GAINS.REFDIV else 0 + gains = sum(getattr(OUTPUT_GAINS, f'CHANNEL{i}_GAIN') << i for i in range(8)) + self.microcontroller.configure_dac80508_refdiv_and_gain(div, gains) + self.microcontroller.set_dac80508_scaling_factor_for_illumination(ILLUMINATION_INTENSITY_FACTOR) + except TimeoutError as e: + # If we can't recover from a timeout, at least do our best to make sure the system is left in a safe + # and restartable state. + self.log.error("Setup timed out, resetting microcontroller before failing gui setup") + self.microcontroller.reset() + raise e + self.camera.set_software_triggered_acquisition() self.camera.set_callback(self.streamHandler.on_new_frame) self.camera.enable_callback() - # load widgets + if CAMERA_TYPE == "Toupcam": + self.camera.set_reset_strobe_delay_function(self.liveController.reset_strobe_arugment) + + if SUPPORT_LASER_AUTOFOCUS: + self.camera_focus.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() + self.camera_focus.set_callback(self.streamHandler_focus_camera.on_new_frame) + self.camera_focus.enable_callback() + self.camera_focus.start_streaming() + + def waitForMicrocontroller(self, timeout=5.0, error_message=None): + try: + self.microcontroller.wait_till_operation_is_completed(timeout) + except TimeoutError as e: + self.log.error(error_message or "Microcontroller operation timed out!") + raise e + + def loadWidgets(self): + # Initialize all GUI widgets if ENABLE_SPINNING_DISK_CONFOCAL: self.spinningDiskConfocalWidget = widgets.SpinningDiskConfocalWidget(self.xlight, self.configurationManager) + if ENABLE_NL5: + import control.NL5Widget as NL5Widget + 
self.nl5Wdiget = NL5Widget.NL5Widget(self.nl5) if CAMERA_TYPE == "Toupcam": - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=True, include_camera_temperature_setting = True) + self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera, include_gain_exposure_time=False, include_camera_temperature_setting=True, include_camera_auto_wb_setting=False) else: - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera, include_gain_exposure_time=True, include_camera_temperature_setting=False) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager,show_display_options=True,show_autolevel=True,autolevel=True) - self.navigationWidget = widgets.NavigationWidget(self.navigationController,self.slidePositionController,widget_configuration='384 well plate') + self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera, include_gain_exposure_time=False, include_camera_temperature_setting=False, include_camera_auto_wb_setting=True) + self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler, self.liveController, self.configurationManager, show_display_options=True, show_autolevel=True, autolevel=True) + self.navigationWidget = widgets.NavigationWidget(self.navigationController, self.slidePositionController, widget_configuration=f'{WELLPLATE_FORMAT} well plate') + self.navigationBarWidget = widgets.NavigationBarWidget(self.navigationController, self.slidePositionController, add_z_buttons=False) self.dacControlWidget = widgets.DACControWidget(self.microcontroller) self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) + self.piezoWidget = widgets.PiezoWidget(self.navigationController) + self.objectivesWidget = widgets.ObjectivesWidget(self.objectiveStore) + + if USE_ZABER_EMISSION_FILTER_WHEEL or 
USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.filterControllerWidget = widgets.FilterControllerWidget(self.emission_filter_wheel, self.liveController) + + self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler, self.imageSaver) + self.wellplateFormatWidget = widgets.WellplateFormatWidget(self.navigationController, self.navigationViewer, self.streamHandler, self.liveController) + if WELLPLATE_FORMAT != '1536 well plate': + self.wellSelectionWidget = widgets.WellSelectionWidget(WELLPLATE_FORMAT, self.wellplateFormatWidget) + else: + self.wellSelectionWidget = widgets.Well1536SelectionWidget() + self.scanCoordinates.add_well_selector(self.wellSelectionWidget) + + if SUPPORT_LASER_AUTOFOCUS: + if FOCUS_CAMERA_TYPE == "Toupcam": + self.cameraSettingWidget_focus_camera = widgets.CameraSettingsWidget(self.camera_focus, include_gain_exposure_time = False, include_camera_temperature_setting = True, include_camera_auto_wb_setting = False) + else: + self.cameraSettingWidget_focus_camera = widgets.CameraSettingsWidget(self.camera_focus, include_gain_exposure_time = False, include_camera_temperature_setting = False, include_camera_auto_wb_setting = True) + self.liveControlWidget_focus_camera = widgets.LiveControlWidget(self.streamHandler_focus_camera,self.liveController_focus_camera,self.configurationManager_focus_camera, stretch=False) #,show_display_options=True) + self.waveformDisplay = widgets.WaveformDisplay(N=1000,include_x=True,include_y=False) + self.displacementMeasurementWidget = widgets.DisplacementMeasurementWidget(self.displacementMeasurementController,self.waveformDisplay) + self.laserAutofocusControlWidget = widgets.LaserAutofocusControlWidget(self.laserAutofocusController) + + self.imageDisplayWindow_focus = core.ImageDisplayWindow(draw_crosshairs=True) + self.waveformDisplay = widgets.WaveformDisplay(N=1000, include_x=True, include_y=False) + self.displacementMeasurementWidget = 
widgets.DisplacementMeasurementWidget(self.displacementMeasurementController, self.waveformDisplay) + self.laserAutofocusControlWidget = widgets.LaserAutofocusControlWidget(self.laserAutofocusController) + + self.imageDisplayTabs = QTabWidget() + if self.live_only_mode: + if ENABLE_TRACKING: + self.imageDisplayWindow = core.ImageDisplayWindow(self.liveController, self.contrastManager, draw_crosshairs=True) + self.imageDisplayWindow.show_ROI_selector() + else: + self.imageDisplayWindow = core.ImageDisplayWindow(self.liveController, self.contrastManager, draw_crosshairs=True, show_LUT=True, autoLevels=True) + #self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") + self.imageDisplayTabs = self.imageDisplayWindow.widget + self.napariMosaicDisplayWidget = None + else: + self.setupImageDisplayTabs() + + self.flexibleMultiPointWidget = widgets.FlexibleMultiPointWidget(self.navigationController, self.navigationViewer, self.multipointController, self.objectiveStore, self.configurationManager, scanCoordinates=None) + self.wellplateMultiPointWidget = widgets.WellplateMultiPointWidget(self.navigationController, self.navigationViewer, self.multipointController, self.objectiveStore, self.configurationManager, self.scanCoordinates, self.napariMosaicDisplayWidget) + self.sampleSettingsWidget = widgets.SampleSettingsWidget(self.objectivesWidget, self.wellplateFormatWidget) + if ENABLE_TRACKING: - self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - self.multiPointWidget2 = widgets.MultiPointWidget2(self.navigationController,self.navigationViewer,self.multipointController,self.configurationManager) + self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController, self.configurationManager, 
show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) + if ENABLE_STITCHER: + self.stitcherWidget = widgets.StitcherWidget(self.configurationManager, self.contrastManager) self.recordTabWidget = QTabWidget() - if ENABLE_TRACKING: - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - #self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint (Wellplate)") - self.wellSelectionWidget = widgets.WellSelectionWidget(WELLPLATE_FORMAT) - self.scanCoordinates.add_well_selector(self.wellSelectionWidget) + self.setupRecordTabWidget() + + self.cameraTabWidget = QTabWidget() + self.setupCameraTabWidget() + + def setupImageDisplayTabs(self): + if USE_NAPARI_FOR_LIVE_VIEW: + self.napariLiveWidget = widgets.NapariLiveWidget(self.streamHandler, self.liveController, self.navigationController, self.configurationManager, self.contrastManager, self.wellSelectionWidget) + self.imageDisplayTabs.addTab(self.napariLiveWidget, "Live View") + else: + if ENABLE_TRACKING: + self.imageDisplayWindow = core.ImageDisplayWindow(self.liveController, self.contrastManager, draw_crosshairs=True) + self.imageDisplayWindow.show_ROI_selector() + else: + self.imageDisplayWindow = core.ImageDisplayWindow(self.liveController, self.contrastManager, draw_crosshairs=True, show_LUT=True, autoLevels=True) + self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") + + if not self.live_only_mode: + if USE_NAPARI_FOR_MULTIPOINT: + self.napariMultiChannelWidget = widgets.NapariMultiChannelWidget(self.objectiveStore, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariMultiChannelWidget, "Multichannel Acquisition") + else: + self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() + self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") + + if SHOW_TILED_PREVIEW: + if USE_NAPARI_FOR_TILED_DISPLAY: + self.napariTiledDisplayWidget = 
widgets.NapariTiledDisplayWidget(self.objectiveStore, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariTiledDisplayWidget, "Tiled Preview") + else: + self.imageDisplayWindow_scan_preview = core.ImageDisplayWindow(draw_crosshairs=True) + self.imageDisplayTabs.addTab(self.imageDisplayWindow_scan_preview.widget, "Tiled Preview") + + if USE_NAPARI_FOR_MOSAIC_DISPLAY: + self.napariMosaicDisplayWidget = widgets.NapariMosaicDisplayWidget(self.objectiveStore, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariMosaicDisplayWidget, "Mosaic View") + + if SUPPORT_LASER_AUTOFOCUS: + dock_laserfocus_image_display = dock.Dock('Focus Camera Image Display', autoOrientation=False) + dock_laserfocus_image_display.showTitleBar() + dock_laserfocus_image_display.addWidget(self.imageDisplayWindow_focus.widget) + dock_laserfocus_image_display.setStretch(x=100, y=100) + + dock_laserfocus_liveController = dock.Dock('Focus Camera Controller', autoOrientation=False) + dock_laserfocus_liveController.showTitleBar() + dock_laserfocus_liveController.addWidget(self.liveControlWidget_focus_camera) + dock_laserfocus_liveController.setStretch(x=100, y=100) + dock_laserfocus_liveController.setFixedWidth(self.liveControlWidget_focus_camera.minimumSizeHint().width()) + + dock_waveform = dock.Dock('Displacement Measurement', autoOrientation=False) + dock_waveform.showTitleBar() + dock_waveform.addWidget(self.waveformDisplay) + dock_waveform.setStretch(x=100, y=40) + + dock_displayMeasurement = dock.Dock('Displacement Measurement Control', autoOrientation=False) + dock_displayMeasurement.showTitleBar() + dock_displayMeasurement.addWidget(self.displacementMeasurementWidget) + dock_displayMeasurement.setStretch(x=100, y=40) + dock_displayMeasurement.setFixedWidth(self.displacementMeasurementWidget.minimumSizeHint().width()) + + laserfocus_dockArea = dock.DockArea() + laserfocus_dockArea.addDock(dock_laserfocus_image_display) + 
laserfocus_dockArea.addDock(dock_laserfocus_liveController, 'right', relativeTo=dock_laserfocus_image_display) + if SHOW_LEGACY_DISPLACEMENT_MEASUREMENT_WINDOWS: + laserfocus_dockArea.addDock(dock_waveform, 'bottom', relativeTo=dock_laserfocus_liveController) + laserfocus_dockArea.addDock(dock_displayMeasurement, 'bottom', relativeTo=dock_waveform) + + self.imageDisplayTabs.addTab(laserfocus_dockArea, "Laser-Based Focus") + def setupRecordTabWidget(self): + if ENABLE_WELLPLATE_MULTIPOINT: + self.recordTabWidget.addTab(self.wellplateMultiPointWidget, "Wellplate Multipoint") if ENABLE_FLEXIBLE_MULTIPOINT: - self.recordTabWidget.addTab(self.multiPointWidget2, "Flexible Multipoint") + self.recordTabWidget.addTab(self.flexibleMultiPointWidget, "Flexible Multipoint") + if ENABLE_TRACKING: + self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") + if ENABLE_RECORDING: + self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") + self.recordTabWidget.currentChanged.connect(lambda: self.resizeCurrentTab(self.recordTabWidget)) + self.resizeCurrentTab(self.recordTabWidget) + + def setupCameraTabWidget(self): + if not USE_NAPARI_FOR_LIVE_CONTROL or self.live_only_mode: + self.cameraTabWidget.addTab(self.navigationWidget, "Stages") + if ENABLE_OBJECTIVE_PIEZO: + self.cameraTabWidget.addTab(self.piezoWidget, "Piezo") + if ENABLE_NL5: + self.cameraTabWidget.addTab(self.nl5Wdiget, "NL5") if ENABLE_SPINNING_DISK_CONFOCAL: - self.recordTabWidget.addTab(self.spinningDiskConfocalWidget,"Spinning Disk Confocal") + self.cameraTabWidget.addTab(self.spinningDiskConfocalWidget, "Confocal") + if USE_ZABER_EMISSION_FILTER_WHEEL or USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.cameraTabWidget.addTab(self.filterControllerWidget, "Emission Filter") + self.cameraTabWidget.addTab(self.cameraSettingWidget, 'Camera') + self.cameraTabWidget.addTab(self.autofocusWidget, "Contrast AF") + if SUPPORT_LASER_AUTOFOCUS: + 
self.cameraTabWidget.addTab(self.laserAutofocusControlWidget, "Laser AF") + self.cameraTabWidget.currentChanged.connect(lambda: self.resizeCurrentTab(self.cameraTabWidget)) + self.resizeCurrentTab(self.cameraTabWidget) + + def setupLayout(self): + layout = QVBoxLayout() + + if USE_NAPARI_FOR_LIVE_CONTROL and not self.live_only_mode: + layout.addWidget(self.navigationWidget) + else: + layout.addWidget(self.liveControlWidget) + + layout.addWidget(self.cameraTabWidget) - # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() - #layout.addWidget(self.cameraSettingWidget) - layout.addWidget(self.liveControlWidget) - layout.addWidget(self.navigationWidget) if SHOW_DAC_CONTROL: layout.addWidget(self.dacControlWidget) - layout.addWidget(self.autofocusWidget) + layout.addWidget(self.recordTabWidget) + + if ENABLE_STITCHER: + layout.addWidget(self.stitcherWidget) + self.stitcherWidget.hide() + + layout.addWidget(self.sampleSettingsWidget) layout.addWidget(self.navigationViewer) - layout.addStretch() - # transfer the layout to the central widget + # Add performance mode toggle button + if not self.live_only_mode: + self.performanceModeToggle = QPushButton("Enable Performance Mode") + self.performanceModeToggle.setCheckable(True) + self.performanceModeToggle.setChecked(self.performance_mode) + self.performanceModeToggle.clicked.connect(self.togglePerformanceMode) + layout.addWidget(self.performanceModeToggle) + self.centralWidget = QWidget() self.centralWidget.setLayout(layout) - # self.centralWidget.setFixedSize(self.centralWidget.minimumSize()) - # self.centralWidget.setFixedWidth(self.centralWidget.minimumWidth()) - # self.centralWidget.setMaximumWidth(self.centralWidget.minimumWidth()) self.centralWidget.setFixedWidth(self.centralWidget.minimumSizeHint().width()) if SINGLE_WINDOW: - dock_display = dock.Dock('Image Display', autoOrientation = False) - dock_display.showTitleBar() - dock_display.addWidget(self.imageDisplayTabs) - 
dock_display.setStretch(x=100,y=100) - dock_wellSelection = dock.Dock('Well Selector', autoOrientation = False) - dock_wellSelection.showTitleBar() - dock_wellSelection.addWidget(self.wellSelectionWidget) - dock_wellSelection.setFixedHeight(dock_wellSelection.minimumSizeHint().height()) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - # dock_controlPanel.showTitleBar() - dock_controlPanel.addWidget(self.centralWidget) - dock_controlPanel.setStretch(x=1,y=None) - dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) - main_dockArea = dock.DockArea() - main_dockArea.addDock(dock_display) - main_dockArea.addDock(dock_wellSelection,'bottom') - main_dockArea.addDock(dock_controlPanel,'right') - self.setCentralWidget(main_dockArea) - desktopWidget = QDesktopWidget() - height_min = 0.9*desktopWidget.height() - width_min = 0.96*desktopWidget.width() - self.setMinimumSize(int(width_min),int(height_min)) + self.setupSingleWindowLayout() else: - self.setCentralWidget(self.centralWidget) - self.tabbedImageDisplayWindow = QMainWindow() - self.tabbedImageDisplayWindow.setCentralWidget(self.imageDisplayTabs) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint) - desktopWidget = QDesktopWidget() - width = 0.96*desktopWidget.height() - height = width - self.tabbedImageDisplayWindow.setFixedSize(width,height) - self.tabbedImageDisplayWindow.show() - - try: - self.cswWindow = widgets.WrapperWindow(self.cameraSettingWidget) - except AttributeError: - pass - - try: - self.cswfcWindow = widgets.WrapperWindow(self.cameraSettingWidget_focus_camera) - except AttributeError: - pass - - # make connections + self.setupMultiWindowLayout() + + def setupSingleWindowLayout(self): + main_dockArea = dock.DockArea() + + dock_display = dock.Dock('Image Display', autoOrientation=False) + dock_display.showTitleBar() + 
dock_display.addWidget(self.imageDisplayTabs) + if SHOW_NAVIGATION_BAR: + dock_display.addWidget(self.navigationBarWidget) + dock_display.setStretch(x=100, y=100) + main_dockArea.addDock(dock_display) + + self.dock_wellSelection = dock.Dock('Well Selector', autoOrientation=False) + self.dock_wellSelection.showTitleBar() + if not USE_NAPARI_WELL_SELECTION or self.live_only_mode: + self.dock_wellSelection.addWidget(self.wellSelectionWidget) + self.dock_wellSelection.setFixedHeight(self.dock_wellSelection.minimumSizeHint().height()) + main_dockArea.addDock(self.dock_wellSelection, 'bottom') + + dock_controlPanel = dock.Dock('Controls', autoOrientation=False) + dock_controlPanel.addWidget(self.centralWidget) + dock_controlPanel.setStretch(x=1, y=None) + dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) + main_dockArea.addDock(dock_controlPanel, 'right') + self.setCentralWidget(main_dockArea) + + desktopWidget = QDesktopWidget() + height_min = 0.9 * desktopWidget.height() + width_min = 0.96 * desktopWidget.width() + self.setMinimumSize(int(width_min), int(height_min)) + self.onTabChanged(self.recordTabWidget.currentIndex()) + + def setupMultiWindowLayout(self): + self.setCentralWidget(self.centralWidget) + self.tabbedImageDisplayWindow = QMainWindow() + self.tabbedImageDisplayWindow.setCentralWidget(self.imageDisplayTabs) + self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) + self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint) + desktopWidget = QDesktopWidget() + width = 0.96 * desktopWidget.height() + height = width + self.tabbedImageDisplayWindow.setFixedSize(int(width), int(height)) + self.tabbedImageDisplayWindow.show() + + def makeConnections(self): self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) 
self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) # self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(lambda x:self.navigationWidget.label_Xpos.setText("{:.2f}".format(x))) - self.navigationController.yPos.connect(lambda x:self.navigationWidget.label_Ypos.setText("{:.2f}".format(x))) - self.navigationController.zPos.connect(lambda x:self.navigationWidget.label_Zpos.setText("{:.2f}".format(x))) + self.navigationController.xPos.connect(lambda x: self.navigationWidget.label_Xpos.setText("{:.2f}".format(x) + " mm")) + self.navigationController.yPos.connect(lambda x: self.navigationWidget.label_Ypos.setText("{:.2f}".format(x) + " mm")) + self.navigationController.zPos.connect(lambda x: self.navigationWidget.label_Zpos.setText("{:.2f}".format(x) + " μm")) + + if SHOW_NAVIGATION_BAR: + self.navigationController.xPos.connect(self.navigationBarWidget.update_x_position) + self.navigationController.yPos.connect(self.navigationBarWidget.update_y_position) + self.navigationController.zPos.connect(self.navigationBarWidget.update_z_position) + if ENABLE_TRACKING: self.navigationController.signal_joystick_button_pressed.connect(self.trackingControlWidget.slot_joystick_button_pressed) else: self.navigationController.signal_joystick_button_pressed.connect(self.autofocusController.autofocus) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) + + if ENABLE_STITCHER: + 
self.multipointController.signal_stitcher.connect(self.startStitcher) + + if ENABLE_FLEXIBLE_MULTIPOINT: + self.flexibleMultiPointWidget.signal_acquisition_started.connect(self.toggleAcquisitionStart) + if ENABLE_STITCHER: + self.flexibleMultiPointWidget.signal_stitcher_widget.connect(self.toggleStitcherWidget) + self.flexibleMultiPointWidget.signal_acquisition_channels.connect(self.stitcherWidget.updateRegistrationChannels) + self.flexibleMultiPointWidget.signal_stitcher_z_levels.connect(self.stitcherWidget.updateRegistrationZLevels) + + if ENABLE_WELLPLATE_MULTIPOINT: + self.wellplateMultiPointWidget.signal_acquisition_started.connect(self.toggleAcquisitionStart) + if ENABLE_STITCHER: + self.wellplateMultiPointWidget.signal_stitcher_widget.connect(self.toggleStitcherWidget) + self.wellplateMultiPointWidget.signal_acquisition_channels.connect(self.stitcherWidget.updateRegistrationChannels) + self.wellplateMultiPointWidget.signal_stitcher_z_levels.connect(self.stitcherWidget.updateRegistrationZLevels) self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) + if not self.live_only_mode: + self.liveControlWidget.signal_start_live.connect(self.onStartLive) self.liveControlWidget.update_camera_settings() - self.liveControlWidget.signal_autoLevelSetting.connect(self.imageDisplayWindow.set_autolevel) - # load vs scan position switching self.slidePositionController.signal_slide_loading_position_reached.connect(self.navigationWidget.slot_slide_loading_position_reached) - self.slidePositionController.signal_slide_loading_position_reached.connect(self.multiPointWidget.disable_the_start_aquisition_button) + #self.slidePositionController.signal_slide_loading_position_reached.connect(self.multiPointWidget.disable_the_start_aquisition_button) 
self.slidePositionController.signal_slide_scanning_position_reached.connect(self.navigationWidget.slot_slide_scanning_position_reached) - self.slidePositionController.signal_slide_scanning_position_reached.connect(self.multiPointWidget.enable_the_start_aquisition_button) + #self.slidePositionController.signal_slide_scanning_position_reached.connect(self.multiPointWidget.enable_the_start_aquisition_button) self.slidePositionController.signal_clear_slide.connect(self.navigationViewer.clear_slide) - - # display the FOV in the viewer + self.navigationViewer.signal_coordinates_clicked.connect(self.navigationController.move_from_click_mosaic) + self.objectivesWidget.signal_objective_changed.connect(self.navigationViewer.on_objective_changed) + if ENABLE_FLEXIBLE_MULTIPOINT: + self.objectivesWidget.signal_objective_changed.connect(self.flexibleMultiPointWidget.update_fov_positions) self.navigationController.xyPos.connect(self.navigationViewer.update_current_location) self.multipointController.signal_register_current_fov.connect(self.navigationViewer.register_fov) + self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) + self.multipointController.signal_z_piezo_um.connect(self.piezoWidget.update_displacement_um_display) + self.wellplateMultiPointWidget.signal_z_stacking.connect(self.multipointController.set_z_stacking_config) + + self.recordTabWidget.currentChanged.connect(self.onTabChanged) + if not self.live_only_mode: + self.imageDisplayTabs.currentChanged.connect(self.onDisplayTabChanged) + + if USE_NAPARI_FOR_LIVE_VIEW and not self.live_only_mode: + self.multipointController.signal_current_configuration.connect(self.napariLiveWidget.set_microscope_mode) + self.autofocusController.image_to_display.connect(lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=True)) + self.streamHandler.image_to_display.connect(lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=False)) + 
self.multipointController.image_to_display.connect(lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=False)) + self.napariLiveWidget.signal_coordinates_clicked.connect(self.navigationController.move_from_click) + self.liveControlWidget.signal_live_configuration.connect(self.napariLiveWidget.set_live_configuration) + + if USE_NAPARI_FOR_LIVE_CONTROL: + self.napariLiveWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) + self.napariLiveWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) + self.napariLiveWidget.signal_autoLevelSetting.connect(self.imageDisplayWindow.set_autolevel) + else: + self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) + self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) + self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) + self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) + self.liveControlWidget.signal_autoLevelSetting.connect(self.imageDisplayWindow.set_autolevel) + self.imageDisplayWindow.image_click_coordinates.connect(self.navigationController.move_from_click) - # (double) click to move to a well - self.wellSelectionWidget.signal_wellSelectedPos.connect(self.navigationController.move_to) - - # camera - self.camera.set_callback(self.streamHandler.on_new_frame) - - # laser autofocus - if SUPPORT_LASER_AUTOFOCUS: - - # controllers - self.configurationManager_focus_camera = core.ConfigurationManager(filename='./focus_camera_configurations.xml') - self.streamHandler_focus_camera = core.StreamHandler() - self.liveController_focus_camera = core.LiveController(self.camera_focus,self.microcontroller,self.configurationManager_focus_camera,control_illumination=False,for_displacement_measurement=True) - self.multipointController = 
core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager,scanCoordinates=self.scanCoordinates,parent=self) - self.imageDisplayWindow_focus = core.ImageDisplayWindow(draw_crosshairs=True) - self.displacementMeasurementController = core_displacement_measurement.DisplacementMeasurementController() - self.laserAutofocusController = core.LaserAutofocusController(self.microcontroller,self.camera_focus,self.liveController_focus_camera,self.navigationController,has_two_interfaces=HAS_TWO_INTERFACES,use_glass_top=USE_GLASS_TOP,look_for_cache=False) - - # camera - self.camera_focus.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera_focus.set_callback(self.streamHandler_focus_camera.on_new_frame) - self.camera_focus.enable_callback() - self.camera_focus.start_streaming() - - # widgets - if FOCUS_CAMERA_TYPE == "Toupcam": - self.cameraSettingWidget_focus_camera = widgets.CameraSettingsWidget(self.camera_focus,include_gain_exposure_time=True, include_camera_temperature_setting = True) - else: - self.cameraSettingWidget_focus_camera = widgets.CameraSettingsWidget(self.camera_focus, include_gain_exposure_time=True, include_camera_temperature_setting=False) - - self.liveControlWidget_focus_camera = widgets.LiveControlWidget(self.streamHandler_focus_camera,self.liveController_focus_camera,self.configurationManager_focus_camera,show_display_options=True) - self.waveformDisplay = widgets.WaveformDisplay(N=1000,include_x=True,include_y=False) - self.displacementMeasurementWidget = widgets.DisplacementMeasurementWidget(self.displacementMeasurementController,self.waveformDisplay) - self.laserAutofocusControlWidget = widgets.LaserAutofocusControlWidget(self.laserAutofocusController) - - self.recordTabWidget.addTab(self.laserAutofocusControlWidget, "Laser Autofocus Control") - - dock_laserfocus_image_display = dock.Dock('Focus Camera Image Display', autoOrientation = 
False) - dock_laserfocus_image_display.showTitleBar() - dock_laserfocus_image_display.addWidget(self.imageDisplayWindow_focus.widget) - dock_laserfocus_image_display.setStretch(x=100,y=100) - - dock_laserfocus_liveController = dock.Dock('Focus Camera Controller', autoOrientation = False) - dock_laserfocus_liveController.showTitleBar() - dock_laserfocus_liveController.addWidget(self.liveControlWidget_focus_camera) - dock_laserfocus_liveController.setStretch(x=100,y=100) - # dock_laserfocus_liveController.setFixedHeight(self.liveControlWidget_focus_camera.minimumSizeHint().height()) - dock_laserfocus_liveController.setFixedWidth(self.liveControlWidget_focus_camera.minimumSizeHint().width()) - - dock_waveform = dock.Dock('Displacement Measurement', autoOrientation = False) - dock_waveform.showTitleBar() - dock_waveform.addWidget(self.waveformDisplay) - dock_waveform.setStretch(x=100,y=40) - - dock_displayMeasurement = dock.Dock('Displacement Measurement Control', autoOrientation = False) - dock_displayMeasurement.showTitleBar() - dock_displayMeasurement.addWidget(self.displacementMeasurementWidget) - dock_displayMeasurement.setStretch(x=100,y=40) - dock_displayMeasurement.setFixedWidth(self.displacementMeasurementWidget.minimumSizeHint().width()) + self.makeNapariConnections() - laserfocus_dockArea = dock.DockArea() - laserfocus_dockArea.addDock(dock_laserfocus_image_display) - laserfocus_dockArea.addDock(dock_laserfocus_liveController,'right',relativeTo=dock_laserfocus_image_display) - if SHOW_LEGACY_DISPLACEMENT_MEASUREMENT_WINDOWS: - laserfocus_dockArea.addDock(dock_waveform,'bottom',relativeTo=dock_laserfocus_liveController) - laserfocus_dockArea.addDock(dock_displayMeasurement,'bottom',relativeTo=dock_waveform) + self.wellplateFormatWidget.signalWellplateSettings.connect(self.navigationViewer.update_wellplate_settings) + self.wellplateFormatWidget.signalWellplateSettings.connect(self.scanCoordinates.update_wellplate_settings) + 
self.wellplateFormatWidget.signalWellplateSettings.connect(lambda format_, *args: self.onWellplateChanged(format_)) - # self.imageDisplayWindow_focus.widget - self.imageDisplayTabs.addTab(laserfocus_dockArea,"Laser-based Focus") + self.wellSelectionWidget.signal_wellSelectedPos.connect(self.navigationController.move_to) + #self.wellSelectionWidget.signal_wellSelected.connect(self.multiPointWidget.set_well_selected) + if ENABLE_WELLPLATE_MULTIPOINT: + self.wellSelectionWidget.signal_wellSelected.connect(self.wellplateMultiPointWidget.set_well_coordinates) + self.objectivesWidget.signal_objective_changed.connect(self.wellplateMultiPointWidget.update_coordinates) + self.wellplateMultiPointWidget.signal_update_navigation_viewer.connect(self.navigationViewer.update_current_location) - # connections + if SUPPORT_LASER_AUTOFOCUS: self.liveControlWidget_focus_camera.signal_newExposureTime.connect(self.cameraSettingWidget_focus_camera.set_exposure_time) self.liveControlWidget_focus_camera.signal_newAnalogGain.connect(self.cameraSettingWidget_focus_camera.set_analog_gain) self.liveControlWidget_focus_camera.update_camera_settings() @@ -488,41 +719,365 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.displacementMeasurementController.signal_readings.connect(self.displacementMeasurementWidget.display_readings) self.laserAutofocusController.image_to_display.connect(self.imageDisplayWindow_focus.display_image) - self.imageDisplayWindow.image_click_coordinates.connect(self.navigationController.move_from_click) + self.camera.set_callback(self.streamHandler.on_new_frame) - self.navigationController.move_to_cached_position() + def makeNapariConnections(self): + """Initialize all Napari connections in one place""" + self.napari_connections = { + 'napariLiveWidget': [], + 'napariMultiChannelWidget': [], + 'napariTiledDisplayWidget': [], + 'napariMosaicDisplayWidget': [] + } + + # Setup live view connections + if USE_NAPARI_FOR_LIVE_VIEW and not self.live_only_mode: 
+ self.napari_connections['napariLiveWidget'] = [ + (self.multipointController.signal_current_configuration, self.napariLiveWidget.set_microscope_mode), + (self.autofocusController.image_to_display, + lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=True)), + (self.streamHandler.image_to_display, + lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=False)), + (self.multipointController.image_to_display, + lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=False)), + (self.napariLiveWidget.signal_coordinates_clicked, self.navigationController.move_from_click), + (self.liveControlWidget.signal_live_configuration, self.napariLiveWidget.set_live_configuration) + ] + + if USE_NAPARI_FOR_LIVE_CONTROL: + self.napari_connections['napariLiveWidget'].extend([ + (self.napariLiveWidget.signal_newExposureTime, self.cameraSettingWidget.set_exposure_time), + (self.napariLiveWidget.signal_newAnalogGain, self.cameraSettingWidget.set_analog_gain), + (self.napariLiveWidget.signal_autoLevelSetting, self.imageDisplayWindow.set_autolevel) + ]) + else: + # Non-Napari display connections + self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) + self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) + self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) + self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) + self.liveControlWidget.signal_autoLevelSetting.connect(self.imageDisplayWindow.set_autolevel) + self.imageDisplayWindow.image_click_coordinates.connect(self.navigationController.move_from_click) + + if not self.live_only_mode: + # Setup multichannel widget connections + if USE_NAPARI_FOR_MULTIPOINT: + self.napari_connections['napariMultiChannelWidget'] = [ + (self.multipointController.napari_layers_init, self.napariMultiChannelWidget.initLayers), + (self.multipointController.napari_layers_update, 
self.napariMultiChannelWidget.updateLayers) + ] + + if ENABLE_FLEXIBLE_MULTIPOINT: + self.napari_connections['napariMultiChannelWidget'].extend([ + (self.flexibleMultiPointWidget.signal_acquisition_channels, self.napariMultiChannelWidget.initChannels), + (self.flexibleMultiPointWidget.signal_acquisition_shape, self.napariMultiChannelWidget.initLayersShape) + ]) + + if ENABLE_WELLPLATE_MULTIPOINT: + self.napari_connections['napariMultiChannelWidget'].extend([ + (self.wellplateMultiPointWidget.signal_acquisition_channels, self.napariMultiChannelWidget.initChannels), + (self.wellplateMultiPointWidget.signal_acquisition_shape, self.napariMultiChannelWidget.initLayersShape) + ]) + else: + self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) + + # Setup tiled display widget connections + if SHOW_TILED_PREVIEW and USE_NAPARI_FOR_TILED_DISPLAY: + self.napari_connections['napariTiledDisplayWidget'] = [ + (self.multipointController.napari_layers_init, self.napariTiledDisplayWidget.initLayers), + (self.multipointController.napari_layers_update, self.napariTiledDisplayWidget.updateLayers), + (self.napariTiledDisplayWidget.signal_coordinates_clicked, + self.navigationController.scan_preview_move_from_click) + ] + + if ENABLE_FLEXIBLE_MULTIPOINT: + self.napari_connections['napariTiledDisplayWidget'].extend([ + (self.flexibleMultiPointWidget.signal_acquisition_channels, self.napariTiledDisplayWidget.initChannels), + (self.flexibleMultiPointWidget.signal_acquisition_shape, self.napariTiledDisplayWidget.initLayersShape) + ]) + + if ENABLE_WELLPLATE_MULTIPOINT: + self.napari_connections['napariTiledDisplayWidget'].extend([ + (self.wellplateMultiPointWidget.signal_acquisition_channels, self.napariTiledDisplayWidget.initChannels), + (self.wellplateMultiPointWidget.signal_acquisition_shape, self.napariTiledDisplayWidget.initLayersShape) + ]) + + # Setup mosaic display widget connections + if USE_NAPARI_FOR_MOSAIC_DISPLAY: + 
self.napari_connections['napariMosaicDisplayWidget'] = [ + (self.multipointController.napari_mosaic_update, self.napariMosaicDisplayWidget.updateMosaic), + (self.napariMosaicDisplayWidget.signal_coordinates_clicked, + self.navigationController.move_from_click_mosaic), + (self.napariMosaicDisplayWidget.signal_update_viewer, self.navigationViewer.update_slide) + ] + + if ENABLE_FLEXIBLE_MULTIPOINT: + self.napari_connections['napariMosaicDisplayWidget'].extend([ + (self.flexibleMultiPointWidget.signal_acquisition_channels, self.napariMosaicDisplayWidget.initChannels), + (self.flexibleMultiPointWidget.signal_acquisition_shape, self.napariMosaicDisplayWidget.initLayersShape) + ]) + + if ENABLE_WELLPLATE_MULTIPOINT: + self.napari_connections['napariMosaicDisplayWidget'].extend([ + (self.wellplateMultiPointWidget.signal_acquisition_channels, self.napariMosaicDisplayWidget.initChannels), + (self.wellplateMultiPointWidget.signal_acquisition_shape, self.napariMosaicDisplayWidget.initLayersShape), + (self.wellplateMultiPointWidget.signal_draw_shape, self.napariMosaicDisplayWidget.enable_shape_drawing), + (self.napariMosaicDisplayWidget.signal_shape_drawn, self.wellplateMultiPointWidget.update_manual_shape) + ]) + + # Make initial connections + self.updateNapariConnections() + + def updateNapariConnections(self): + # Update Napari connections based on performance mode. 
Live widget connections are preserved + for widget_name, connections in self.napari_connections.items(): + if widget_name != 'napariLiveWidget': # Always keep the live widget connected + widget = getattr(self, widget_name, None) + if widget: + for signal, slot in connections: + if self.performance_mode: + try: + signal.disconnect(slot) + except TypeError: + # Connection might not exist, which is fine + pass + else: + try: + signal.connect(slot) + except TypeError: + # Connection might already exist, which is fine + pass + + def toggleNapariTabs(self): + # Enable/disable Napari tabs based on performance mode + for i in range(1, self.imageDisplayTabs.count()): + widget = self.imageDisplayTabs.widget(i) + self.imageDisplayTabs.setTabEnabled(i, not self.performance_mode) + + if self.performance_mode: + # Switch to the NapariLiveWidget tab if it exists + for i in range(self.imageDisplayTabs.count()): + if isinstance(self.imageDisplayTabs.widget(i), widgets.NapariLiveWidget): + self.imageDisplayTabs.setCurrentIndex(i) + break + + def togglePerformanceMode(self): + self.performance_mode = self.performanceModeToggle.isChecked() + button_txt = "Disable" if self.performance_mode else "Enable" + self.performanceModeToggle.setText(button_txt + " Performance Mode") + self.updateNapariConnections() + self.toggleNapariTabs() + print(f"Performance mode {'enabled' if self.performance_mode else 'disabled'}") + + def openLedMatrixSettings(self): + if SUPPORT_SCIMICROSCOPY_LED_ARRAY: + dialog = widgets.LedMatrixSettingsDialog(self.liveController.led_array) + dialog.exec_() + + def onTabChanged(self, index): + acquisitionWidget = self.recordTabWidget.widget(index) + is_flexible = (index == self.recordTabWidget.indexOf(self.flexibleMultiPointWidget)) + is_scan_grid = (index == self.recordTabWidget.indexOf(self.wellplateMultiPointWidget)) if ENABLE_WELLPLATE_MULTIPOINT else False + self.toggleWellSelector(is_scan_grid and self.wellSelectionWidget.format != 'glass slide') + + if 
is_scan_grid: + self.navigationViewer.clear_overlay() + self.wellSelectionWidget.onSelectionChanged() + else: + self.wellplateMultiPointWidget.clear_regions() + + if is_flexible: + self.flexibleMultiPointWidget.update_fov_positions() + + if ENABLE_STITCHER: + self.toggleStitcherWidget(acquisitionWidget.checkbox_stitchOutput.isChecked()) + acquisitionWidget.emit_selected_channels() + + + def resizeCurrentTab(self, tabWidget): + current_widget = tabWidget.currentWidget() + if current_widget: + total_height = current_widget.sizeHint().height() + tabWidget.tabBar().height() + tabWidget.resize(tabWidget.width(), total_height) + tabWidget.setMaximumHeight(total_height) + tabWidget.updateGeometry() + self.updateGeometry() + + def onDisplayTabChanged(self, index): + current_widget = self.imageDisplayTabs.widget(index) + if hasattr(current_widget, 'viewer'): + current_widget.activate() + + def onWellplateChanged(self, format_): + if isinstance(format_, QVariant): + format_ = format_.value() + + if format_ == 'glass slide': + self.toggleWellSelector(False) + self.multipointController.inverted_objective = False + self.navigationController.inverted_objective = False + self.setupSlidePositionController(is_for_wellplate=False) + else: + self.toggleWellSelector(True) + self.multipointController.inverted_objective = True + self.navigationController.inverted_objective = True + self.setupSlidePositionController(is_for_wellplate=True) - def closeEvent(self, event): + if format_ == '1536 well plate': + self.replaceWellSelectionWidget(widgets.Well1536SelectionWidget()) + elif isinstance(self.wellSelectionWidget, widgets.Well1536SelectionWidget): + self.replaceWellSelectionWidget(widgets.WellSelectionWidget(format_, self.wellplateFormatWidget)) + self.connectWellSelectionWidget() + + if ENABLE_FLEXIBLE_MULTIPOINT: + self.flexibleMultiPointWidget.clear_only_location_list() + if ENABLE_WELLPLATE_MULTIPOINT: + self.wellplateMultiPointWidget.clear_regions() + 
self.wellplateMultiPointWidget.set_default_scan_size() + self.wellSelectionWidget.onSelectionChanged() + + def setupSlidePositionController(self, is_for_wellplate): + self.slidePositionController.setParent(None) + self.slidePositionController.deleteLater() + self.slidePositionController = core.SlidePositionController(self.navigationController, self.liveController, is_for_wellplate=is_for_wellplate) + self.connectSlidePositionController() + + def connectSlidePositionController(self): + self.slidePositionController.signal_slide_loading_position_reached.connect(self.navigationWidget.slot_slide_loading_position_reached) + #self.slidePositionController.signal_slide_loading_position_reached.connect(self.multiPointWidget.disable_the_start_aquisition_button) + self.slidePositionController.signal_slide_scanning_position_reached.connect(self.navigationWidget.slot_slide_scanning_position_reached) + #self.slidePositionController.signal_slide_scanning_position_reached.connect(self.multiPointWidget.enable_the_start_aquisition_button) + self.slidePositionController.signal_clear_slide.connect(self.navigationViewer.clear_slide) + if SHOW_NAVIGATION_BAR: + self.navigationBarWidget.replace_slide_controller(self.slidePositionController) + self.navigationWidget.replace_slide_controller(self.slidePositionController) + + def replaceWellSelectionWidget(self, new_widget): + self.wellSelectionWidget.setParent(None) + self.wellSelectionWidget.deleteLater() + self.wellSelectionWidget = new_widget + self.scanCoordinates.add_well_selector(self.wellSelectionWidget) + if USE_NAPARI_WELL_SELECTION and not self.performance_mode and not self.live_only_mode: + self.napariLiveWidget.replace_well_selector(self.wellSelectionWidget) + else: + self.dock_wellSelection.addWidget(self.wellSelectionWidget) + + def connectWellSelectionWidget(self): + #self.wellSelectionWidget.signal_wellSelected.connect(self.multiPointWidget.set_well_selected) + 
self.wellSelectionWidget.signal_wellSelectedPos.connect(self.navigationController.move_to) + self.wellplateFormatWidget.signalWellplateSettings.connect(self.wellSelectionWidget.updateWellplateSettings) + if ENABLE_WELLPLATE_MULTIPOINT: + self.wellSelectionWidget.signal_wellSelected.connect(self.wellplateMultiPointWidget.set_well_coordinates) + + def toggleWellSelector(self, show): + if USE_NAPARI_WELL_SELECTION and not self.performance_mode and not self.live_only_mode: + self.napariLiveWidget.toggle_well_selector(show) + else: + self.dock_wellSelection.setVisible(show) + self.wellSelectionWidget.setVisible(show) + + def toggleAcquisitionStart(self, acquisition_started): + self.navigationWidget.toggle_click_to_move(acquisition_started) + current_index = self.recordTabWidget.currentIndex() + for index in range(self.recordTabWidget.count()): + self.recordTabWidget.setTabEnabled(index, not acquisition_started or index == current_index) + if acquisition_started: + self.liveControlWidget.toggle_autolevel(not acquisition_started) + + is_scan_grid = (current_index == self.recordTabWidget.indexOf(self.wellplateMultiPointWidget)) if ENABLE_WELLPLATE_MULTIPOINT else False + if is_scan_grid and self.wellSelectionWidget.format != 'glass slide': + self.toggleWellSelector(not acquisition_started) + + self.recordTabWidget.currentWidget().display_progress_bar(acquisition_started) + self.navigationViewer.on_acquisition_start(acquisition_started) + def toggleStitcherWidget(self, checked): + if checked: + self.stitcherWidget.show() + else: + self.stitcherWidget.hide() + + def onStartLive(self): + self.imageDisplayTabs.setCurrentIndex(0) + + def startStitcher(self, acquisition_path): + acquisitionWidget = self.recordTabWidget.currentWidget() + if acquisitionWidget.checkbox_stitchOutput.isChecked(): + apply_flatfield = self.stitcherWidget.applyFlatfieldCheck.isChecked() + use_registration = self.stitcherWidget.useRegistrationCheck.isChecked() + registration_channel = 
self.stitcherWidget.registrationChannelCombo.currentText() + registration_z_level = self.stitcherWidget.registrationZCombo.value() + overlap_percent = self.wellplateMultiPointWidget.entry_overlap.value() + output_name = acquisitionWidget.lineEdit_experimentID.text() or "stitched" + output_format = ".ome.zarr" if self.stitcherWidget.outputFormatCombo.currentText() == "OME-ZARR" else ".ome.tiff" + + stitcher_class = stitcher.CoordinateStitcher if self.recordTabWidget.currentIndex() == self.recordTabWidget.indexOf(self.wellplateMultiPointWidget) else stitcher.Stitcher + self.stitcherThread = stitcher_class( + input_folder=acquisition_path, + output_name=output_name, + output_format=output_format, + apply_flatfield=apply_flatfield, + overlap_percent=overlap_percent, + use_registration=use_registration, + registration_channel=registration_channel, + registration_z_level=registration_z_level + ) + + self.stitcherWidget.setStitcherThread(self.stitcherThread) + self.connectStitcherSignals() + self.stitcherThread.start() + + def connectStitcherSignals(self): + self.stitcherThread.update_progress.connect(self.stitcherWidget.updateProgressBar) + self.stitcherThread.getting_flatfields.connect(self.stitcherWidget.gettingFlatfields) + self.stitcherThread.starting_stitching.connect(self.stitcherWidget.startingStitching) + self.stitcherThread.starting_saving.connect(self.stitcherWidget.startingSaving) + self.stitcherThread.finished_saving.connect(self.stitcherWidget.finishedSaving) + + def closeEvent(self, event): self.navigationController.cache_current_position() - # move the objective to a defined position upon exit - self.navigationController.move_x(0.1) # temporary bug fix - move_x needs to be called before move_x_to if the stage has been moved by the joystick - while self.microcontroller.is_busy(): - time.sleep(0.005) - self.navigationController.move_x_to(30) - while self.microcontroller.is_busy(): - time.sleep(0.005) - self.navigationController.move_y(0.1) # temporary bug 
fix - move_y needs to be called before move_y_to if the stage has been moved by the joystick - while self.microcontroller.is_busy(): - time.sleep(0.005) - self.navigationController.move_y_to(30) - while self.microcontroller.is_busy(): - time.sleep(0.005) + if USE_ZABER_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel.set_emission_filter(1) + if USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel.set_emission_filter(1) + self.emission_filter_wheel.close() + if ENABLE_STITCHER: + self.stitcherWidget.closeEvent(event) + if SUPPORT_LASER_AUTOFOCUS: + self.liveController_focus_camera.stop_live() + self.camera_focus.close() + self.imageDisplayWindow_focus.close() self.liveController.stop_live() + self.camera.stop_streaming() self.camera.close() + + if HOMING_ENABLED_X and HOMING_ENABLED_Y: + self.navigationController.move_x(0.1) + self.waitForMicrocontroller() + self.navigationController.move_x_to(30) + self.waitForMicrocontroller() + self.navigationController.move_y(0.1) + self.waitForMicrocontroller() + self.navigationController.move_y_to(30) + self.waitForMicrocontroller() + + self.navigationController.turnoff_axis_pid_control() + + if ENABLE_CELLX: + for channel in [1,2,3,4]: + self.cellx.turn_off(channel) + self.cellx.close() + self.imageSaver.close() self.imageDisplay.close() if not SINGLE_WINDOW: self.imageDisplayWindow.close() self.imageArrayDisplayWindow.close() self.tabbedImageDisplayWindow.close() - if SUPPORT_LASER_AUTOFOCUS: - self.camera_focus.close() - self.imageDisplayWindow_focus.close() - self.microcontroller.close() + self.microcontroller.close() try: self.cswWindow.closeForReal(event) except AttributeError: diff --git a/software/control/gui_malaria.py b/software/control/gui_malaria.py index 4ebd15729..7fabc0df4 100644 --- a/software/control/gui_malaria.py +++ b/software/control/gui_malaria.py @@ -1,12 +1,10 @@ # set QT_API environment variable -import os +import os os.environ["QT_API"] = "pyqt5" -import qtpy # qt libraries from 
qtpy.QtCore import * from qtpy.QtWidgets import * -from qtpy.QtGui import * # app specific libraries import control.widgets as widgets @@ -20,7 +18,7 @@ SINGLE_WINDOW = True # set to False if use separate windows for display and control -class OctopiGUI(QMainWindow): +class MalariaGUI(QMainWindow): # variables fps_software_trigger = 100 @@ -28,25 +26,12 @@ class OctopiGUI(QMainWindow): def __init__(self, is_simulation = False, *args, **kwargs): super().__init__(*args, **kwargs) - # load window - if ENABLE_TRACKING: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageDisplayWindow.show_ROI_selector() - else: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - # self.imageDisplayWindow.show() - # self.imageArrayDisplayWindow.show() - - # image display windows - self.imageDisplayTabs = QTabWidget() - self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") - self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") + self.log = squid.logging.get_logger(self.__class__.__name__) # load objects if is_simulation: self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() + self.microcontroller = microcontroller.Microcontroller(existing_serial=microcontroller.SimSerial()) else: try: self.camera = camera.Camera(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) @@ -54,12 +39,12 @@ def __init__(self, is_simulation = False, *args, **kwargs): except: self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) self.camera.open() - print('! 
camera not detected, using simulated camera !') + self.log.error("camera not detected, using simulated camera") try: self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION) except: - print('! Microcontroller not detected, using simulated microcontroller !') - self.microcontroller = microcontroller.Microcontroller_Simulation() + self.log.error("Microcontroller not detected, using simulated microcontroller") + self.microcontroller = microcontroller.Microcontroller(existing_serial=microcontroller.SimSerial()) # reset the MCU self.microcontroller.reset() @@ -69,11 +54,12 @@ def __init__(self, is_simulation = False, *args, **kwargs): # configure the actuators self.microcontroller.configure_actuators() - + self.objectiveStore = core.ObjectiveStore(parent=self) + self.scanCoordinates = core.ScanCoordinates() self.configurationManager = core.ConfigurationManager() self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller, parent=self) + self.navigationController = core.NavigationController(self.microcontroller, self.objectiveStore, parent=self) self.slidePositionController = core.SlidePositionController(self.navigationController,self.liveController) self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) @@ -81,7 +67,7 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) self.imageSaver = core.ImageSaver() 
self.imageDisplay = core.ImageDisplay() - self.navigationViewer = core.NavigationViewer() + self.navigationViewer = core.NavigationViewer(self.objectiveStore) # retract the objective self.navigationController.home_z() @@ -90,34 +76,65 @@ def __init__(self, is_simulation = False, *args, **kwargs): while self.microcontroller.is_busy(): time.sleep(0.005) if time.time() - t0 > 10: - print('z homing timeout, the program will exit') - exit() - print('objective retracted') + self.log.error('z homing timeout, the program will exit') + sys.exit(1) + self.log.info('objective retracted') + + # set encoder arguments + # set axis pid control enable + # only ENABLE_PID_X and HAS_ENCODER_X are both enable, can be enable to PID + if HAS_ENCODER_X == True: + self.navigationController.set_axis_PID_arguments(0, PID_P_X, PID_I_X, PID_D_X) + self.navigationController.configure_encoder(0, (SCREW_PITCH_X_MM * 1000) / ENCODER_RESOLUTION_UM_X, ENCODER_FLIP_DIR_X) + self.navigationController.set_pid_control_enable(0, ENABLE_PID_X) + if HAS_ENCODER_Y == True: + self.navigationController.set_axis_PID_arguments(1, PID_P_Y, PID_I_Y, PID_D_Y) + self.navigationController.configure_encoder(1, (SCREW_PITCH_Y_MM * 1000) / ENCODER_RESOLUTION_UM_Y, ENCODER_FLIP_DIR_Y) + self.navigationController.set_pid_control_enable(1, ENABLE_PID_Y) + if HAS_ENCODER_Z == True: + self.navigationController.set_axis_PID_arguments(2, PID_P_Z, PID_I_Z, PID_D_Z) + self.navigationController.configure_encoder(2, (SCREW_PITCH_Z_MM * 1000) / ENCODER_RESOLUTION_UM_Z, ENCODER_FLIP_DIR_Z) + self.navigationController.set_pid_control_enable(2, ENABLE_PID_Z) + + time.sleep(0.5) # homing, set zero and set software limit self.navigationController.set_x_limit_pos_mm(100) self.navigationController.set_x_limit_neg_mm(-100) self.navigationController.set_y_limit_pos_mm(100) self.navigationController.set_y_limit_neg_mm(-100) - print('start homing') + self.log.info("start homing") 
self.slidePositionController.move_to_slide_scanning_position() while self.slidePositionController.slide_scanning_position_reached == False: time.sleep(0.005) - print('homing finished') + self.log.info("homing finished") self.navigationController.set_x_limit_pos_mm(SOFTWARE_POS_LIMIT.X_POSITIVE) self.navigationController.set_x_limit_neg_mm(SOFTWARE_POS_LIMIT.X_NEGATIVE) self.navigationController.set_y_limit_pos_mm(SOFTWARE_POS_LIMIT.Y_POSITIVE) self.navigationController.set_y_limit_neg_mm(SOFTWARE_POS_LIMIT.Y_NEGATIVE) - # raise the objective - self.navigationController.move_z(DEFAULT_Z_POS_MM) - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 5: - print('z return timeout, the program will exit') - exit() + # set piezo arguments + if ENABLE_OBJECTIVE_PIEZO is True: + if OBJECTIVE_PIEZO_CONTROL_VOLTAGE_RANGE == 5: + OUTPUT_GAINS.CHANNEL7_GAIN = True + else: + OUTPUT_GAINS.CHANNEL7_GAIN = False + + # set output's gains + div = 1 if OUTPUT_GAINS.REFDIV is True else 0 + gains = OUTPUT_GAINS.CHANNEL0_GAIN << 0 + gains += OUTPUT_GAINS.CHANNEL1_GAIN << 1 + gains += OUTPUT_GAINS.CHANNEL2_GAIN << 2 + gains += OUTPUT_GAINS.CHANNEL3_GAIN << 3 + gains += OUTPUT_GAINS.CHANNEL4_GAIN << 4 + gains += OUTPUT_GAINS.CHANNEL5_GAIN << 5 + gains += OUTPUT_GAINS.CHANNEL6_GAIN << 6 + gains += OUTPUT_GAINS.CHANNEL7_GAIN << 7 + self.microcontroller.configure_dac80508_refdiv_and_gain(div, gains) + + # set illumination intensity factor + global ILLUMINATION_INTENSITY_FACTOR + self.microcontroller.set_dac80508_scaling_factor_for_illumination(ILLUMINATION_INTENSITY_FACTOR) # set software limit self.navigationController.set_x_limit_pos_mm(SOFTWARE_POS_LIMIT.X_POSITIVE) @@ -134,8 +151,14 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.camera.set_callback(self.streamHandler.on_new_frame) self.camera.enable_callback() + # only toupcam need reset strobe argument when camera's argument change 
+ if CAMERA_TYPE == "Toupcam": + self.camera.set_reset_strobe_delay_function(self.liveController.reset_strobe_arugment) + # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=False) + self.objectivesWidget = widgets.ObjectivesWidget(self.objectiveStore) + self.contrastManager = core.ContrastManager() + self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera, include_gain_exposure_time=False) self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager,show_display_options=True) self.navigationWidget = widgets.NavigationWidget(self.navigationController,self.slidePositionController,widget_configuration='malaria') self.dacControlWidget = widgets.DACControWidget(self.microcontroller) @@ -144,17 +167,63 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.focusMapWidget = widgets.FocusMapWidget(self.autofocusController) if ENABLE_TRACKING: self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) + + self.imageDisplayTabs = QTabWidget() + if USE_NAPARI_FOR_LIVE_VIEW: + self.napariLiveWidget = widgets.NapariLiveWidget(self.streamHandler, self.liveController, self.navigationController, self.configurationManager, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariLiveWidget, "Live View") + else: + if ENABLE_TRACKING: + self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) + self.imageDisplayWindow.show_ROI_selector() + else: + self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) + self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") + + if USE_NAPARI_FOR_MULTIPOINT: + self.napariMultiChannelWidget = widgets.NapariMultiChannelWidget(self.objectiveStore, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariMultiChannelWidget, "Multichannel 
Acquisition") + else: + self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() + self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") + + if SHOW_TILED_PREVIEW: + if USE_NAPARI_FOR_TILED_DISPLAY: + self.napariTiledDisplayWidget = widgets.NapariTiledDisplayWidget(self.objectiveStore, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariTiledDisplayWidget, "Tiled Preview") + else: + self.imageDisplayWindow_scan_preview = core.ImageDisplayWindow(draw_crosshairs=True) + self.imageDisplayTabs.addTab(self.imageDisplayWindow_scan_preview.widget, "Tiled Preview") + + if USE_NAPARI_FOR_MOSAIC_DISPLAY: + self.napariMosaicDisplayWidget = widgets.NapariMosaicDisplayWidget(self.objectiveStore, self.contrastManager) + self.imageDisplayTabs.addTab(self.napariMosaicDisplayWidget, "Mosaic View") + self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) + self.multiPointWidgetGrid = widgets.MultiPointWidgetGrid(self.navigationController, self.navigationViewer, self.multipointController, self.objectiveStore, self.configurationManager, self.scanCoordinates, self.napariMosaicDisplayWidget) self.recordTabWidget = QTabWidget() + self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") + if ENABLE_SCAN_GRID: + self.recordTabWidget.addTab(self.multiPointWidgetGrid, "Auto-Grid Multipoint") + self.recordTabWidget.addTab(self.focusMapWidget, "Contrast Focus Map") if ENABLE_TRACKING: self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") #self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - self.recordTabWidget.addTab(self.focusMapWidget, "Contrast Focus Map") + self.recordTabWidget.currentChanged.connect(lambda: self.resizeCurrentTab(self.recordTabWidget)) + self.resizeCurrentTab(self.recordTabWidget) + + frame = QFrame() + 
frame.setFrameStyle(QFrame.Panel | QFrame.Raised) + # Creating the top row layout and adding widgets + top_row_layout = QHBoxLayout() + top_row_layout.addWidget(self.objectivesWidget) + top_row_layout.setContentsMargins(1, 1, 1, 1) + frame.setLayout(top_row_layout) # Set the layout on the frame + # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() + layout = QVBoxLayout() #layout.addWidget(self.cameraSettingWidget) layout.addWidget(self.liveControlWidget) layout.addWidget(self.navigationWidget) @@ -162,6 +231,7 @@ def __init__(self, is_simulation = False, *args, **kwargs): layout.addWidget(self.dacControlWidget) layout.addWidget(self.autofocusWidget) layout.addWidget(self.recordTabWidget) + layout.addWidget(frame) layout.addWidget(self.navigationViewer) layout.addStretch() @@ -205,10 +275,8 @@ def __init__(self, is_simulation = False, *args, **kwargs): # make connections self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) # self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow self.navigationController.xPos.connect(lambda x:self.navigationWidget.label_Xpos.setText("{:.2f}".format(x))) self.navigationController.yPos.connect(lambda x:self.navigationWidget.label_Ypos.setText("{:.2f}".format(x))) self.navigationController.zPos.connect(lambda x:self.navigationWidget.label_Zpos.setText("{:.2f}".format(x))) @@ -216,10 +284,64 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.navigationController.signal_joystick_button_pressed.connect(self.trackingControlWidget.slot_joystick_button_pressed) else: 
self.navigationController.signal_joystick_button_pressed.connect(self.autofocusController.autofocus) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) + self.multiPointWidget.signal_acquisition_started.connect(self.navigationWidget.toggle_navigation_controls) + if ENABLE_SCAN_GRID: + self.multiPointWidgetGrid.signal_acquisition_started.connect(self.navigationWidget.toggle_navigation_controls) + + if USE_NAPARI_FOR_LIVE_VIEW: + self.autofocusController.image_to_display.connect(lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=True)) + self.streamHandler.image_to_display.connect(lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=False)) + self.multipointController.image_to_display.connect(lambda image: self.napariLiveWidget.updateLiveLayer(image, from_autofocus=False)) + self.napariLiveWidget.signal_coordinates_clicked.connect(self.navigationController.move_from_click) + else: + self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) + self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) + self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) + self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) + self.imageDisplayWindow.image_click_coordinates.connect(self.navigationController.move_from_click) + + if USE_NAPARI_FOR_MULTIPOINT: + self.multiPointWidget.signal_acquisition_channels.connect(self.napariMultiChannelWidget.initChannels) + self.multiPointWidget.signal_acquisition_shape.connect(self.napariMultiChannelWidget.initLayersShape) + if ENABLE_SCAN_GRID: + 
self.multiPointWidgetGrid.signal_acquisition_channels.connect(self.napariMultiChannelWidget.initChannels) + self.multiPointWidgetGrid.signal_acquisition_shape.connect(self.napariMultiChannelWidget.initLayersShape) + + self.multipointController.napari_layers_init.connect(self.napariMultiChannelWidget.initLayers) + self.multipointController.napari_layers_update.connect(self.napariMultiChannelWidget.updateLayers) + + else: + self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) + + if SHOW_TILED_PREVIEW: + if USE_NAPARI_FOR_TILED_DISPLAY: + self.multiPointWidget.signal_acquisition_channels.connect(self.napariTiledDisplayWidget.initChannels) + self.multiPointWidget.signal_acquisition_shape.connect(self.napariTiledDisplayWidget.initLayersShape) + if ENABLE_SCAN_GRID: + self.multiPointWidgetGrid.signal_acquisition_channels.connect(self.napariTiledDisplayWidget.initChannels) + self.multiPointWidgetGrid.signal_acquisition_shape.connect(self.napariTiledDisplayWidget.initLayersShape) + + self.multipointController.napari_layers_init.connect(self.napariTiledDisplayWidget.initLayers) + self.multipointController.napari_layers_update.connect(self.napariTiledDisplayWidget.updateLayers) + self.napariTiledDisplayWidget.signal_coordinates_clicked.connect(self.navigationController.scan_preview_move_from_click) + + else: + self.multipointController.image_to_display_tiled_preview.connect(self.imageDisplayWindow_scan_preview.display_image) + self.imageDisplayWindow_scan_preview.image_click_coordinates.connect(self.navigationController.scan_preview_move_from_click) + + if USE_NAPARI_FOR_MOSAIC_DISPLAY: + self.multiPointWidget.signal_acquisition_channels.connect(self.napariMosaicDisplayWidget.initChannels) + self.multiPointWidget.signal_acquisition_shape.connect(self.napariMosaicDisplayWidget.initLayersShape) + if ENABLE_SCAN_GRID: + self.multiPointWidgetGrid.signal_acquisition_channels.connect(self.napariMosaicDisplayWidget.initChannels) + 
self.multiPointWidgetGrid.signal_acquisition_shape.connect(self.napariMosaicDisplayWidget.initLayersShape) + self.multiPointWidgetGrid.signal_draw_shape.connect(self.napariMosaicDisplayWidget.enable_shape_drawing) + self.napariMosaicDisplayWidget.signal_shape_drawn.connect(self.multiPointWidgetGrid.update_manual_shape) + + self.multipointController.napari_mosaic_update.connect(self.napariMosaicDisplayWidget.updateMosaic) + self.napariMosaicDisplayWidget.signal_coordinates_clicked.connect(self.navigationController.move_from_click_mosaic) + self.napariMosaicDisplayWidget.signal_update_viewer.connect(self.navigationViewer.update_slide) self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) @@ -230,20 +352,28 @@ def __init__(self, is_simulation = False, *args, **kwargs): self.slidePositionController.signal_slide_scanning_position_reached.connect(self.navigationWidget.slot_slide_scanning_position_reached) self.slidePositionController.signal_slide_scanning_position_reached.connect(self.multiPointWidget.enable_the_start_aquisition_button) self.slidePositionController.signal_clear_slide.connect(self.navigationViewer.clear_slide) - + self.objectivesWidget.signal_objective_changed.connect(self.navigationViewer.on_objective_changed) self.navigationController.xyPos.connect(self.navigationViewer.update_current_location) self.multipointController.signal_register_current_fov.connect(self.navigationViewer.register_fov) - - self.imageDisplayWindow.image_click_coordinates.connect(self.navigationController.move_from_click) - self.navigationController.move_to_cached_position() + def resizeCurrentTab(self, tabWidget): + current_widget = tabWidget.currentWidget() + if current_widget: + total_height = current_widget.sizeHint().height() + tabWidget.tabBar().height() + tabWidget.resize(tabWidget.width(), total_height) + 
tabWidget.setMaximumHeight(total_height) + tabWidget.updateGeometry() + self.updateGeometry() + def closeEvent(self, event): self.navigationController.cache_current_position() event.accept() # self.softwareTriggerGenerator.stop() @@@ => self.navigationController.home() + self.navigationController.turnoff_axis_pid_control() + self.liveController.stop_live() self.camera.close() self.imageSaver.close() diff --git a/software/control/gui_motion_only.py b/software/control/gui_motion_only.py deleted file mode 100644 index bfa15a80e..000000000 --- a/software/control/gui_motion_only.py +++ /dev/null @@ -1,48 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.microcontroller = microcontroller.Microcontroller() - self.navigationController = core.NavigationController(self.microcontroller) - - # load widgets - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.navigationWidget,2,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # make connections - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - - def closeEvent(self, event): - event.accept() - 
self.navigationController.home() \ No newline at end of file diff --git a/software/control/gui_platereader.py b/software/control/gui_platereader.py deleted file mode 100644 index dfcf0d291..000000000 --- a/software/control/gui_platereader.py +++ /dev/null @@ -1,103 +0,0 @@ -# set QT_API environment variable -import os -from pathlib import Path -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.core_platereader as core_platereader -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - if is_simulation: - self.camera = camera.Camera_Simulation() - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - self.camera = camera.Camera() - self.microcontroller = microcontroller.Microcontroller() - - self.configurationManager = core.ConfigurationManager(filename=str(Path.home()) + "/configurations_platereader.xml") - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.plateReaderNavigationController = core.PlateReaderNavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.plateReadingController = core_platereader.PlateReadingController(self.camera,self.plateReaderNavigationController,self.liveController,self.autofocusController,self.configurationManager) - self.imageSaver = core.ImageSaver() - - # open the camera - # camera start streaming - 
self.camera.open() - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=False) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager,show_trigger_options=False,show_display_options=False) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.plateReaderAcquisitionWidget = widgets.PlateReaderAcquisitionWidget(self.plateReadingController,self.configurationManager,show_configurations=False) - self.plateReaderNavigationWidget = widgets.PlateReaderNavigationWidget(self.plateReaderNavigationController) - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - #layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.plateReaderNavigationWidget,2,0) - layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.plateReaderAcquisitionWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow() - self.imageDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplayWindow.display_image) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - # self.plateReaderNavigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - # self.plateReaderNavigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - # self.plateReaderNavigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) 
- self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - # self.plateReadingController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.plateReadingController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.plateReadingController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - self.microcontroller.set_callback(self.plateReaderNavigationController.update_pos) - self.plateReaderNavigationController.signal_homing_complete.connect(self.plateReaderNavigationWidget.slot_homing_complete) - self.plateReaderNavigationController.signal_homing_complete.connect(self.plateReaderAcquisitionWidget.slot_homing_complete) - self.plateReaderNavigationController.signal_current_well.connect(self.plateReaderNavigationWidget.update_current_location) - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - # self.plateReaderNavigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplayWindow.close() - self.microcontroller.close() \ No newline at end of file diff --git a/software/control/gui_simulation.py b/software/control/gui_simulation.py deleted file mode 100644 index af05a1c62..000000000 --- a/software/control/gui_simulation.py +++ /dev/null @@ -1,111 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller 
- -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera = camera.Camera_Simulation() - self.microcontroller = microcontroller.Microcontroller_Simulation() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) - self.trackingController = core.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - self.trackingControlWidget = widgets.TrackingControllerWidget(self.streamHandler,self.trackingController) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - - self.recordTabWidget = 
QTabWidget() - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - # layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.navigationWidget,2,0) - layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordTabWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # widgets_ = QWidget() - # widgets_.setLayout(layout) - # scroll = QScrollArea() - # scroll.setWidget(widgets_) - # self.setCentralWidget(widgets_) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow() - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - self.imageDisplayWindow.show() - self.imageArrayDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - # self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) 
- self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() diff --git a/software/control/gui_tiscamera.py b/software/control/gui_tiscamera.py deleted file mode 100644 index 5fed69ad8..000000000 --- a/software/control/gui_tiscamera.py +++ /dev/null @@ -1,116 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.widgets_tracking as widgets_tracking -import control.camera_TIS as camera -import control.core as core -import control.core_tracking as core_tracking -import control.microcontroller as microcontroller - -SIMULATION = True - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - if SIMULATION is True: - # self.camera = camera.Camera(sn=48910098) - self.camera = camera.Camera(sn=17910089) - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - self.camera = camera.Camera(sn=17910089) - self.microcontroller = microcontroller.Microcontroller() - - self.configurationManager = 
core.ConfigurationManager() - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - #self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - #self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController) - self.trackingController = core_tracking.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - ''' - # thread - self.thread_multiPoint = QThread() - self.thread_multiPoint.start() - self.multipointController.moveToThread(self.thread_multiPoint) - ''' - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - #self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - self.trackingControlWidget = widgets_tracking.TrackingControllerWidget(self.streamHandler,self.trackingController) - #self.multiPointWidget = widgets.MultiPointWidget(self.multipointController) - - self.recordTabWidget = QTabWidget() - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - 
#self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.navigationWidget,2,0) - #layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordTabWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow('Main Display') - self.imageDisplayWindow.show() - - self.imageDisplayWindow_ThresholdedImage = core.ImageDisplayWindow('Thresholded Image') - self.imageDisplayWindow_ThresholdedImage.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - #self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - #self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.imageDisplayWindow.close() - 
self.imageDisplayWindow_ThresholdedImage.close() diff --git a/software/control/gui_tiscamera_DZK250.py b/software/control/gui_tiscamera_DZK250.py deleted file mode 100644 index d3c46dfec..000000000 --- a/software/control/gui_tiscamera_DZK250.py +++ /dev/null @@ -1,105 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera_TIS as camera -import control.core as core -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera = camera.Camera(sn=33910474,width=4000,height=3000,framerate=30,color=False) - self.microcontroller = microcontroller.Microcontroller() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) - self.trackingController = core.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - 
self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - self.trackingControlWidget = widgets.TrackingControllerWidget(self.streamHandler,self.trackingController) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - - self.recordTabWidget = QTabWidget() - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.navigationWidget,2,0) - layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordTabWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow() - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - self.imageDisplayWindow.show() - self.imageArrayDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - 
self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - # self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() \ No newline at end of file diff --git a/software/control/gui_tiscamera_simulation.py b/software/control/gui_tiscamera_simulation.py deleted file mode 100644 index bab3765e1..000000000 --- a/software/control/gui_tiscamera_simulation.py +++ /dev/null @@ -1,117 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.widgets_tracking as widgets_tracking -import control.camera_TIS as camera -import control.core as core -import 
control.core_tracking as core_tracking -import control.microcontroller as microcontroller - -SIMULATION = True - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - if SIMULATION is True: - self.camera = camera.Camera_Simulation() - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - self.camera = camera.Camera(sn=17910085) - self.microcontroller = microcontroller.Microcontroller() - - self.streamHandler = core.StreamHandler() - self.liveController = core.LiveController(self.camera,self.microcontroller) - self.navigationController = core.NavigationController(self.microcontroller) - #self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - #self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController) - self.trackingController = core_tracking.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - ''' - # thread - self.thread_multiPoint = QThread() - self.thread_multiPoint.start() - self.multipointController.moveToThread(self.thread_multiPoint) - ''' - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - #self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = 
widgets.RecordingWidget(self.streamHandler,self.imageSaver) - self.trackingControlWidget = widgets_tracking.TrackingControllerWidget(self.streamHandler,self.trackingController) - #self.multiPointWidget = widgets.MultiPointWidget(self.multipointController) - - self.recordTabWidget = QTabWidget() - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - #self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - layout.addWidget(self.navigationWidget,2,0) - #layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordTabWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow('Main Display') - self.imageDisplayWindow.show() - - self.imageDisplayWindow_ThresholdedImage = core.ImageDisplayWindow('Thresholded Image') - # self.imageDisplayWindow_ThresholdedImage.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - # self.streamHandler.image_to_display.connect(self.imageDisplay.emit_directly) # test emitting image to display without queueing and threading - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - 
self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - #self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - #self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - - self.camera.start_streaming() - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.imageDisplayWindow.close() - self.imageDisplayWindow_ThresholdedImage.close() diff --git a/software/control/gui_toupcam_IMX571.py b/software/control/gui_toupcam_IMX571.py deleted file mode 100644 index d17521327..000000000 --- a/software/control/gui_toupcam_IMX571.py +++ /dev/null @@ -1,252 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera_toupcam as camera -import control.core as core -import control.microcontroller as microcontroller -from control._def import * - -import pyqtgraph.dockarea as dock -SINGLE_WINDOW = True # set to False if use separate windows for display and control - -import time - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load window - if ENABLE_TRACKING: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True,show_LUT=True,autoLevels=True) - self.imageDisplayWindow.show_ROI_selector() - else: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True,show_LUT=True,autoLevels=True) - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - # 
self.imageDisplayWindow.show() - # self.imageArrayDisplayWindow.show() - - # image display windows - self.imageDisplayTabs = QTabWidget() - self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") - self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") - - # load objects - if is_simulation: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - else: - self.camera = camera.Camera(resolution=(6224,4168),rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - #self.camera = camera.Camera(resolution=(3104,2084)) - #self.camera = camera.Camera(resolution=(2064,1386)) - # 6224 x 4168 - # 3104 x 2084 - # 2064 x 1386 - try: - self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION) - except: - print('! Microcontroller not detected, using simulated microcontroller !') - self.microcontroller = microcontroller.Microcontroller_Simulation() - - # reset the MCU - self.microcontroller.reset() - - ''' - # reinitialize motor drivers and DAC (in particular for V2.1 driver board where PG is not functional) - self.microcontroller.initialize_drivers() - ''' - - # configure the actuators - self.microcontroller.configure_actuators() - - self.configurationManager = core.ConfigurationManager('./channel_configurations.xml') - self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.slidePositionController = core.SlidePositionController(self.navigationController,self.liveController) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = 
core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) - if ENABLE_TRACKING: - self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) - self.imageSaver = core.ImageSaver(image_format=Acquisition.IMAGE_FORMAT) - self.imageDisplay = core.ImageDisplay() - self.navigationViewer = core.NavigationViewer() - - # homing - ''' - self.navigationController.home_y() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('y homing timeout, the program will exit') - exit() - - self.navigationController.home_x() - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('x homing timeout, the program will exit') - exit() - ''' - ''' - self.slidePositionController.move_to_slide_scanning_position() - while self.slidePositionController.slide_scanning_position_reached == False: - time.sleep(0.005) - print('homing finished') - - # retract the objective - self.navigationController.home_z() - # wait for the operation to finish - t0 = time.time() - while self.microcontroller.is_busy(): - time.sleep(0.005) - if time.time() - t0 > 10: - print('z homing timeout, the program will exit') - exit() - print('objective retracted') - self.navigationController.move_z(DEFAULT_Z_POS_MM) - - self.navigationController.set_x_limit_pos_mm(SOFTWARE_POS_LIMIT.X_POSITIVE) - self.navigationController.set_x_limit_neg_mm(SOFTWARE_POS_LIMIT.X_NEGATIVE) - self.navigationController.set_y_limit_pos_mm(SOFTWARE_POS_LIMIT.Y_POSITIVE) - self.navigationController.set_y_limit_neg_mm(SOFTWARE_POS_LIMIT.Y_NEGATIVE) - self.navigationController.set_z_limit_pos_mm(SOFTWARE_POS_LIMIT.Z_POSITIVE) - ''' - - # open the camera - # camera start streaming - self.camera.open() - 
self.camera.set_gain_mode('HCG') - # self.camera.camera.put_Roi(3112,2084,2048,2048) - # self.camera.set_reverse_x(CAMERA_REVERSE_X) # these are not implemented for the cameras in use - # self.camera.set_reverse_y(CAMERA_REVERSE_Y) # these are not implemented for the cameras in use - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - if ENABLE_STROBE_OUTPUT: - self.camera.set_line3_to_exposure_active() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=False,include_camera_temperature_setting=True) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager,show_trigger_options=True,show_display_options=True,show_autolevel=True,autolevel=True) - self.navigationWidget = widgets.NavigationWidget(self.navigationController,self.slidePositionController,widget_configuration='malaria') - self.dacControlWidget = widgets.DACControWidget(self.microcontroller) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - if ENABLE_TRACKING: - self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - - self.recordTabWidget = QTabWidget() - if ENABLE_TRACKING: - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - - # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget) - 
layout.addWidget(self.liveControlWidget) - layout.addWidget(self.navigationWidget) - if SHOW_DAC_CONTROL: - layout.addWidget(self.dacControlWidget) - layout.addWidget(self.autofocusWidget) - layout.addWidget(self.recordTabWidget) - layout.addWidget(self.navigationViewer) - layout.addStretch() - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - # self.centralWidget.setFixedSize(self.centralWidget.minimumSize()) - # self.centralWidget.setFixedWidth(self.centralWidget.minimumWidth()) - # self.centralWidget.setMaximumWidth(self.centralWidget.minimumWidth()) - self.centralWidget.setFixedWidth(self.centralWidget.minimumSizeHint().width()) - - if SINGLE_WINDOW: - dock_display = dock.Dock('Image Display', autoOrientation = False) - dock_display.showTitleBar() - dock_display.addWidget(self.imageDisplayTabs) - dock_display.setStretch(x=100,y=None) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - # dock_controlPanel.showTitleBar() - dock_controlPanel.addWidget(self.centralWidget) - dock_controlPanel.setStretch(x=1,y=None) - dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) - main_dockArea = dock.DockArea() - main_dockArea.addDock(dock_display) - main_dockArea.addDock(dock_controlPanel,'right') - self.setCentralWidget(main_dockArea) - desktopWidget = QDesktopWidget() - height_min = int(0.9*desktopWidget.height()) - width_min =int(0.96*desktopWidget.width()) - self.setMinimumSize(width_min,height_min) - else: - self.setCentralWidget(self.centralWidget) - self.tabbedImageDisplayWindow = QMainWindow() - self.tabbedImageDisplayWindow.setCentralWidget(self.imageDisplayTabs) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint) - desktopWidget = QDesktopWidget() - width = 0.96*desktopWidget.height() - height = width - 
self.tabbedImageDisplayWindow.setFixedSize(width,height) - self.tabbedImageDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - # self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - if ENABLE_TRACKING: - self.navigationController.signal_joystick_button_pressed.connect(self.trackingControlWidget.slot_joystick_button_pressed) - else: - self.navigationController.signal_joystick_button_pressed.connect(self.autofocusController.autofocus) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - self.liveControlWidget.signal_autoLevelSetting.connect(self.imageDisplayWindow.set_autolevel) - - self.slidePositionController.signal_slide_loading_position_reached.connect(self.navigationWidget.slot_slide_loading_position_reached) - 
self.slidePositionController.signal_slide_loading_position_reached.connect(self.multiPointWidget.disable_the_start_aquisition_button) - self.slidePositionController.signal_slide_scanning_position_reached.connect(self.navigationWidget.slot_slide_scanning_position_reached) - self.slidePositionController.signal_slide_scanning_position_reached.connect(self.multiPointWidget.enable_the_start_aquisition_button) - self.slidePositionController.signal_clear_slide.connect(self.navigationViewer.clear_slide) - self.navigationController.xyPos.connect(self.navigationViewer.update_current_location) - self.multipointController.signal_register_current_fov.connect(self.navigationViewer.register_fov) - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - if not SINGLE_WINDOW: - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() - self.tabbedImageDisplayWindow.close() - self.microcontroller.close() diff --git a/software/control/gui_usbspectrometer.py b/software/control/gui_usbspectrometer.py deleted file mode 100644 index 5826e0515..000000000 --- a/software/control/gui_usbspectrometer.py +++ /dev/null @@ -1,210 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.microcontroller as microcontroller -from control._def import * - -import control.widgets_usbspectrometer as widgets_usbspectrometer -import control.core_usbspectrometer as core_usbspectrometer -import control.spectrometer_oceanoptics as spectrometer - -import pyqtgraph.dockarea as dock -SINGLE_WINDOW = True # set to False if use separate 
windows for display and control - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, is_simulation = False, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load window - if ENABLE_TRACKING: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageDisplayWindow.show_ROI_selector() - else: - self.imageDisplayWindow = core.ImageDisplayWindow(draw_crosshairs=True) - self.imageArrayDisplayWindow = core.ImageArrayDisplayWindow() - # self.imageDisplayWindow.show() - # self.imageArrayDisplayWindow.show() - - # image display windows - self.imageDisplayTabs = QTabWidget() - self.imageDisplayTabs.addTab(self.imageDisplayWindow.widget, "Live View") - # self.imageDisplayTabs.addTab(self.imageArrayDisplayWindow.widget, "Multichannel Acquisition") - - # load objects - if is_simulation: - self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - self.microcontroller = microcontroller.Microcontroller_Simulation() - self.spectrometer = spectrometer.Spectrometer_Simulation() - else: - self.camera = camera.Camera(rotate_image_angle=ROTATE_IMAGE_ANGLE,flip_image=FLIP_IMAGE) - try: - self.microcontroller = microcontroller.Microcontroller() - except: - print('! Microcontroller not detected, using simulated microcontroller !') - self.microcontroller = microcontroller.Microcontroller_Simulation() - try: - self.spectrometer = spectrometer.Spectrometer() - except: - print('! 
Spectrometer not detected, using simulated microcontroller !') - self.spectrometer = spectrometer.Spectrometer_Simulation() - - # configure the actuators - self.microcontroller.configure_actuators() - - self.configurationManager = core.ConfigurationManager('./channel_configurations.xml') - self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager,self.spectrometer) - if ENABLE_TRACKING: - self.trackingController = core.TrackingController(self.camera,self.microcontroller,self.navigationController,self.configurationManager,self.liveController,self.autofocusController,self.imageDisplayWindow) - self.imageSaver = core.ImageSaver(image_format=Acquisition.IMAGE_FORMAT) - self.imageDisplay = core.ImageDisplay() - self.spectrometerStreamHandler = core_usbspectrometer.SpectrumStreamHandler() - self.spectrumSaver = core_usbspectrometer.SpectrumSaver() - - # open the camera - # camera start streaming - self.camera.open() - # self.camera.set_reverse_x(CAMERA_REVERSE_X) # these are not implemented for the cameras in use - # self.camera.set_reverse_y(CAMERA_REVERSE_Y) # these are not implemented for the cameras in use - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - if ENABLE_STROBE_OUTPUT: - self.camera.set_line3_to_exposure_active() - - self.spectrometer.set_callback(self.spectrometerStreamHandler.on_new_measurement) - - # load widgets - 
self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,include_gain_exposure_time=False) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.dacControlWidget = widgets.DACControWidget(self.microcontroller) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - self.spectrumRecordingWidget = widgets_usbspectrometer.RecordingWidget(self.spectrometerStreamHandler,self.spectrumSaver) - if ENABLE_TRACKING: - self.trackingControlWidget = widgets.TrackingControllerWidget(self.trackingController,self.configurationManager,show_configurations=TRACKING_SHOW_MICROSCOPE_CONFIGURATIONS) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - - self.recordTabWidget = QTabWidget() - if ENABLE_TRACKING: - self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - self.recordTabWidget.addTab(self.recordingControlWidget, "Recording - Camera") - self.recordTabWidget.addTab(self.spectrumRecordingWidget, "Recording - Spectrometer") - self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - self.spectrometerControlWidget = widgets_usbspectrometer.SpectrometerControlWidget(self.spectrometer,self.spectrometerStreamHandler) - self.spectrumDisplay = widgets_usbspectrometer.SpectrumDisplay() - - # layout widgets - layout = QVBoxLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget) - layout.addWidget(self.liveControlWidget) - layout.addWidget(self.navigationWidget) - if SHOW_DAC_CONTROL: - layout.addWidget(self.dacControlWidget) - layout.addWidget(self.autofocusWidget) - layout.addWidget(self.spectrometerControlWidget) - layout.addWidget(self.recordTabWidget) - layout.addStretch() - - # transfer the layout to 
the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - # self.centralWidget.setFixedSize(self.centralWidget.minimumSize()) - # self.centralWidget.setFixedWidth(self.centralWidget.minimumWidth()) - # self.centralWidget.setMaximumWidth(self.centralWidget.minimumWidth()) - self.centralWidget.setFixedWidth(self.centralWidget.minimumSizeHint().width()) - - if SINGLE_WINDOW: - dock_display = dock.Dock('Image Display', autoOrientation = False) - dock_display.showTitleBar() - dock_display.addWidget(self.imageDisplayTabs) - dock_display.setStretch(x=100,y=60) - dock_spectrum = dock.Dock('Spectrum', autoOrientation = False) - dock_spectrum.showTitleBar() - dock_spectrum.addWidget(self.spectrumDisplay) - dock_spectrum.setStretch(x=100,y=40) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - dock_controlPanel = dock.Dock('Controls', autoOrientation = False) - # dock_controlPanel.showTitleBar() - dock_controlPanel.addWidget(self.centralWidget) - dock_controlPanel.setStretch(x=1,y=None) - dock_controlPanel.setFixedWidth(dock_controlPanel.minimumSizeHint().width()) - main_dockArea = dock.DockArea() - main_dockArea.addDock(dock_display) - main_dockArea.addDock(dock_spectrum,'bottom') - main_dockArea.addDock(dock_controlPanel,'right') - self.setCentralWidget(main_dockArea) - desktopWidget = QDesktopWidget() - height_min = 0.9*desktopWidget.height() - width_min = 0.96*desktopWidget.width() - self.setMinimumSize(width_min,height_min) - else: - self.setCentralWidget(self.centralWidget) - self.tabbedImageDisplayWindow = QMainWindow() - self.tabbedImageDisplayWindow.setCentralWidget(self.imageDisplayTabs) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint) - self.tabbedImageDisplayWindow.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint) - desktopWidget = QDesktopWidget() - width = 0.96*desktopWidget.height() - height = width - 
self.tabbedImageDisplayWindow.setFixedSize(width,height) - self.tabbedImageDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - # self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - if ENABLE_TRACKING: - self.navigationController.signal_joystick_button_pressed.connect(self.trackingControlWidget.slot_joystick_button_pressed) - else: - self.navigationController.signal_joystick_button_pressed.connect(self.autofocusController.autofocus) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.multipointController.spectrum_to_display.connect(self.spectrumDisplay.plot) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - self.spectrometerStreamHandler.spectrum_to_display.connect(self.spectrumDisplay.plot) - self.spectrometerStreamHandler.spectrum_to_write.connect(self.spectrumSaver.enqueue) - 
- def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - self.imageDisplay.close() - self.spectrometer.close() - self.spectrumSaver.close() - if not SINGLE_WINDOW: - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() - self.tabbedImageDisplayWindow.close() - self.microcontroller.close() diff --git a/software/control/gui_volumetric_imaging.py b/software/control/gui_volumetric_imaging.py deleted file mode 100644 index 518c1c71a..000000000 --- a/software/control/gui_volumetric_imaging.py +++ /dev/null @@ -1,107 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.widgets as widgets -import control.camera as camera -import control.core as core -import control.core_volumetric_imaging as core_volumetric_imaging -import control.microcontroller as microcontroller - -class OctopiGUI(QMainWindow): - - # variables - fps_software_trigger = 100 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # load objects - self.camera = camera.Camera() - self.microcontroller = microcontroller.Microcontroller_Simulation() - - self.configurationManager = core.ConfigurationManager() - self.streamHandler = core_volumetric_imaging.StreamHandler(crop_width=500,crop_height=500) - self.liveController = core.LiveController(self.camera,self.microcontroller,self.configurationManager) - self.navigationController = core.NavigationController(self.microcontroller) - self.autofocusController = core.AutoFocusController(self.camera,self.navigationController,self.liveController) - self.multipointController = 
core.MultiPointController(self.camera,self.navigationController,self.liveController,self.autofocusController,self.configurationManager) - self.trackingController = core.TrackingController(self.microcontroller,self.navigationController) - self.imageSaver = core.ImageSaver() - self.imageDisplay = core.ImageDisplay() - - # open the camera - # camera start streaming - self.camera.open() - self.camera.set_software_triggered_acquisition() #self.camera.set_continuous_acquisition() - self.camera.set_callback(self.streamHandler.on_new_frame) - self.camera.enable_callback() - - # load widgets - self.cameraSettingWidget = widgets.CameraSettingsWidget(self.camera,self.liveController) - self.liveControlWidget = widgets.LiveControlWidget(self.streamHandler,self.liveController,self.configurationManager) - self.navigationWidget = widgets.NavigationWidget(self.navigationController) - self.autofocusWidget = widgets.AutoFocusWidget(self.autofocusController) - self.recordingControlWidget = widgets.RecordingWidget(self.streamHandler,self.imageSaver) - self.trackingControlWidget = widgets.TrackingControllerWidget(self.streamHandler,self.trackingController) - self.multiPointWidget = widgets.MultiPointWidget(self.multipointController,self.configurationManager) - - self.recordTabWidget = QTabWidget() - self.recordTabWidget.addTab(self.recordingControlWidget, "Simple Recording") - # self.recordTabWidget.addTab(self.trackingControlWidget, "Tracking") - # self.recordTabWidget.addTab(self.multiPointWidget, "Multipoint Acquisition") - - # layout widgets - layout = QGridLayout() #layout = QStackedLayout() - layout.addWidget(self.cameraSettingWidget,0,0) - layout.addWidget(self.liveControlWidget,1,0) - # layout.addWidget(self.navigationWidget,2,0) - # layout.addWidget(self.autofocusWidget,3,0) - layout.addWidget(self.recordTabWidget,4,0) - - # transfer the layout to the central widget - self.centralWidget = QWidget() - self.centralWidget.setLayout(layout) - 
self.setCentralWidget(self.centralWidget) - - # load window - self.imageDisplayWindow = core.ImageDisplayWindow() - self.imageArrayDisplayWindow = core_volumetric_imaging.ImageArrayDisplayWindow() - self.imageDisplayWindow.show() - self.imageArrayDisplayWindow.show() - - # make connections - self.streamHandler.signal_new_frame_received.connect(self.liveController.on_new_frame) - self.streamHandler.image_to_display.connect(self.imageDisplay.enqueue) - self.streamHandler.packet_image_to_write.connect(self.imageSaver.enqueue) - self.streamHandler.packet_image_for_tracking.connect(self.trackingController.on_new_frame) - self.streamHandler.packet_image_for_array_display.connect(self.imageArrayDisplayWindow.display_image) - self.imageDisplay.image_to_display.connect(self.imageDisplayWindow.display_image) # may connect streamHandler directly to imageDisplayWindow - self.navigationController.xPos.connect(self.navigationWidget.label_Xpos.setNum) - self.navigationController.yPos.connect(self.navigationWidget.label_Ypos.setNum) - self.navigationController.zPos.connect(self.navigationWidget.label_Zpos.setNum) - self.autofocusController.image_to_display.connect(self.imageDisplayWindow.display_image) - # self.multipointController.image_to_display.connect(self.imageDisplayWindow.display_image) - self.multipointController.signal_current_configuration.connect(self.liveControlWidget.set_microscope_mode) - self.multipointController.image_to_display_multi.connect(self.imageArrayDisplayWindow.display_image) - self.liveControlWidget.signal_newExposureTime.connect(self.cameraSettingWidget.set_exposure_time) - self.liveControlWidget.signal_newAnalogGain.connect(self.cameraSettingWidget.set_analog_gain) - self.liveControlWidget.update_camera_settings() - - def closeEvent(self, event): - event.accept() - # self.softwareTriggerGenerator.stop() @@@ => - self.navigationController.home() - self.liveController.stop_live() - self.camera.close() - self.imageSaver.close() - 
self.imageDisplay.close() - self.imageDisplayWindow.close() - self.imageArrayDisplayWindow.close() \ No newline at end of file diff --git a/software/control/gxipy/dxwrapper.py b/software/control/gxipy/dxwrapper.py index 1623f71c7..caf6c7938 100644 --- a/software/control/gxipy/dxwrapper.py +++ b/software/control/gxipy/dxwrapper.py @@ -13,7 +13,10 @@ print('Cannot find libgxiapi.so.') else: try: - dll = WinDLL('DxImageProc.dll') + if (sys.version_info.major == 3 and sys.version_info.minor >= 8) or (sys.version_info.major > 3): + dll = WinDLL('DxImageProc.dll', winmode=0) + else: + dll = WinDLL('DxImageProc.dll') except OSError: print('Cannot find DxImageProc.dll.') diff --git a/software/control/gxipy/gxwrapper.py b/software/control/gxipy/gxwrapper.py index 30a1518bd..3ae1d08f3 100644 --- a/software/control/gxipy/gxwrapper.py +++ b/software/control/gxipy/gxwrapper.py @@ -13,7 +13,10 @@ print("Cannot find libgxiapi.so.") else: try: - dll = WinDLL('GxIAPI.dll') + if (sys.version_info.major == 3 and sys.version_info.minor >= 8) or (sys.version_info.major > 3): + dll = WinDLL('GxIAPI.dll', winmode=0) + else: + dll = WinDLL('GxIAPI.dll') except OSError: print('Cannot find GxIAPI.dll.') diff --git a/software/control/microcontroller.py b/software/control/microcontroller.py index d26ccdee7..ee37315bc 100644 --- a/software/control/microcontroller.py +++ b/software/control/microcontroller.py @@ -5,12 +5,13 @@ import numpy as np import threading from crc import CrcCalculator, Crc8 +import struct +from qtpy.QtCore import QTimer + +import squid.logging from control._def import * -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * # add user to the dialout group to avoid the need to use sudo @@ -19,11 +20,101 @@ # check if the microcontroller has finished executing the more recent command # to do (7/28/2021) - add functions for configuring the stepper motors +class CommandAborted(RuntimeError): + """ + If we send a command and it needs to abort for 
any reason (too many retries, + timeout waiting for the mcu to acknowledge, etc), the Microcontroller class will throw this + for wait and progress check operations until a new command is started. + + This does mean that if you don't check for command completion, you may miss these errors! + """ + def __init__(self, command_id, reason): + super().__init__(reason) + self.command_id = command_id + + +class SimSerial: + @staticmethod + def response_bytes_for(command_id, execution_status, x, y, z, theta, joystick_button, switch): + """ + - command ID (1 byte) + - execution status (1 byte) + - X pos (4 bytes) + - Y pos (4 bytes) + - Z pos (4 bytes) + - Theta (4 bytes) + - buttons and switches (1 byte) + - reserved (4 bytes) + - CRC (1 byte) + """ + crc_calculator = CrcCalculator(Crc8.CCITT,table_based=True) + + button_state = joystick_button << BIT_POS_JOYSTICK_BUTTON | switch << BIT_POS_SWITCH + reserved_state = 0 # This is just filler for the 4 reserved bytes. + response = bytearray(struct.pack(">BBiiiiBi", command_id, execution_status, x, y, z, theta, button_state, reserved_state)) + response.append(crc_calculator.calculate_checksum(response)) + return response + + def __init__(self): + self.in_waiting = 0 + self.response_buffer = [] + + self.x = 0 + self.y = 0 + self.z = 0 + self.theta = 0 + self.joystick_button = False + self.switch = False + + self.closed = False + + def respond_to(self, write_bytes): + # Just immediately respond that the command was successful. We can + # implement specific command handlers here in the future for eg: + # correct position tracking and such. 
+ self.response_buffer.extend(SimSerial.response_bytes_for( + write_bytes[0], + CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS, + self.x, + self.y, + self.z, + self.theta, + self.joystick_button, + self.switch)) + + self.in_waiting = len(self.response_buffer) + + def close(self): + self.closed = True + + def write(self, data): + if self.closed: + raise IOError("Closed") + self.respond_to(data) + + def read(self, count=1): + if self.closed: + raise IOError("Closed") + + response = bytearray() + for i in range(count): + if not len(self.response_buffer): + break + response.append(self.response_buffer.pop(0)) + + self.in_waiting = len(self.response_buffer) + return response + + +class Microcontroller: + LAST_COMMAND_ACK_TIMEOUT = 0.5 + MAX_RETRY_COUNT = 5 + + def __init__(self, version='Arduino Due', sn=None, existing_serial=None, is_simulation=False): + self.is_simulation = is_simulation + + self.log = squid.logging.get_logger(self.__class__.__name__) -class Microcontroller(): - def __init__(self,version='Arduino Due',sn=None,parent=None): - self.serial = None - self.platform_name = platform.system() self.tx_buffer_length = MicrocontrollerDef.CMD_LENGTH self.rx_buffer_length = MicrocontrollerDef.MSG_LENGTH @@ -42,30 +133,35 @@ def __init__(self,version='Arduino Due',sn=None,parent=None): self.switch_state = 0 self.last_command = None - self.timeout_counter = 0 - self.last_command_timestamp = time.time() + self.last_command_send_timestamp = time.time() + self.last_command_aborted_error = None self.crc_calculator = CrcCalculator(Crc8.CCITT,table_based=True) self.retry = 0 - print('connecting to controller based on ' + version) + self.log.debug("connecting to controller based on " + version) - if version =='Arduino Due': - controller_ports = [p.device for p in serial.tools.list_ports.comports() if 'Arduino Due' == p.description] # autodetect - based on Deepak's code + if existing_serial: + self.serial = existing_serial else: - if sn is not None: - controller_ports = [ 
p.device for p in serial.tools.list_ports.comports() if sn == p.serial_number] + if version =='Arduino Due': + controller_ports = [p.device for p in serial.tools.list_ports.comports() if 'Arduino Due' == p.description] # autodetect - based on Deepak's code else: - controller_ports = [ p.device for p in serial.tools.list_ports.comports() if p.manufacturer == 'Teensyduino'] - - if not controller_ports: - raise IOError("no controller found") - if len(controller_ports) > 1: - print('multiple controller found - using the first') - - self.serial = serial.Serial(controller_ports[0],2000000) - time.sleep(0.2) - print('controller connected') + if sn is not None: + controller_ports = [ p.device for p in serial.tools.list_ports.comports() if sn == p.serial_number] + else: + if sys.platform == 'win32': + controller_ports = [ p.device for p in serial.tools.list_ports.comports() if p.manufacturer == 'Microsoft'] + else: + controller_ports = [ p.device for p in serial.tools.list_ports.comports() if p.manufacturer == 'Teensyduino'] + + if not controller_ports: + raise IOError("no controller found") + if len(controller_ports) > 1: + self.log.warning("multiple controller found - using the first") + + self.serial = serial.Serial(controller_ports[0],2000000) + self.log.debug("controller connected") self.new_packet_callback_external = None self.terminate_reading_received_packet_thread = False @@ -78,18 +174,21 @@ def close(self): self.serial.close() def reset(self): - self._cmd_id = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.RESET + self.log.debug("reset the microcontroller") self.send_command(cmd) - print('reset the microcontroller') # debug + # On the microcontroller side, reset forces the command Id back to 0 + # so any responses will look like they are for command id 0. Force that + # here. 
+ self._cmd_id = 0 def initialize_drivers(self): self._cmd_id = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.INITIALIZE self.send_command(cmd) - print('initialize the drivers') # debug + self.log.debug("initialize the drivers") def turn_on_illumination(self): cmd = bytearray(self.tx_buffer_length) @@ -101,7 +200,7 @@ def turn_off_illumination(self): cmd[1] = CMD_SET.TURN_OFF_ILLUMINATION self.send_command(cmd) - def set_illumination(self,illumination_source,intensity,r=None,g=None,b=None): + def set_illumination(self,illumination_source,intensity): cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.SET_ILLUMINATION cmd[2] = illumination_source @@ -113,8 +212,8 @@ def set_illumination_led_matrix(self,illumination_source,r,g,b): cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.SET_ILLUMINATION_LED_MATRIX cmd[2] = illumination_source - cmd[3] = min(int(r*255),255) - cmd[4] = min(int(g*255),255) + cmd[3] = min(int(g*255),255) + cmd[4] = min(int(r*255),255) cmd[5] = min(int(b*255),255) self.send_command(cmd) @@ -139,52 +238,50 @@ def set_strobe_delay_us(self, strobe_delay_us, camera_channel=0): cmd[6] = strobe_delay_us & 0xff self.send_command(cmd) - ''' - def move_x(self,delta): - direction = int((np.sign(delta)+1)/2) - n_microsteps = abs(delta*Motion.STEPS_PER_MM_XY) - if n_microsteps > 65535: - n_microsteps = 65535 + def set_axis_enable_disable(self, axis, status): cmd = bytearray(self.tx_buffer_length) - cmd[0] = CMD_SET.MOVE_X - cmd[1] = direction - cmd[2] = int(n_microsteps) >> 8 - cmd[3] = int(n_microsteps) & 0xff - self.serial.write(cmd) - ''' + cmd[1] = CMD_SET.SET_AXIS_DISABLE_ENABLE + cmd[2] = axis + cmd[3] = status + self.send_command(cmd) - def move_x_usteps(self,usteps): - direction = STAGE_MOVEMENT_SIGN_X*np.sign(usteps) + def _move_axis_usteps(self, usteps, axis_command_code, axis_direction_sign): + direction = axis_direction_sign * np.sign(usteps) n_microsteps_abs = abs(usteps) # if n_microsteps_abs exceed the max value that can 
be sent in one go - while n_microsteps_abs >= (2**32)/2: - n_microsteps_partial_abs = (2**32)/2 - 1 - n_microsteps_partial = direction*n_microsteps_partial_abs - payload = self._int_to_payload(n_microsteps_partial,4) + while n_microsteps_abs >= (2 ** 32) / 2: + n_microsteps_partial_abs = (2 ** 32) / 2 - 1 + n_microsteps_partial = direction * n_microsteps_partial_abs + payload = self._int_to_payload(n_microsteps_partial, 4) cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_X + cmd[1] = axis_command_code cmd[2] = payload >> 24 cmd[3] = (payload >> 16) & 0xff cmd[4] = (payload >> 8) & 0xff cmd[5] = payload & 0xff + # TODO(imo): Since this issues multiple commands, there's no way to check for and abort failed + # ones mid-move. self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs - n_microsteps = direction*n_microsteps_abs - payload = self._int_to_payload(n_microsteps,4) + n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs + n_microsteps = direction * n_microsteps_abs + payload = self._int_to_payload(n_microsteps, 4) cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_X + cmd[1] = axis_command_code cmd[2] = payload >> 24 cmd[3] = (payload >> 16) & 0xff cmd[4] = (payload >> 8) & 0xff cmd[5] = payload & 0xff self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) + + def move_x_usteps(self,usteps): + if self.is_simulation: + self.x_pos = self.x_pos + STAGE_MOVEMENT_SIGN_X*usteps + self._move_axis_usteps(usteps, CMD_SET.MOVE_X, STAGE_MOVEMENT_SIGN_X) def move_x_to_usteps(self,usteps): + if self.is_simulation: + self.x_pos = usteps payload = self._int_to_payload(usteps,4) cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.MOVETO_X @@ -194,52 +291,14 @@ def move_x_to_usteps(self,usteps): cmd[5] = payload & 0xff 
self.send_command(cmd) - ''' - def move_y(self,delta): - direction = int((np.sign(delta)+1)/2) - n_microsteps = abs(delta*Motion.STEPS_PER_MM_XY) - if n_microsteps > 65535: - n_microsteps = 65535 - cmd = bytearray(self.tx_buffer_length) - cmd[0] = CMD_SET.MOVE_Y - cmd[1] = direction - cmd[2] = int(n_microsteps) >> 8 - cmd[3] = int(n_microsteps) & 0xff - self.serial.write(cmd) - ''' - def move_y_usteps(self,usteps): - direction = STAGE_MOVEMENT_SIGN_Y*np.sign(usteps) - n_microsteps_abs = abs(usteps) - # if n_microsteps_abs exceed the max value that can be sent in one go - while n_microsteps_abs >= (2**32)/2: - n_microsteps_partial_abs = (2**32)/2 - 1 - n_microsteps_partial = direction*n_microsteps_partial_abs - payload = self._int_to_payload(n_microsteps_partial,4) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_Y - cmd[2] = payload >> 24 - cmd[3] = (payload >> 16) & 0xff - cmd[4] = (payload >> 8) & 0xff - cmd[5] = payload & 0xff - self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs - - n_microsteps = direction*n_microsteps_abs - payload = self._int_to_payload(n_microsteps,4) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_Y - cmd[2] = payload >> 24 - cmd[3] = (payload >> 16) & 0xff - cmd[4] = (payload >> 8) & 0xff - cmd[5] = payload & 0xff - self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) + if self.is_simulation: + self.y_pos = self.y_pos + STAGE_MOVEMENT_SIGN_Y*usteps + self._move_axis_usteps(usteps, CMD_SET.MOVE_Y, STAGE_MOVEMENT_SIGN_Y) def move_y_to_usteps(self,usteps): + if self.is_simulation: + self.y_pos = usteps payload = self._int_to_payload(usteps,4) cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.MOVETO_Y @@ -249,52 +308,14 @@ def move_y_to_usteps(self,usteps): cmd[5] = payload & 0xff 
self.send_command(cmd) - ''' - def move_z(self,delta): - direction = int((np.sign(delta)+1)/2) - n_microsteps = abs(delta*Motion.STEPS_PER_MM_Z) - if n_microsteps > 65535: - n_microsteps = 65535 - cmd = bytearray(self.tx_buffer_length) - cmd[0] = CMD_SET.MOVE_Z - cmd[1] = 1-direction - cmd[2] = int(n_microsteps) >> 8 - cmd[3] = int(n_microsteps) & 0xff - self.serial.write(cmd) - ''' - def move_z_usteps(self,usteps): - direction = STAGE_MOVEMENT_SIGN_Z*np.sign(usteps) - n_microsteps_abs = abs(usteps) - # if n_microsteps_abs exceed the max value that can be sent in one go - while n_microsteps_abs >= (2**32)/2: - n_microsteps_partial_abs = (2**32)/2 - 1 - n_microsteps_partial = direction*n_microsteps_partial_abs - payload = self._int_to_payload(n_microsteps_partial,4) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_Z - cmd[2] = payload >> 24 - cmd[3] = (payload >> 16) & 0xff - cmd[4] = (payload >> 8) & 0xff - cmd[5] = payload & 0xff - self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs - - n_microsteps = direction*n_microsteps_abs - payload = self._int_to_payload(n_microsteps,4) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_Z - cmd[2] = payload >> 24 - cmd[3] = (payload >> 16) & 0xff - cmd[4] = (payload >> 8) & 0xff - cmd[5] = payload & 0xff - self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) + if self.is_simulation: + self.z_pos = self.z_pos + STAGE_MOVEMENT_SIGN_Z*usteps + self._move_axis_usteps(usteps, CMD_SET.MOVE_Z, STAGE_MOVEMENT_SIGN_Z) def move_z_to_usteps(self,usteps): + if self.is_simulation: + self.z_pos = usteps payload = self._int_to_payload(usteps,4) cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.MOVETO_Z @@ -305,35 +326,9 @@ def move_z_to_usteps(self,usteps): self.send_command(cmd) def 
move_theta_usteps(self,usteps): - direction = STAGE_MOVEMENT_SIGN_THETA*np.sign(usteps) - n_microsteps_abs = abs(usteps) - # if n_microsteps_abs exceed the max value that can be sent in one go - while n_microsteps_abs >= (2**32)/2: - n_microsteps_partial_abs = (2**32)/2 - 1 - n_microsteps_partial = direction*n_microsteps_partial_abs - payload = self._int_to_payload(n_microsteps_partial,4) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_THETA - cmd[2] = payload >> 24 - cmd[3] = (payload >> 16) & 0xff - cmd[4] = (payload >> 8) & 0xff - cmd[5] = payload & 0xff - self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs - - n_microsteps = direction*n_microsteps_abs - payload = self._int_to_payload(n_microsteps,4) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.MOVE_THETA - cmd[2] = payload >> 24 - cmd[3] = (payload >> 16) & 0xff - cmd[4] = (payload >> 8) & 0xff - cmd[5] = payload & 0xff - self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) + if self.is_simulation: + self.theta_pos = self.theta_pos + usteps + self._move_axis_usteps(usteps, CMD_SET.MOVE_THETA, STAGE_MOVEMENT_SIGN_THETA) def set_off_set_velocity_x(self,off_set_velocity): # off_set_velocity is in mm/s @@ -361,46 +356,46 @@ def set_off_set_velocity_y(self,off_set_velocity): self.send_command(cmd) def home_x(self): + if self.is_simulation: + self.x_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.X cmd[3] = int((STAGE_MOVEMENT_SIGN_X+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1 self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - # # to do: add timeout def home_y(self): + if self.is_simulation: + self.y_pos = 0 cmd = 
bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.Y cmd[3] = int((STAGE_MOVEMENT_SIGN_Y+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1 self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # sleep(self._motion_status_checking_interval) - # # to do: add timeout def home_z(self): + if self.is_simulation: + self.z_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.Z cmd[3] = int((STAGE_MOVEMENT_SIGN_Z+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1 self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - # # to do: add timeout def home_theta(self): + if self.is_simulation: + self.theta_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = 3 cmd[3] = int((STAGE_MOVEMENT_SIGN_THETA+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1 self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - # # to do: add timeout def home_xy(self): + if self.is_simulation: + self.x_pos = 0 + self.y_pos = 0 + self.y_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.XY @@ -409,52 +404,49 @@ def home_xy(self): self.send_command(cmd) def zero_x(self): + if self.is_simulation: + self.x_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.X cmd[3] = HOME_OR_ZERO.ZERO self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - # # to do: add timeout def zero_y(self): + if self.is_simulation: + self.y_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.Y cmd[3] = HOME_OR_ZERO.ZERO self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # 
sleep(self._motion_status_checking_interval) - # # to do: add timeout def zero_z(self): + if self.is_simulation: + self.z_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.Z cmd[3] = HOME_OR_ZERO.ZERO self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - # # to do: add timeout def zero_theta(self): + if self.is_simulation: + self.theta_pos = 0 cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.HOME_OR_ZERO cmd[2] = AXIS.THETA cmd[3] = HOME_OR_ZERO.ZERO self.send_command(cmd) - # while self.mcu_cmd_execution_in_progress == True: - # time.sleep(self._motion_status_checking_interval) - # # to do: add timeout def configure_stage_pid(self, axis, transitions_per_revolution, flip_direction=False): cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.CONFIGURE_STAGE_PID cmd[2] = axis + cmd[3] = int(flip_direction) payload = self._int_to_payload(transitions_per_revolution,2) - cmd[3] = (payload >> 8) & 0xff - cmd[4] = payload & 0xff + cmd[4] = (payload >> 8) & 0xff + cmd[5] = payload & 0xff self.send_command(cmd) def turn_on_stage_pid(self, axis): @@ -469,6 +461,18 @@ def turn_off_stage_pid(self, axis): cmd[2] = axis self.send_command(cmd) + def set_pid_arguments(self, axis, pid_p, pid_i, pid_d): + cmd = bytearray(self.tx_buffer_length) + cmd[1] = CMD_SET.SET_PID_ARGUMENTS + cmd[2] = int(axis) + + cmd[3] = (int(pid_p) >> 8) & 0xff + cmd[4] = int(pid_p) & 0xff + + cmd[5] = int(pid_i) + cmd[6] = int(pid_d) + self.send_command(cmd) + def set_lim(self,limit_code,usteps): cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.SET_LIM @@ -487,6 +491,17 @@ def set_limit_switch_polarity(self,axis,polarity): cmd[3] = polarity self.send_command(cmd) + def set_home_safety_margin(self, axis, margin): + margin = abs(margin) + if margin > 0xFFFF: + margin = 0xFFFF + cmd = bytearray(self.tx_buffer_length) + cmd[1] = CMD_SET.SET_HOME_SAFETY_MERGIN + cmd[2] = axis 
+ cmd[3] = (margin >> 8) & 0xff + cmd[4] = (margin) & 0xff + self.send_command(cmd) + def configure_motor_driver(self,axis,microstepping,current_rms,I_hold): # current_rms in mA # I_hold 0.0-1.0 @@ -554,6 +569,13 @@ def configure_actuators(self): self.wait_till_operation_is_completed() self.set_limit_switch_polarity(AXIS.Z,Z_HOME_SWITCH_POLARITY) self.wait_till_operation_is_completed() + # home safety margin + self.set_home_safety_margin(AXIS.X, int(X_HOME_SAFETY_MARGIN_UM)) + self.wait_till_operation_is_completed() + self.set_home_safety_margin(AXIS.Y, int(Y_HOME_SAFETY_MARGIN_UM)) + self.wait_till_operation_is_completed() + self.set_home_safety_margin(AXIS.Z, int(Z_HOME_SAFETY_MARGIN_UM)) + self.wait_till_operation_is_completed() def ack_joystick_button_pressed(self): cmd = bytearray(self.tx_buffer_length) @@ -568,6 +590,13 @@ def analog_write_onboard_DAC(self,dac,value): cmd[4] = value & 0xff self.send_command(cmd) + def configure_dac80508_refdiv_and_gain(self, div, gains): + cmd = bytearray(self.tx_buffer_length) + cmd[1] = CMD_SET.SET_DAC80508_REFDIV_GAIN + cmd[2] = div + cmd[3] = gains + self.send_command(cmd) + def set_pin_level(self,pin,level): cmd = bytearray(self.tx_buffer_length) cmd[1] = CMD_SET.SET_PIN_LEVEL @@ -588,28 +617,48 @@ def send_command(self,command): self.serial.write(command) self.mcu_cmd_execution_in_progress = True self.last_command = command - self.timeout_counter = 0 - self.last_command_timestamp = time.time() + self.last_command_send_timestamp = time.time() self.retry = 0 + if self.last_command_aborted_error is not None: + self.log.warning("Last command aborted and not cleared before new command sent!", self.last_command_aborted_error) + self.last_command_aborted_error = None + + def abort_current_command(self, reason): + self.log.error(f"Command id={self._cmd_id} aborted for reason='{reason}'") + self.last_command_aborted_error = CommandAborted(reason=reason, command_id=self._cmd_id) + self.mcu_cmd_execution_in_progress = False + + 
def acknowledge_aborted_command(self): + if self.last_command_aborted_error is None: + self.log.warning("Request to ack aborted command, but there is no aborted command.") + + self.last_command_aborted_error = None + def resend_last_command(self): - self.serial.write(self.last_command) - self.mcu_cmd_execution_in_progress = True - self.timeout_counter = 0 - self.retry = self.retry + 1 + if self.last_command is not None: + self.serial.write(self.last_command) + self.mcu_cmd_execution_in_progress = True + # We use the retry count for both checksum errors, and to keep track of + # timeout re-attempts. + self.last_command_send_timestamp = time.time() + self.retry = self.retry + 1 + else: + self.log.warning("resend requested with no last_command, something is wrong!") + self.abort_current_command("Resend last requested with no last command") def read_received_packet(self): while self.terminate_reading_received_packet_thread == False: # wait to receive data - if self.serial.in_waiting==0: - continue - if self.serial.in_waiting % self.rx_buffer_length != 0: + if self.serial.in_waiting==0 or self.serial.in_waiting % self.rx_buffer_length != 0: + # Sleep a negligible amount of time just to give other threads time to run. Otherwise, + # we run the rise of spinning forever here and not letting progress happen elsewhere. 
+ time.sleep(0.0001) continue # get rid of old data num_bytes_in_rx_buffer = self.serial.in_waiting if num_bytes_in_rx_buffer > self.rx_buffer_length: - # print('getting rid of old data') for i in range(num_bytes_in_rx_buffer-self.rx_buffer_length): self.serial.read() @@ -633,28 +682,30 @@ def read_received_packet(self): self._cmd_id_mcu = msg[0] self._cmd_execution_status = msg[1] if (self._cmd_id_mcu == self._cmd_id) and (self._cmd_execution_status == CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS): - if self.mcu_cmd_execution_in_progress == True: + if self.mcu_cmd_execution_in_progress: self.mcu_cmd_execution_in_progress = False - print(' mcu command ' + str(self._cmd_id) + ' complete') - elif self._cmd_id_mcu != self._cmd_id and time.time() - self.last_command_timestamp > 5 and self.last_command != None: - self.timeout_counter = self.timeout_counter + 1 - if self.timeout_counter > 10: + self.log.debug("mcu command " + str(self._cmd_id) + " complete") + elif self.mcu_cmd_execution_in_progress and self._cmd_id_mcu != self._cmd_id and time.time() - self.last_command_send_timestamp > self.LAST_COMMAND_ACK_TIMEOUT and self.last_command is not None: + if self.retry > self.MAX_RETRY_COUNT: + self.abort_current_command(reason=f"Command timed out without an ack after {self.LAST_COMMAND_ACK_TIMEOUT} [s], and {self.retry} retries") + else: + self.log.debug(f"command timed out without an ack after {self.LAST_COMMAND_ACK_TIMEOUT} [s], resending command") self.resend_last_command() - print(' *** resend the last command') - elif self._cmd_execution_status == CMD_EXECUTION_STATUS.CMD_CHECKSUM_ERROR: - print('! cmd checksum error, resending command') - if self.retry > 10: - print('!! 
resending command failed for more than 10 times, the program will exit') - exit() + elif self.mcu_cmd_execution_in_progress and self._cmd_execution_status == CMD_EXECUTION_STATUS.CMD_CHECKSUM_ERROR: + if self.retry > self.MAX_RETRY_COUNT: + self.abort_current_command(reason=f"Checksum error and 10 retries for {self._cmd_id}") else: + self.log.error("cmd checksum error, resending command") self.resend_last_command() - # print('command id ' + str(self._cmd_id) + '; mcu command ' + str(self._cmd_id_mcu) + ' status: ' + str(msg[1]) ) - self.x_pos = self._payload_to_int(msg[2:6],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - self.y_pos = self._payload_to_int(msg[6:10],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - self.z_pos = self._payload_to_int(msg[10:14],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - self.theta_pos = self._payload_to_int(msg[14:18],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - + if self.is_simulation: + pass + else: + self.x_pos = self._payload_to_int(msg[2:6],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution + self.y_pos = self._payload_to_int(msg[6:10],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution + self.z_pos = self._payload_to_int(msg[10:14],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution + self.theta_pos = self._payload_to_int(msg[14:18],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution + self.button_and_switch_state = msg[18] # joystick button tmp = self.button_and_switch_state & (1 << BIT_POS_JOYSTICK_BUTTON) @@ -682,386 +733,46 @@ def is_busy(self): def set_callback(self,function): self.new_packet_callback_external = function - def wait_till_operation_is_completed(self, TIMEOUT_LIMIT_S=5): + def wait_till_operation_is_completed(self, timeout_limit_s=5): + """ + Wait for the current command to complete. 
If the wait times out, the current command isn't touched. To + abort it, you should call the abort_current_command(...) method. + """ timestamp_start = time.time() - while self.is_busy(): + while self.is_busy() and self.last_command_aborted_error is None: time.sleep(0.02) - if time.time() - timestamp_start > TIMEOUT_LIMIT_S: - print('Error - microcontroller timeout, the program will exit') - exit() + if time.time() - timestamp_start > timeout_limit_s: + raise TimeoutError(f"Current mcu operation timed out after {timeout_limit_s} [s].") - def _int_to_payload(self,signed_int,number_of_bytes): + if self.last_command_aborted_error is not None: + raise self.last_command_aborted_error + + @staticmethod + def _int_to_payload(signed_int,number_of_bytes): if signed_int >= 0: payload = signed_int else: payload = 2**(8*number_of_bytes) + signed_int # find two's completement return payload - def _payload_to_int(self,payload,number_of_bytes): + @staticmethod + def _payload_to_int(payload,number_of_bytes): signed = 0 for i in range(number_of_bytes): signed = signed + int(payload[i])*(256**(number_of_bytes-1-i)) if signed >= 256**number_of_bytes/2: signed = signed - 256**number_of_bytes return signed + + def set_dac80508_scaling_factor_for_illumination(self, illumination_intensity_factor): + if illumination_intensity_factor > 1: + illumination_intensity_factor = 1 -class Microcontroller_Simulation(): - def __init__(self,parent=None): - self.serial = None - self.platform_name = platform.system() - self.tx_buffer_length = MicrocontrollerDef.CMD_LENGTH - self.rx_buffer_length = MicrocontrollerDef.MSG_LENGTH - - self._cmd_id = 0 - self._cmd_id_mcu = None # command id of mcu's last received command - self._cmd_execution_status = None - self.mcu_cmd_execution_in_progress = False - - self.x_pos = 0 # unit: microstep or encoder resolution - self.y_pos = 0 # unit: microstep or encoder resolution - self.z_pos = 0 # unit: microstep or encoder resolution - self.theta_pos = 0 # unit: 
microstep or encoder resolution - self.button_and_switch_state = 0 - self.joystick_button_pressed = 0 - self.signal_joystick_button_pressed_event = False - self.switch_state = 0 - - # for simulation - self.timestamp_last_command = time.time() # for simulation only - self._mcu_cmd_execution_status = None - self.timer_update_command_execution_status = QTimer() - self.timer_update_command_execution_status.timeout.connect(self._simulation_update_cmd_execution_status) - - self.new_packet_callback_external = None - self.terminate_reading_received_packet_thread = False - self.thread_read_received_packet = threading.Thread(target=self.read_received_packet, daemon=True) - self.thread_read_received_packet.start() - - self.crc_calculator = CrcCalculator(Crc8.CCITT,table_based=True) - - def close(self): - self.terminate_reading_received_packet_thread = True - self.thread_read_received_packet.join() - - def reset(self): - self._cmd_id = 0 - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.RESET - self.send_command(cmd) - - def initialize_drivers(self): - self._cmd_id = 0 - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.INITIALIZE - self.send_command(cmd) - print('initialize the drivers') # debug - - def move_x_usteps(self,usteps): - self.x_pos = self.x_pos + STAGE_MOVEMENT_SIGN_X*usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': move x') - - def move_x_to_usteps(self,usteps): - self.x_pos = usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': move x to') - - def move_y_usteps(self,usteps): - self.y_pos = self.y_pos + STAGE_MOVEMENT_SIGN_Y*usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': move y') - - def move_y_to_usteps(self,usteps): - self.y_pos = usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + 
str(self._cmd_id) + ': move y to') - - def move_z_usteps(self,usteps): - self.z_pos = self.z_pos + STAGE_MOVEMENT_SIGN_Z*usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': move z') - - def move_z_to_usteps(self,usteps): - self.z_pos = usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': move z to') - - def move_theta_usteps(self,usteps): - self.theta_pos = self.theta_pos + usteps - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - - def home_x(self): - self.x_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': home x') - - def home_y(self): - self.y_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': home y') - - def home_z(self): - self.z_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': home z') - - def home_xy(self): - self.x_pos = 0 - self.y_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': home xy') - - def home_theta(self): - self.theta_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - - def zero_x(self): - self.x_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': zero x') - - def zero_y(self): - self.y_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': zero y') - - def zero_z(self): - self.z_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': zero z') - - def zero_theta(self): - self.theta_pos = 0 - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - - def 
set_lim(self,limit_code,usteps): - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - - def configure_motor_driver(self,axis,microstepping,current_rms,I_hold): - # current_rms in mA - # I_hold 0.0-1.0 - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.CONFIGURE_STEPPER_DRIVER - cmd[2] = axis - if microstepping == 1: - cmd[3] = 0 - elif microstepping == 256: - cmd[3] = 255 # max of uint8 is 255 - will be changed to 255 after received by the MCU - else: - cmd[3] = microstepping - cmd[4] = current_rms >> 8 - cmd[5] = current_rms & 0xff - cmd[6] = int(I_hold*255) - self.send_command(cmd) - - def set_max_velocity_acceleration(self,axis,velocity,acceleration): - # velocity: max 65535/100 mm/s - # acceleration: max 65535/10 mm/s^2 - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.SET_MAX_VELOCITY_ACCELERATION - cmd[2] = axis - cmd[3] = int(velocity*100) >> 8 - cmd[4] = int(velocity*100) & 0xff - cmd[5] = int(acceleration*10) >> 8 - cmd[6] = int(acceleration*10) & 0xff - self.send_command(cmd) - - def set_leadscrew_pitch(self,axis,pitch_mm): - # pitch: max 65535/1000 = 65.535 (mm) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.SET_LEAD_SCREW_PITCH - cmd[2] = axis - cmd[3] = int(pitch_mm*1000) >> 8 - cmd[4] = int(pitch_mm*1000) & 0xff - self.send_command(cmd) - - def set_limit_switch_polarity(self,axis,polarity): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.SET_LIM_SWITCH_POLARITY - cmd[2] = axis - cmd[3] = polarity - self.send_command(cmd) - - def configure_actuators(self): - # lead screw pitch - self.set_leadscrew_pitch(AXIS.X,SCREW_PITCH_X_MM) - self.wait_till_operation_is_completed() - self.set_leadscrew_pitch(AXIS.Y,SCREW_PITCH_Y_MM) - self.wait_till_operation_is_completed() - self.set_leadscrew_pitch(AXIS.Z,SCREW_PITCH_Z_MM) - self.wait_till_operation_is_completed() - # stepper driver (microstepping,rms current and I_hold) - 
self.configure_motor_driver(AXIS.X,MICROSTEPPING_DEFAULT_X,X_MOTOR_RMS_CURRENT_mA,X_MOTOR_I_HOLD) - self.wait_till_operation_is_completed() - self.configure_motor_driver(AXIS.Y,MICROSTEPPING_DEFAULT_Y,Y_MOTOR_RMS_CURRENT_mA,Y_MOTOR_I_HOLD) - self.wait_till_operation_is_completed() - self.configure_motor_driver(AXIS.Z,MICROSTEPPING_DEFAULT_Z,Z_MOTOR_RMS_CURRENT_mA,Z_MOTOR_I_HOLD) - self.wait_till_operation_is_completed() - # max velocity and acceleration - self.set_max_velocity_acceleration(AXIS.X,MAX_VELOCITY_X_mm,MAX_ACCELERATION_X_mm) - self.wait_till_operation_is_completed() - self.set_max_velocity_acceleration(AXIS.Y,MAX_VELOCITY_Y_mm,MAX_ACCELERATION_Y_mm) - self.wait_till_operation_is_completed() - self.set_max_velocity_acceleration(AXIS.Z,MAX_VELOCITY_Z_mm,MAX_ACCELERATION_Z_mm) - self.wait_till_operation_is_completed() - # home switch - self.set_limit_switch_polarity(AXIS.X,X_HOME_SWITCH_POLARITY) - self.wait_till_operation_is_completed() - self.set_limit_switch_polarity(AXIS.Y,Y_HOME_SWITCH_POLARITY) - self.wait_till_operation_is_completed() - self.set_limit_switch_polarity(AXIS.Z,Z_HOME_SWITCH_POLARITY) - self.wait_till_operation_is_completed() - - def analog_write_onboard_DAC(self,dac,value): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.ANALOG_WRITE_ONBOARD_DAC - cmd[2] = dac - cmd[3] = (value >> 8) & 0xff - cmd[4] = value & 0xff - self.send_command(cmd) - - def read_received_packet(self): - while self.terminate_reading_received_packet_thread == False: - # only for simulation - update the command execution status - if time.time() - self.timestamp_last_command > 0.05: # in the simulation, assume all the operation takes 0.05s to complete - if self._mcu_cmd_execution_status != CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS: - self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS - print(' mcu command ' + str(self._cmd_id) + ' complete') - - # read and parse message - msg=[] - for i in range(self.rx_buffer_length): - 
msg.append(0) - - msg[0] = self._cmd_id - msg[1] = self._mcu_cmd_execution_status - - self._cmd_id_mcu = msg[0] - self._cmd_execution_status = msg[1] - if (self._cmd_id_mcu == self._cmd_id) and (self._cmd_execution_status == CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS): - self.mcu_cmd_execution_in_progress = False - # print('mcu_cmd_execution_in_progress: ' + str(self.mcu_cmd_execution_in_progress)) - - # self.x_pos = utils.unsigned_to_signed(msg[2:6],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - # self.y_pos = utils.unsigned_to_signed(msg[6:10],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - # self.z_pos = utils.unsigned_to_signed(msg[10:14],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - # self.theta_pos = utils.unsigned_to_signed(msg[14:18],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution - - self.button_and_switch_state = msg[18] - - if self.new_packet_callback_external is not None: - self.new_packet_callback_external(self) - - time.sleep(0.005) # simulate MCU packet transmission interval - - def turn_on_illumination(self): - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': turn on illumination') - - def turn_off_illumination(self): - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': turn off illumination') - - def set_illumination(self,illumination_source,intensity): - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': set illumination') - - def set_illumination_led_matrix(self,illumination_source,r,g,b): - cmd = bytearray(self.tx_buffer_length) - self.send_command(cmd) - print(' mcu command ' + str(self._cmd_id) + ': set illumination (led matrix)') - - def send_hardware_trigger(self,control_illumination=False,illumination_on_time_us=0,trigger_output_ch = 0): - 
illumination_on_time_us = int(illumination_on_time_us) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.SEND_HARDWARE_TRIGGER - cmd[2] = (control_illumination<<7) + trigger_output_ch # MSB: whether illumination is controlled - cmd[3] = illumination_on_time_us >> 24 - cmd[4] = (illumination_on_time_us >> 16) & 0xff - cmd[5] = (illumination_on_time_us >> 8) & 0xff - cmd[6] = illumination_on_time_us & 0xff - self.send_command(cmd) - - def set_strobe_delay_us(self, strobe_delay_us, camera_channel=0): - print('set strobe delay') - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.SET_STROBE_DELAY - cmd[2] = camera_channel - cmd[3] = strobe_delay_us >> 24 - cmd[4] = (strobe_delay_us >> 16) & 0xff - cmd[5] = (strobe_delay_us >> 8) & 0xff - cmd[6] = strobe_delay_us & 0xff - self.send_command(cmd) - - def get_pos(self): - return self.x_pos, self.y_pos, self.z_pos, self.theta_pos - - def get_button_and_switch_state(self): - return self.button_and_switch_state - - def set_callback(self,function): - self.new_packet_callback_external = function - - def is_busy(self): - return self.mcu_cmd_execution_in_progress + if illumination_intensity_factor < 0: + illumination_intensity_factor = 0.01 - def set_pin_level(self,pin,level): + factor = round(illumination_intensity_factor, 2) * 100 cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET.SET_PIN_LEVEL - cmd[2] = pin - cmd[3] = level + cmd[1] = CMD_SET.SET_ILLUMINATION_INTENSITY_FACTOR + cmd[2] = int(factor) self.send_command(cmd) - - def turn_on_AF_laser(self): - self.set_pin_level(MCU_PINS.AF_LASER,1) - - def turn_off_AF_laser(self): - self.set_pin_level(MCU_PINS.AF_LASER,0) - - def send_command(self,command): - self._cmd_id = (self._cmd_id + 1)%256 - command[0] = self._cmd_id - command[-1] = self.crc_calculator.calculate_checksum(command[:-1]) - self.mcu_cmd_execution_in_progress = True - # for simulation - self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.IN_PROGRESS - # 
self.timer_update_command_execution_status.setInterval(2000) - # self.timer_update_command_execution_status.start() - # print('start timer') - # timer cannot be started from another thread - self.timestamp_last_command = time.time() - - def _simulation_update_cmd_execution_status(self): - # print('simulation - MCU command execution finished') - # self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS - # self.timer_update_command_execution_status.stop() - pass # timer cannot be started from another thread - - def wait_till_operation_is_completed(self, TIMEOUT_LIMIT_S=5): - timestamp_start = time.time() - while self.is_busy(): - time.sleep(0.02) - if time.time() - timestamp_start > TIMEOUT_LIMIT_S: - print('Error - microcontroller timeout, the program will exit') - exit() - diff --git a/software/control/microcontroller2.py b/software/control/microcontroller2.py deleted file mode 100755 index 9006ed557..000000000 --- a/software/control/microcontroller2.py +++ /dev/null @@ -1,253 +0,0 @@ -import platform -import serial -import serial.tools.list_ports -import time -import numpy as np -import threading - -from control._def import * - -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# temporary -class Microcontroller2(): - def __init__(self): - self.serial = None - self.platform_name = platform.system() - self.tx_buffer_length = Microcontroller2Def.CMD_LENGTH - self.rx_buffer_length = Microcontroller2Def.MSG_LENGTH - - self._cmd_id = 0 - self._cmd_id_mcu = None # command id of mcu's last received command - self._cmd_execution_status = None - self.mcu_cmd_execution_in_progress = False - self.last_command = None - self.timeout_counter = 0 - - controller_ports = [ p.device for p in serial.tools.list_ports.comports() if p.manufacturer == 'Teensyduino'] - if not controller_ports: - raise IOError("No Teensy Found") - self.serial = serial.Serial(controller_ports[0],2000000) - print('Teensy connected') - - ''' - 
self.new_packet_callback_external = None - self.terminate_reading_received_packet_thread = False - self.thread_read_received_packet = threading.Thread(target=self.read_received_packet, daemon=True) - self.thread_read_received_packet.start() - ''' - - def close(self): - self.serial.close() - - def analog_write_DAC8050x(self,dac,value): - print('write DAC ' + str(dac) + ': ' + str(value)) - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.ANALOG_WRITE_DAC8050X - cmd[2] = dac - cmd[3] = (value >> 8) & 0xff - cmd[4] = value & 0xff - self.send_command(cmd) - - def set_camera_trigger_frequency(self,frequency): - trigger_interval_us = int((1/frequency)*1000000*1000); - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.SET_CAMERA_TRIGGER_FREQUENCY - cmd[2] = (trigger_interval_us >> 24) & 0xff - cmd[3] = (trigger_interval_us >> 16) & 0xff - cmd[4] = (trigger_interval_us >> 8) & 0xff - cmd[5] = trigger_interval_us & 0xff - self.send_command(cmd) - - def start_camera_trigger(self): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.START_CAMERA_TRIGGERING - self.send_command(cmd) - - def stop_camera_trigger(self): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.STOP_CAMERA_TRIGGERING - self.send_command(cmd) - - def send_command(self,command): - self._cmd_id = (self._cmd_id + 1)%256 - command[0] = self._cmd_id - # command[self.tx_buffer_length-1] = self._calculate_CRC(command) - self.serial.write(command) - self.mcu_cmd_execution_in_progress = True - self.last_command = command - self.timeout_counter = 0 - - def read_received_packet(self): - while self.terminate_reading_received_packet_thread == False: - # wait to receive data - if self.serial.in_waiting==0: - continue - if self.serial.in_waiting % self.rx_buffer_length != 0: - continue - - # get rid of old data - num_bytes_in_rx_buffer = self.serial.in_waiting - if num_bytes_in_rx_buffer > self.rx_buffer_length: - # print('getting rid of old data') - for i in 
range(num_bytes_in_rx_buffer-self.rx_buffer_length): - self.serial.read() - - # read the buffer - msg=[] - for i in range(self.rx_buffer_length): - msg.append(ord(self.serial.read())) - - # parse the message - ''' - - command ID (1 byte) - - execution status (1 byte) - - CRC (1 byte) - ''' - self._cmd_id_mcu = msg[0] - self._cmd_execution_status = msg[1] - if (self._cmd_id_mcu == self._cmd_id) and (self._cmd_execution_status == CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS): - if self.mcu_cmd_execution_in_progress == True: - self.mcu_cmd_execution_in_progress = False - print(' mcu command ' + str(self._cmd_id) + ' complete') - elif self._cmd_id_mcu != self._cmd_id and self.last_command != None: - self.timeout_counter = self.timeout_counter + 1 - if self.timeout_counter > 10: - self.resend_last_command() - print(' *** resend the last command') - # print('command id ' + str(self._cmd_id) + '; mcu command ' + str(self._cmd_id_mcu) + ' status: ' + str(msg[1]) ) - - if self.new_packet_callback_external is not None: - self.new_packet_callback_external(self) - - def is_busy(self): - return self.mcu_cmd_execution_in_progress - - def set_callback(self,function): - self.new_packet_callback_external = function - - def _int_to_payload(self,signed_int,number_of_bytes): - if signed_int >= 0: - payload = signed_int - else: - payload = 2**(8*number_of_bytes) + signed_int # find two's completement - return payload - - def _payload_to_int(self,payload,number_of_bytes): - signed = 0 - for i in range(number_of_bytes): - signed = signed + int(payload[i])*(256**(number_of_bytes-1-i)) - if signed >= 256**number_of_bytes/2: - signed = signed - 256**number_of_bytes - return signed - -class Microcontroller2_Simulation(): - def __init__(self,parent=None): - self.serial = None - self.platform_name = platform.system() - self.tx_buffer_length = MicrocontrollerDef.CMD_LENGTH - self.rx_buffer_length = MicrocontrollerDef.MSG_LENGTH - - self._cmd_id = 0 - self._cmd_id_mcu = None # command id of 
mcu's last received command - self._cmd_execution_status = None - self.mcu_cmd_execution_in_progress = False - - # for simulation - self.timestamp_last_command = time.time() # for simulation only - self._mcu_cmd_execution_status = None - self.timer_update_command_execution_status = QTimer() - self.timer_update_command_execution_status.timeout.connect(self._simulation_update_cmd_execution_status) - - ''' - self.new_packet_callback_external = None - self.terminate_reading_received_packet_thread = False - self.thread_read_received_packet = threading.Thread(target=self.read_received_packet, daemon=True) - self.thread_read_received_packet.start() - ''' - - def close(self): - self.terminate_reading_received_packet_thread = True - self.thread_read_received_packet.join() - - def analog_write_DAC8050x(self,dac,value): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.ANALOG_WRITE_DAC8050X - cmd[2] = dac - cmd[3] = (value >> 8) & 0xff - cmd[4] = value & 0xff - self.send_command(cmd) - - def set_camera_trigger_frequency(self,frequency): - trigger_interval_us = int((1/frequency)*1000000); - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.SET_CAMERA_TRIGGER_FREQUENCY - cmd[2] = (trigger_interval_us >> 24) & 0xff - cmd[3] = (trigger_interval_us >> 16) & 0xff - cmd[4] = (trigger_interval_us >> 8) & 0xff - cmd[5] = trigger_interval_us & 0xff - self.send_command(cmd) - - def start_camera_trigger(self): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.START_CAMERA_TRIGGERING - self.send_command(cmd) - - def stop_camera_trigger(self): - cmd = bytearray(self.tx_buffer_length) - cmd[1] = CMD_SET2.STOP_CAMERA_TRIGGERING - self.send_command(cmd) - - def read_received_packet(self): - while self.terminate_reading_received_packet_thread == False: - # only for simulation - update the command execution status - if time.time() - self.timestamp_last_command > 0.05: # in the simulation, assume all the operation takes 0.05s to complete - if 
self._mcu_cmd_execution_status != CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS: - self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS - print(' mcu command ' + str(self._cmd_id) + ' complete') - - # read and parse message - msg=[] - for i in range(self.rx_buffer_length): - msg.append(0) - - msg[0] = self._cmd_id - msg[1] = self._mcu_cmd_execution_status - - self._cmd_id_mcu = msg[0] - self._cmd_execution_status = msg[1] - if (self._cmd_id_mcu == self._cmd_id) and (self._cmd_execution_status == CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS): - self.mcu_cmd_execution_in_progress = False - # print('mcu_cmd_execution_in_progress: ' + str(self.mcu_cmd_execution_in_progress)) - - if self.new_packet_callback_external is not None: - self.new_packet_callback_external(self) - - time.sleep(0.005) # simulate MCU packet transmission interval - - def set_callback(self,function): - self.new_packet_callback_external = function - - def is_busy(self): - return self.mcu_cmd_execution_in_progress - - def send_command(self,command): - self._cmd_id = (self._cmd_id + 1)%256 - command[0] = self._cmd_id - # command[self.tx_buffer_length-1] = self._calculate_CRC(command) - self.mcu_cmd_execution_in_progress = True - # for simulation - self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.IN_PROGRESS - # self.timer_update_command_execution_status.setInterval(2000) - # self.timer_update_command_execution_status.start() - # print('start timer') - # timer cannot be started from another thread - self.timestamp_last_command = time.time() - - def _simulation_update_cmd_execution_status(self): - # print('simulation - MCU command execution finished') - # self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS - # self.timer_update_command_execution_status.stop() - pass # timer cannot be started from another thread \ No newline at end of file diff --git a/software/control/microscope.py b/software/control/microscope.py new file mode 100644 index 
000000000..fd3eb521b --- /dev/null +++ b/software/control/microscope.py @@ -0,0 +1,182 @@ +import serial +import time + +from PyQt5.QtCore import QObject + +import control.core as core +from control._def import * +import control + +if CAMERA_TYPE == "Toupcam": + import control.camera_toupcam as camera +import control.microcontroller as microcontroller +import control.serial_peripherals as serial_peripherals + + +class Microscope(QObject): + + def __init__(self, microscope=None, is_simulation=False): + super().__init__() + if microscope is None: + self.initialize_camera(is_simulation=is_simulation) + self.initialize_microcontroller(is_simulation=is_simulation) + self.initialize_core_components() + self.initialize_peripherals() + else: + self.camera = microscope.camera + self.microcontroller = microscope.microcontroller + self.configurationManager = microscope.microcontroller + self.objectiveStore = microscope.objectiveStore + self.streamHandler = microscope.streamHandler + self.liveController = microscope.liveController + self.navigationController = microscope.navigationController + self.autofocusController = microscope.autofocusController + self.slidePositionController = microscope.slidePositionController + if USE_ZABER_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel = microscope.emission_filter_wheel + elif USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel = microscope.emission_filter_wheel + + def initialize_camera(self, is_simulation): + if is_simulation: + self.camera = camera.Camera_Simulation(rotate_image_angle=ROTATE_IMAGE_ANGLE, flip_image=FLIP_IMAGE) + else: + sn_camera_main = camera.get_sn_by_model(MAIN_CAMERA_MODEL) + self.camera = camera.Camera(sn=sn_camera_main, rotate_image_angle=ROTATE_IMAGE_ANGLE, flip_image=FLIP_IMAGE) + + self.camera.open() + self.camera.set_pixel_format(DEFAULT_PIXEL_FORMAT) + self.camera.set_software_triggered_acquisition() + + def initialize_microcontroller(self, is_simulation): + if is_simulation: + 
self.microcontroller = microcontroller.Microcontroller(existing_serial=control.microcontroller.SimSerial()) + else: + self.microcontroller = microcontroller.Microcontroller(version=CONTROLLER_VERSION, sn=CONTROLLER_SN) + + self.microcontroller.reset() + time.sleep(0.5) + self.microcontroller.initialize_drivers() + time.sleep(0.5) + self.microcontroller.configure_actuators() + + self.home_x_and_y_separately = False + + def initialize_core_components(self): + self.configurationManager = core.ConfigurationManager(filename='./channel_configurations.xml') + self.objectiveStore = core.ObjectiveStore() + self.streamHandler = core.StreamHandler(display_resolution_scaling=DEFAULT_DISPLAY_CROP/100) + self.liveController = core.LiveController(self.camera, self.microcontroller, self.configurationManager, self) + self.navigationController = core.NavigationController(self.microcontroller, self.objectiveStore) + self.autofocusController = core.AutoFocusController(self.camera, self.navigationController, self.liveController) + self.slidePositionController = core.SlidePositionController(self.navigationController,self.liveController) + + def initialize_peripherals(self): + if USE_ZABER_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel = serial_peripherals.FilterController(FILTER_CONTROLLER_SERIAL_NUMBER, 115200, 8, serial.PARITY_NONE, serial.STOPBITS_ONE) + self.emission_filter_wheel.start_homing() + elif USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel = serial_peripherals.Optospin(SN=FILTER_CONTROLLER_SERIAL_NUMBER) + self.emission_filter_wheel.set_speed(OPTOSPIN_EMISSION_FILTER_WHEEL_SPEED_HZ) + + def set_channel(self,channel): + self.liveController.set_channel(channel) + + def acquire_image(self): + # turn on illumination and send trigger + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + self.liveController.turn_on_illumination() + self.waitForMicrocontroller() + self.camera.send_trigger() + elif self.liveController.trigger_mode == 
TriggerMode.HARDWARE: + self.microcontroller.send_hardware_trigger(control_illumination=True,illumination_on_time_us=self.camera.exposure_time*1000) + + # read a frame from camera + image = self.camera.read_frame() + if image is None: + print('self.camera.read_frame() returned None') + + # tunr off the illumination if using software trigger + if self.liveController.trigger_mode == TriggerMode.SOFTWARE: + self.liveController.turn_off_illumination() + + return image + + def home_xyz(self): + if HOMING_ENABLED_Z: + self.navigationController.home_z() + self.waitForMicrocontroller(10, 'z homing timeout') + if HOMING_ENABLED_X and HOMING_ENABLED_Y: + self.navigationController.move_x(20) + self.waitForMicrocontroller() + self.navigationController.home_y() + self.waitForMicrocontroller(10, 'y homing timeout') + self.navigationController.zero_y() + self.navigationController.home_x() + self.waitForMicrocontroller(10, 'x homing timeout') + self.navigationController.zero_x() + self.slidePositionController.homing_done = True + + def move_x(self,distance,blocking=True): + self.navigationController.move_x(distance) + if blocking: + self.waitForMicrocontroller() + + def move_y(self,distance,blocking=True): + self.navigationController.move_y(distance) + if blocking: + self.waitForMicrocontroller() + + def move_x_to(self,position,blocking=True): + self.navigationController.move_x_to(position) + if blocking: + self.waitForMicrocontroller() + + def move_y_to(self,position,blocking=True): + self.navigationController.move_y_to(position) + if blocking: + self.waitForMicrocontroller() + + def get_x(self): + return self.navigationController.x_pos_mm + + def get_y(self): + return self.navigationController.y_pos_mm + + def get_z(self): + return self.navigationController.z_pos_mm + + def move_z_to(self,z_mm,blocking=True): + clear_backlash = True if (z_mm < self.navigationController.z_pos_mm and self.navigationController.get_pid_control_flag(2)==False) else False + # clear backlash if moving 
backward in open loop mode + self.navigationController.move_z_to(z_mm) + if blocking: + self.waitForMicrocontroller() + if clear_backlash: + _usteps_to_clear_backlash = 160 + self.navigationController.move_z_usteps(-_usteps_to_clear_backlash) + self.waitForMicrocontroller() + self.navigationController.move_z_usteps(_usteps_to_clear_backlash) + self.waitForMicrocontroller() + + def start_live(self): + self.camera.start_streaming() + self.liveController.start_live() + + def stop_live(self): + self.liveController.stop_live() + self.camera.stop_streaming() + + def waitForMicrocontroller(self, timeout=5.0, error_message=None): + try: + self.microcontroller.wait_till_operation_is_completed(timeout) + except TimeoutError as e: + self.log.error(error_message or "Microcontroller operation timed out!") + raise e + + def close(self): + self.stop_live() + self.camera.close() + self.microcontroller.close() + if USE_ZABER_EMISSION_FILTER_WHEEL or USE_OPTOSPIN_EMISSION_FILTER_WHEEL: + self.emission_filter_wheel.close() \ No newline at end of file diff --git a/software/control/multipoint_custom_script_entry.py b/software/control/multipoint_custom_script_entry.py index 91561d6ac..41c61be9a 100644 --- a/software/control/multipoint_custom_script_entry.py +++ b/software/control/multipoint_custom_script_entry.py @@ -195,11 +195,16 @@ def multipoint_custom_script_entry(multiPointWorker,time_point,current_path,coor multiPointWorker.wait_till_operation_is_completed() multiPointWorker.navigationController.move_y_usteps(-multiPointWorker.dy_usteps) multiPointWorker.wait_till_operation_is_completed() - _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) - multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.dz_usteps-_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() - multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() + if 
multiPointWorker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.dz_usteps-_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + else: + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.dz_usteps) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.coordinates_pd.to_csv(os.path.join(current_path,'coordinates.csv'),index=False,header=True) multiPointWorker.navigationController.enable_joystick_button_action = True return @@ -215,17 +220,28 @@ def multipoint_custom_script_entry(multiPointWorker,time_point,current_path,coor if multiPointWorker.NZ > 1: # move z back if Z_STACKING_CONFIG == 'FROM CENTER': - _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) - multiPointWorker.navigationController.move_z_usteps( -multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) - _usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() - multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() + if multiPointWorker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) + multiPointWorker.navigationController.move_z_usteps( -multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) - _usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + 
multiPointWorker.wait_till_operation_is_completed() + else: + multiPointWorker.navigationController.move_z_usteps( -multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) ) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.dz_usteps = multiPointWorker.dz_usteps - multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) + multiPointWorker.deltaZ_usteps*round((multiPointWorker.NZ-1)/2) else: - multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) - _usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() - multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) - multiPointWorker.wait_till_operation_is_completed() + if multiPointWorker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(160,20*multiPointWorker.navigationController.z_microstepping) + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) - _usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + multiPointWorker.wait_till_operation_is_completed() + else: + multiPointWorker.navigationController.move_z_usteps(-multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1)) + multiPointWorker.wait_till_operation_is_completed() + multiPointWorker.dz_usteps = multiPointWorker.dz_usteps - multiPointWorker.deltaZ_usteps*(multiPointWorker.NZ-1) # update FOV counter diff --git a/software/control/multipoint_custom_script_entry_v2.py b/software/control/multipoint_custom_script_entry_v2.py new file mode 100644 index 000000000..32a7b536b --- /dev/null +++ b/software/control/multipoint_custom_script_entry_v2.py @@ -0,0 +1,355 @@ +import os +import time +import cv2 +import numpy as np +import pandas as pd +import imageio as iio +from control._def 
import * +import control.utils as utils + +BACKLASH_USTEPS = 160 + +def multipoint_custom_script_entry(worker, current_path, region_id, fov, i, j): + print(f'In custom script; t={worker.time_point}, region={region_id}, fov={fov}: {i}_{j}') + + perform_autofocus(worker, region_id) + prepare_z_stack(worker) + acquire_z_stack(worker, current_path, region_id, fov, i, j) + +def perform_autofocus(worker, region_id): + if worker.do_reflection_af: + perform_laser_autofocus(worker) + else: + perform_contrast_autofocus(worker, region_id) + +def perform_laser_autofocus(worker): + if not worker.microscope.laserAutofocusController.is_initialized: + initialize_laser_autofocus(worker) + else: + worker.microscope.laserAutofocusController.move_to_target(0) + if worker.navigationController.get_pid_control_flag(2) is False: + worker.microscope.laserAutofocusController.move_to_target(0) + +def initialize_laser_autofocus(worker): + print("Initializing reflection AF") + worker.microscope.laserAutofocusController.initialize_auto() + if worker.do_autofocus and ((worker.NZ == 1) or worker.z_stacking_config == 'FROM CENTER'): + perform_contrast_autofocus(worker, 0) + worker.microscope.laserAutofocusController.set_reference() + +def perform_contrast_autofocus(worker, region_id): + if ((worker.NZ == 1 or worker.z_stacking_config == 'FROM CENTER') + and worker.do_autofocus + and (worker.af_fov_count % Acquisition.NUMBER_OF_FOVS_PER_AF == 0)): + config_AF = get_autofocus_config(worker) + worker.signal_current_configuration.emit(config_AF) + worker.autofocusController.autofocus() + worker.autofocusController.wait_till_autofocus_has_completed() + if len(worker.scan_coordinates_mm[region_id]) == 3: + worker.scan_coordinates_mm[region_id][2] = worker.navigationController.z_pos_mm + update_widget_z_level(worker, region_id) + +def update_widget_z_level(worker, region_id): + if worker.coordinate_dict is not None: + worker.microscope.multiPointWidgetGrid.update_region_z_level(region_id, 
worker.navigationController.z_pos_mm) + elif worker.multiPointController.location_list is not None: + try: + worker.microscope.multiPointWidget2._update_z(region_id, worker.navigationController.z_pos_mm) + except: + print("Failed to update flexible widget z") + try: + worker.microscope.multiPointWidgetGrid.update_region_z_level(region_id, worker.navigationController.z_pos_mm) + except: + print("Failed to update grid widget z") + +def get_autofocus_config(worker): + configuration_name_AF = MULTIPOINT_AUTOFOCUS_CHANNEL + return next((config for config in worker.configurationManager.configurations if config.name == configuration_name_AF)) + +def prepare_z_stack(worker): + if worker.NZ > 1: + if worker.z_stacking_config == 'FROM CENTER': + worker.navigationController.move_z_usteps(-worker.deltaZ_usteps * round((worker.NZ - 1) / 2)) + worker.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z / 1000) + worker.navigationController.move_z_usteps(-BACKLASH_USTEPS) + worker.wait_till_operation_is_completed() + worker.navigationController.move_z_usteps(BACKLASH_USTEPS) + worker.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z / 1000) + +def acquire_z_stack(worker, current_path, region_id, fov, i, j): + total_images = worker.NZ * len(worker.selected_configurations) + for z_level in range(worker.NZ): + acquire_single_z_plane(worker, current_path, region_id, fov, i, j, z_level, total_images) + if z_level < worker.NZ - 1: + move_to_next_z_plane(worker) + + if worker.NZ > 1: + move_z_stack_back(worker) + + worker.af_fov_count += 1 + +def acquire_single_z_plane(worker, current_path, region_id, fov, i, j, z_level, total_images): + if i is not None and j is not None: + file_ID = f"{region_id}_{i}_{j}_{z_level}" + else: + file_ID = f"{region_id}_{fov}_{z_level}" + + current_round_images = {} + for config_idx, config in enumerate(worker.selected_configurations): + acquire_image_for_configuration(worker, config, file_ID, 
current_path, current_round_images, i, j, z_level) + + # Calculate current image number and emit progress signal + current_image = (fov * total_images) + (z_level * len(worker.selected_configurations)) + config_idx + 1 + worker.signal_region_progress.emit(current_image, worker.total_scans) + + if worker.multiPointController.do_fluorescence_rtp: + run_real_time_processing(worker, current_round_images, i, j, z_level) + + update_coordinates_dataframe(worker, region_id, z_level, fov, i, j) + + # Check for abort after each z-plane + if check_for_abort(worker, current_path, region_id): + return + +def acquire_image_for_configuration(worker, config, file_ID, current_path, current_round_images, i, j, z_level): + worker.handle_z_offset(config, True) # Added this line to perform config z-offset + + if 'USB Spectrometer' not in config.name and 'RGB' not in config.name: + acquire_camera_image(worker, config, file_ID, current_path, current_round_images, i, j, z_level) + elif 'RGB' in config.name: + acquire_rgb_image(worker, config, file_ID, current_path, current_round_images, i, j, z_level) + else: + acquire_spectrometer_data(worker, config, file_ID, current_path, i, j, z_level) + + worker.handle_z_offset(config, False) # Added this line to undo z-offset + +def acquire_camera_image(worker, config, file_ID, current_path, current_round_images, i, j, z_level): + worker.signal_current_configuration.emit(config) + worker.wait_till_operation_is_completed() + + image = capture_image(worker, config) + if image is not None: + process_and_save_image(worker, image, file_ID, config, current_path, current_round_images, i, j, z_level) + +def capture_image(worker, config): + if worker.liveController.trigger_mode == TriggerMode.SOFTWARE: + return capture_image_software_trigger(worker) + elif worker.liveController.trigger_mode == TriggerMode.HARDWARE: + return capture_image_hardware_trigger(worker, config) + else: + return worker.camera.read_frame() + +def 
capture_image_software_trigger(worker): + worker.liveController.turn_on_illumination() + worker.wait_till_operation_is_completed() + worker.camera.send_trigger() + image = worker.camera.read_frame() + worker.liveController.turn_off_illumination() + return image + +def capture_image_hardware_trigger(worker, config): + if 'Fluorescence' in config.name and ENABLE_NL5 and NL5_USE_DOUT: + worker.camera.image_is_ready = False + worker.microscope.nl5.start_acquisition() + return worker.camera.read_frame(reset_image_ready_flag=False) + else: + worker.microcontroller.send_hardware_trigger(control_illumination=True, illumination_on_time_us=worker.camera.exposure_time * 1000) + return worker.camera.read_frame() + +def process_and_save_image(worker, image, file_ID, config, current_path, current_round_images, i, j, z_level): + image = utils.crop_image(image, worker.crop_width, worker.crop_height) + image = utils.rotate_and_flip_image(image, rotate_image_angle=worker.camera.rotate_image_angle, flip_image=worker.camera.flip_image) + image_to_display = utils.crop_image(image, round(worker.crop_width * worker.display_resolution_scaling), round(worker.crop_height * worker.display_resolution_scaling)) + worker.image_to_display.emit(image_to_display) + worker.image_to_display_multi.emit(image_to_display, config.illumination_source) + + save_image(worker, image, file_ID, config, current_path) + worker.update_napari(image, config.name, i, j, z_level) + + current_round_images[config.name] = np.copy(image) + +def save_image(worker, image, file_ID, config, current_path): + if image.dtype == np.uint16: + saving_path = os.path.join(current_path, f"{file_ID}_{config.name.replace(' ', '_')}.tiff") + else: + saving_path = os.path.join(current_path, f"{file_ID}_{config.name.replace(' ', '_')}.{Acquisition.IMAGE_FORMAT}") + + if worker.camera.is_color and 'BF LED matrix' in config.name: + image = process_color_image(image) + + iio.imwrite(saving_path, image) + +def process_color_image(image): + 
if MULTIPOINT_BF_SAVING_OPTION == 'RGB2GRAY': + return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + elif MULTIPOINT_BF_SAVING_OPTION == 'Green Channel Only': + return image[:, :, 1] + return image + +def acquire_rgb_image(worker, config, file_ID, current_path, current_round_images, i, j, z_level): + rgb_channels = ['BF LED matrix full_R', 'BF LED matrix full_G', 'BF LED matrix full_B'] + images = {} + + for channel_config in worker.configurationManager.configurations: + if channel_config.name in rgb_channels: + worker.signal_current_configuration.emit(channel_config) + worker.wait_till_operation_is_completed() + image = capture_image(worker, channel_config) + if image is not None: + image = utils.crop_image(image, worker.crop_width, worker.crop_height) + image = utils.rotate_and_flip_image(image, rotate_image_angle=worker.camera.rotate_image_angle, flip_image=worker.camera.flip_image) + images[channel_config.name] = np.copy(image) + + if images: + process_and_save_rgb_image(worker, images, file_ID, config, current_path, current_round_images, i, j, z_level) + +def process_and_save_rgb_image(worker, images, file_ID, config, current_path, current_round_images, i, j, z_level): + if len(images['BF LED matrix full_R'].shape) == 3: + handle_rgb_channels(worker, images, file_ID, current_path, config, i, j, z_level) + else: + construct_rgb_image(worker, images, file_ID, current_path, config, i, j, z_level) + +def handle_rgb_channels(worker, images, file_ID, current_path, config, i, j, z_level): + for channel in ['BF LED matrix full_R', 'BF LED matrix full_G', 'BF LED matrix full_B']: + image_to_display = utils.crop_image(images[channel], round(worker.crop_width * worker.display_resolution_scaling), round(worker.crop_height * worker.display_resolution_scaling)) + worker.image_to_display.emit(image_to_display) + worker.image_to_display_multi.emit(image_to_display, config.illumination_source) + worker.update_napari(images[channel], channel, i, j, z_level) + file_name = 
f"{file_ID}_{channel.replace(' ', '_')}{'.tiff' if images[channel].dtype == np.uint16 else '.' + Acquisition.IMAGE_FORMAT}" + iio.imwrite(os.path.join(current_path, file_name), images[channel]) + +def construct_rgb_image(worker, images, file_ID, current_path, config, i, j, z_level): + rgb_image = np.zeros((*images['BF LED matrix full_R'].shape, 3), dtype=images['BF LED matrix full_R'].dtype) + rgb_image[:, :, 0] = images['BF LED matrix full_R'] + rgb_image[:, :, 1] = images['BF LED matrix full_G'] + rgb_image[:, :, 2] = images['BF LED matrix full_B'] + + image_to_display = utils.crop_image(rgb_image, round(worker.crop_width * worker.display_resolution_scaling), round(worker.crop_height * worker.display_resolution_scaling)) + worker.image_to_display.emit(image_to_display) + worker.image_to_display_multi.emit(image_to_display, config.illumination_source) + + worker.update_napari(rgb_image, config.name, i, j, z_level) + + file_name = f"{file_ID}_BF_LED_matrix_full_RGB{'.tiff' if rgb_image.dtype == np.uint16 else '.' 
+ Acquisition.IMAGE_FORMAT}" + iio.imwrite(os.path.join(current_path, file_name), rgb_image) + +def acquire_spectrometer_data(worker, config, file_ID, current_path, i, j, z_level): + if worker.usb_spectrometer is not None: + for l in range(N_SPECTRUM_PER_POINT): + data = worker.usb_spectrometer.read_spectrum() + worker.spectrum_to_display.emit(data) + saving_path = os.path.join(current_path, f"{file_ID}_{config.name.replace(' ', '_')}_{l}.csv") + np.savetxt(saving_path, data, delimiter=',') + +def update_coordinates_dataframe(worker, region_id, z_level, fov, i, j): + if i is None or j is None: + worker.update_coordinates_dataframe(region_id, z_level, fov) + else: + worker.update_coordinates_dataframe(region_id, z_level, i=i, j=j) + worker.signal_register_current_fov.emit(worker.navigationController.x_pos_mm, worker.navigationController.y_pos_mm) + +def run_real_time_processing(worker, current_round_images, i, j, z_level): + acquired_image_configs = list(current_round_images.keys()) + if 'BF LED matrix left half' in current_round_images and 'BF LED matrix right half' in current_round_images and 'Fluorescence 405 nm Ex' in current_round_images: + try: + print("real time processing", worker.count_rtp) + if (worker.microscope.model is None) or (worker.microscope.device is None) or (worker.microscope.classification_th is None) or (worker.microscope.dataHandler is None): + raise AttributeError('microscope missing model, device, classification_th, and/or dataHandler') + I_fluorescence = current_round_images['Fluorescence 405 nm Ex'] + I_left = current_round_images['BF LED matrix left half'] + I_right = current_round_images['BF LED matrix right half'] + if len(I_left.shape) == 3: + I_left = cv2.cvtColor(I_left, cv2.COLOR_RGB2GRAY) + if len(I_right.shape) == 3: + I_right = cv2.cvtColor(I_right, cv2.COLOR_RGB2GRAY) + malaria_rtp(I_fluorescence, I_left, I_right, i, j, z_level, worker, + classification_test_mode=worker.microscope.classification_test_mode, + 
sort_during_multipoint=SORT_DURING_MULTIPOINT, + disp_th_during_multipoint=DISP_TH_DURING_MULTIPOINT) + worker.count_rtp += 1 + except AttributeError as e: + print(repr(e)) + +def move_to_next_z_plane(worker): + if worker.use_piezo: + worker.z_piezo_um += worker.deltaZ * 1000 + dac = int(65535 * (worker.z_piezo_um / OBJECTIVE_PIEZO_RANGE_UM)) + worker.navigationController.microcontroller.analog_write_onboard_DAC(7, dac) + if worker.liveController.trigger_mode == TriggerMode.SOFTWARE: + time.sleep(MULTIPOINT_PIEZO_DELAY_MS / 1000) + if MULTIPOINT_PIEZO_UPDATE_DISPLAY: + worker.signal_z_piezo_um.emit(worker.z_piezo_um) + else: + worker.navigationController.move_z_usteps(worker.deltaZ_usteps) + worker.wait_till_operation_is_completed() + time.sleep(SCAN_STABILIZATION_TIME_MS_Z / 1000) + worker.dz_usteps = worker.dz_usteps + worker.deltaZ_usteps + +def move_z_stack_back(worker): + if worker.use_piezo: + worker.z_piezo_um = OBJECTIVE_PIEZO_HOME_UM + dac = int(65535 * (worker.z_piezo_um / OBJECTIVE_PIEZO_RANGE_UM)) + worker.navigationController.microcontroller.analog_write_onboard_DAC(7, dac) + if worker.liveController.trigger_mode == TriggerMode.SOFTWARE: + time.sleep(MULTIPOINT_PIEZO_DELAY_MS / 1000) + if MULTIPOINT_PIEZO_UPDATE_DISPLAY: + worker.signal_z_piezo_um.emit(worker.z_piezo_um) + else: + if worker.z_stacking_config == 'FROM CENTER': + move_z_stack_back_from_center(worker) + else: + move_z_stack_back_from_top(worker) + +def move_z_stack_back_from_center(worker): + _usteps_to_clear_backlash = max(BACKLASH_USTEPS, 20 * worker.navigationController.z_microstepping) + if worker.navigationController.get_pid_control_flag(2) is False: + worker.navigationController.move_z_usteps(-worker.deltaZ_usteps * (worker.NZ - 1) + worker.deltaZ_usteps * round((worker.NZ - 1) / 2) - _usteps_to_clear_backlash) + worker.wait_till_operation_is_completed() + worker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + worker.wait_till_operation_is_completed() + else: + 
worker.navigationController.move_z_usteps(-worker.deltaZ_usteps * (worker.NZ - 1) + worker.deltaZ_usteps * round((worker.NZ - 1) / 2)) + worker.wait_till_operation_is_completed() + worker.dz_usteps = worker.dz_usteps - worker.deltaZ_usteps * (worker.NZ - 1) + worker.deltaZ_usteps * round((worker.NZ - 1) / 2) + +def move_z_stack_back_from_top(worker): + _usteps_to_clear_backlash = max(BACKLASH_USTEPS, 20 * worker.navigationController.z_microstepping) + if worker.navigationController.get_pid_control_flag(2) is False: + worker.navigationController.move_z_usteps(-worker.deltaZ_usteps * (worker.NZ - 1) - _usteps_to_clear_backlash) + worker.wait_till_operation_is_completed() + worker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + worker.wait_till_operation_is_completed() + else: + worker.navigationController.move_z_usteps(-worker.deltaZ_usteps * (worker.NZ - 1)) + worker.wait_till_operation_is_completed() + worker.dz_usteps = worker.dz_usteps - worker.deltaZ_usteps * (worker.NZ - 1) + +def check_for_abort(worker, current_path, region_id): + if worker.multiPointController.abort_acqusition_requested: + worker.handle_acquisition_abort(current_path, region_id) + return True + return False + +def move_stage_back(worker): + worker.navigationController.move_x_usteps(-worker.dx_usteps) + worker.wait_till_operation_is_completed() + worker.navigationController.move_y_usteps(-worker.dy_usteps) + worker.wait_till_operation_is_completed() + move_z_back(worker) + +def move_z_back(worker): + if worker.navigationController.get_pid_control_flag(2) is False: + _usteps_to_clear_backlash = max(BACKLASH_USTEPS, 20 * worker.navigationController.z_microstepping) + worker.navigationController.move_z_usteps(-worker.dz_usteps - _usteps_to_clear_backlash) + worker.wait_till_operation_is_completed() + worker.navigationController.move_z_usteps(_usteps_to_clear_backlash) + worker.wait_till_operation_is_completed() + else: + worker.navigationController.move_z_usteps(-worker.dz_usteps) 
+ worker.wait_till_operation_is_completed() + +# This function is called by the MultiPointWorker's run_single_time_point method +def run_custom_multipoint(worker, current_path, region_id, fov, i, j): + multipoint_custom_script_entry(worker, current_path, region_id, fov, i, j) \ No newline at end of file diff --git a/software/control/navigation_prior.py b/software/control/navigation_prior.py new file mode 100644 index 000000000..faf1f6e0a --- /dev/null +++ b/software/control/navigation_prior.py @@ -0,0 +1,300 @@ +# set QT_API environment variable +import os +import sys +import time +os.environ["QT_API"] = "pyqt5" +import qtpy + +# qt libraries +from qtpy.QtCore import * +from qtpy.QtWidgets import * +from qtpy.QtGui import * + +import time +import numpy as np + +from control._def import * + +class NavigationController_PriorStage(QObject): + + xPos = Signal(float) + yPos = Signal(float) + zPos = Signal(float) + thetaPos = Signal(float) + xyPos = Signal(float,float) + signal_joystick_button_pressed = Signal() + + def __init__(self,priorstage, microcontroller, objectivestore, parent=None): + # parent should be set to OctopiGUI instance to enable updates + # to camera settings, e.g. 
binning, that would affect click-to-move + QObject.__init__(self) + self.stage = priorstage + self.microcontroller = microcontroller + self.parent = parent + self.objectiveStore = objectivestore + self.x_pos_mm = 0 + self.y_pos_mm = 0 + + # We are not using z and theta + self.z_pos_mm = 0 + self.z_pos = 0 + self.mm_per_ustep_Z = 0.1 + self.theta_pos_rad = 0 + self.click_to_move = False + self.z_microstepping = 1 + self.theta_microstepping = 0.1 + + # Joystick methods to be implemented + self.enable_joystick_button_action = True + + # to be moved to gui for transparency + self.stage.set_callback(self.update_pos) + + # scan start position + self.scan_begin_position_x = 0 + self.scan_begin_position_y = 0 + + def get_mm_per_ustep_X(self): + return self.stage.steps_to_mm(1) + + def get_mm_per_ustep_Y(self): + return self.stage.steps_to_mm(1) + + def get_mm_per_ustep_Z(self): + return 0.1 + + def set_flag_click_to_move(self, flag): + self.click_to_move = flag + + def get_flag_click_to_move(self): + return self.click_to_move + + def scan_preview_move_from_click(self, click_x, click_y, image_width, image_height, Nx=1, Ny=1, dx_mm=0.9, dy_mm=0.9): + """ + napariTiledDisplay uses the Nx, Ny, dx_mm, dy_mm fields to move to the correct fov first + imageArrayDisplayWindow assumes only a single fov (default values do not impact calculation but this is less correct) + """ + # check if click to move enabled + if not self.click_to_move: + print("allow click to move") + return + # restore to raw coordicate + click_x = image_width / 2.0 + click_x + click_y = image_height / 2.0 - click_y + print("click - (x, y):", (click_x, click_y)) + fov_col = click_x * Nx // image_width + fov_row = click_y * Ny // image_height + print("image - (col, row):", (fov_col, fov_row)) + end_position_x = Ny % 2 # right side or left side + fov_col = Nx - (fov_col + 1) if end_position_x else fov_col + fov_row = fov_row + print("fov - (col, row):", fov_col, fov_row) + + pixel_sign_x = (-1)**end_position_x # 
inverted + pixel_sign_y = -1 if INVERTED_OBJECTIVE else 1 + print("pixel_sign_x, pixel_sign_y", pixel_sign_x, pixel_sign_y) + + # move to selected fov + self.move_to(self.scan_begin_position_x+dx_mm*fov_col*pixel_sign_x, + self.scan_begin_position_y+dy_mm*fov_row*pixel_sign_y) + + # move to actual click, offset from center fov + tile_width = (image_width / Nx) * PRVIEW_DOWNSAMPLE_FACTOR + tile_height = (image_height / Ny) * PRVIEW_DOWNSAMPLE_FACTOR + offset_x = (click_x * PRVIEW_DOWNSAMPLE_FACTOR) % tile_width + offset_y = (click_y * PRVIEW_DOWNSAMPLE_FACTOR) % tile_height + offset_x_centered = int(offset_x - tile_width / 2) + offset_y_centered = int(tile_height / 2 - offset_y) + self.move_from_click(offset_x_centered, offset_y_centered, tile_width, tile_height) + + def move_from_click(self, click_x, click_y, image_width, image_height): + if self.click_to_move: + pixel_size_um = self.objectiveStore.pixel_size_um + #pixel_binning_x, pixel_binning_y = self.get_pixel_binning() + #pixel_size_x = pixel_size_um * pixel_binning_x + #pixel_size_y = pixel_size_um * pixel_binning_y + + pixel_sign_x = 1 + pixel_sign_y = 1 if INVERTED_OBJECTIVE else -1 + + #delta_x = pixel_sign_x * pixel_size_x * click_x / 1000.0 + #delta_y = pixel_sign_y * pixel_size_y * click_y / 1000.0 + delta_x = pixel_sign_x * pixel_size_um * click_x / 1000.0 + delta_y = pixel_sign_y * pixel_size_um * click_y / 1000.0 + + if not IS_HCS: + delta_x /= 2 + delta_y /= 2 + + self.move_xy(delta_x, delta_y) + + def move_from_click_mosaic(self, x_mm, y_mm): + if self.click_to_move: + self.move_to(x_mm, y_mm) + + def get_pixel_binning(self): + try: + highest_res = max(self.parent.camera.res_list, key=lambda res: res[0] * res[1]) + resolution = self.parent.camera.resolution + pixel_binning_x = max(1, highest_res[0] / resolution[0]) + pixel_binning_y = max(1, highest_res[1] / resolution[1]) + except AttributeError: + pixel_binning_x = 1 + pixel_binning_y = 1 + return pixel_binning_x, pixel_binning_y + + def 
move_to_cached_position(self): + if not os.path.isfile("cache/last_coords.txt"): + return + with open("cache/last_coords.txt","r") as f: + for line in f: + try: + x,y,z = line.strip("\n").strip().split(",") + x = float(x) + y = float(y) + z = float(z) + self.move_to(x,y) + self.move_z_to(z) + break + except: + pass + break + + def cache_current_position(self): + with open("cache/last_coords.txt","w") as f: + f.write(",".join([str(self.x_pos_mm),str(self.y_pos_mm),str(self.z_pos_mm)])) + + def move_x(self,delta): + self.stage.move_relative_mm(delta, 0) + + def move_y(self,delta): + self.stage.move_relative_mm(0, delta) + + def move_z(self,delta): + pass + + def move_xy(self, x_mm, y_mm): + self.stage.move_relative_mm(x_mm, y_mm) + + def move_x_to(self,delta): + self.stage.move_absolute_x_mm(delta) + + def move_y_to(self,delta): + self.stage.move_absolute_y_mm(delta) + + def move_z_to(self,delta): + pass + + def move_to(self,x_mm,y_mm): + self.stage.move_absolute_mm(x_mm, y_mm) + + def move_x_usteps(self,usteps): + self.stage.move_relative(usteps, 0) + + def move_y_usteps(self,usteps): + self.stage.move_relative(0, usteps) + + def move_z_usteps(self,usteps): + pass + + def move_x_to_usteps(self,usteps): + self.stage.move_absolute_x(usteps) + + def move_y_to_usteps(self,usteps): + self.stage.move_absolute_y(usteps) + + def move_z_to_usteps(self,usteps): + pass + + def update_pos(self,stage): + # get position from the stage + x_pos, y_pos, z_pos, theta_pos = stage.get_pos() + + self.x_pos_mm = stage.steps_to_mm(x_pos) + self.y_pos_mm = stage.steps_to_mm(y_pos) + + # emit the updated position + self.xPos.emit(self.x_pos_mm) + self.yPos.emit(self.y_pos_mm) + self.zPos.emit(self.z_pos_mm*1000) # not in use + self.thetaPos.emit(1) # not in use + self.xyPos.emit(self.x_pos_mm,self.y_pos_mm) + + # Joystick button to be implemented + if stage.signal_joystick_button_pressed_event: + if self.enable_joystick_button_action: + self.signal_joystick_button_pressed.emit() + 
print('joystick button pressed') + stage.signal_joystick_button_pressed_event = False + + def home_x(self): + self.stage.home_x() + + def home_y(self): + self.stage.home_y() + + def home_z(self): + pass + + def home_theta(self): + pass + + def home_xy(self): + self.stage.home_xy() + + def zero_x(self): + self.stage.zero_x() + + def zero_y(self): + self.stage.zero_y() + + def zero_z(self): + pass + + def zero_theta(self): + pass + + def home(self): + pass + + def set_x_limit_pos_mm(self,value_mm): + pass + + def set_x_limit_neg_mm(self,value_mm): + pass + + def set_y_limit_pos_mm(self,value_mm): + pass + + def set_y_limit_neg_mm(self,value_mm): + pass + + def set_z_limit_pos_mm(self,value_mm): + pass + + def set_z_limit_neg_mm(self,value_mm): + pass + + def configure_encoder(self, axis, transitions_per_revolution,flip_direction): + pass + + def set_pid_control_enable(self, axis, enable_flag): + pass + + def turnoff_axis_pid_control(self): + pass + + def get_pid_control_flag(self, axis): + return False + + def keep_scan_begin_position(self, x, y): + self.scan_begin_position_x = x + self.scan_begin_position_y = y + + def set_axis_PID_arguments(self, axis, pid_p, pid_i, pid_d): + pass + + def set_piezo_um(self, z_piezo_um): + dac = int(65535 * (z_piezo_um / OBJECTIVE_PIEZO_RANGE_UM)) + dac = 65535 - dac if OBJECTIVE_PIEZO_FLIP_DIR else dac + self.microcontroller.analog_write_onboard_DAC(7, dac) \ No newline at end of file diff --git a/software/control/serial_peripherals.py b/software/control/serial_peripherals.py index ae2f2b443..9d578a7a3 100644 --- a/software/control/serial_peripherals.py +++ b/software/control/serial_peripherals.py @@ -1,6 +1,11 @@ import serial from serial.tools import list_ports import time +from typing import Tuple, Optional +import struct + +import squid.logging +log = squid.logging.get_logger(__name__) class SerialDevice: """ @@ -8,7 +13,7 @@ class SerialDevice: automating device finding based on VID/PID or serial number. 
""" - def __init__(self, port=None, VID=None,PID=None,SN=None, baudrate=9600, read_timeout=1, **kwargs): + def __init__(self, port=None, VID=None,PID=None,SN=None, baudrate=9600, read_timeout=0.1, **kwargs): # Initialize the serial connection self.port = port self.VID = VID @@ -72,38 +77,69 @@ def open_ser(self, SN=None, VID=None, PID=None, baudrate =None, read_timeout=Non if self.port is not None: self.serial = serial.Serial(self.port,**kwargs) - - def write_and_check(self, command, expected_response, max_attempts=3, attempt_delay=1, check_prefix=True): + def write_and_check(self, command, expected_response, read_delay=0.1, max_attempts=5, attempt_delay=1, check_prefix=True, print_response=False): # Write a command and check the response for attempt in range(max_attempts): self.serial.write(command.encode()) - time.sleep(0.1) # Wait for the command to be sent + time.sleep(read_delay) # Wait for the command to be sent/executed response = self.serial.readline().decode().strip() + if print_response: + log.info(response) + + # flush the input buffer + while self.serial.in_waiting: + if print_response: + log.info(self.serial.readline().decode().strip()) + else: + self.serial.readline().decode().strip() + + # check response if response == expected_response: return response - + else: + log.warning(response) + + # check prefix if the full response does not match if check_prefix: if response.startswith(expected_response): return response - else: time.sleep(attempt_delay) # Wait before retrying raise RuntimeError("Max attempts reached without receiving expected response.") + def write_and_read(self, command, read_delay=0.1, max_attempts=3, attempt_delay=1): + self.serial.write(command.encode()) + time.sleep(read_delay) # Wait for the command to be sent + response = self.serial.readline().decode().strip() + return response + + def write(self, command): + self.serial.write(command.encode()) + def close(self): # Close the serial connection self.serial.close() class 
XLight_Simulation: def __init__(self): + self.has_spinning_disk_motor = True + self.has_spinning_disk_slider = True + self.has_dichroic_filters_wheel = True + self.has_emission_filters_wheel = True + self.has_excitation_filters_wheel = True + self.has_illumination_iris_diaphragm = True + self.has_emission_iris_diaphragm = True + self.has_dichroic_filter_slider = True + self.has_ttl_control = True + self.emission_wheel_pos = 1 self.dichroic_wheel_pos = 1 self.disk_motor_state = False self.spinning_disk_pos = 0 - def set_emission_filter(self,position, extraction=False): + def set_emission_filter(self,position, extraction=False, validate=False): self.emission_wheel_pos = position return position @@ -117,7 +153,6 @@ def set_dichroic(self, position, extraction=False): def get_dichroic(self): return self.dichroic_wheel_pos - def set_disk_position(self, position): self.spinning_disk_pos = position return position @@ -132,6 +167,22 @@ def set_disk_motor_state(self, state): def get_disk_motor_state(self): return self.disk_motor_state + def set_illumination_iris(self,value): + # value: 0 - 100 + self.illumination_iris = value + return self.illumination_iris + + def set_emission_iris(self,value): + # value: 0 - 100 + self.emission_iris = value + return self.emission_iris + + def set_filter_slider(self,position): + if str(position) not in ["0","1","2","3"]: + raise ValueError("Invalid slider position!") + self.slider_position = position + return self.slider_position + # CrestOptics X-Light Port specs: # 9600 baud # 8 data bits @@ -140,20 +191,70 @@ def get_disk_motor_state(self): # no flow control class XLight: + """Wrapper for communicating with CrestOptics X-Light devices over serial""" - def __init__(self, SN="A106QADU"): + def __init__(self, SN, sleep_time_for_wheel = 0.25, disable_emission_filter_wheel=True): """ Provide serial number (default is that of the device cephla already has) for device-finding purposes. 
Otherwise, all XLight devices should use the same serial protocol """ - self.serial_connection = SerialDevice(SN=SN,baudrate=9600, + self.log = squid.logging.get_logger(self.__class__.__name__) + + self.has_spinning_disk_motor = False + self.has_spinning_disk_slider = False + self.has_dichroic_filters_wheel = False + self.has_emission_filters_wheel = False + self.has_excitation_filters_wheel = False + self.has_illumination_iris_diaphragm = False + self.has_emission_iris_diaphragm = False + self.has_dichroic_filter_slider = False + self.has_ttl_control = False + self.sleep_time_for_wheel = sleep_time_for_wheel + + self.disable_emission_filter_wheel = disable_emission_filter_wheel + + self.serial_connection = SerialDevice(SN=SN,baudrate=115200, bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, - parity=serial.PARITY_NONE, + parity=serial.PARITY_NONE, xonxoff=False,rtscts=False,dsrdtr=False) self.serial_connection.open_ser() - - def set_emission_filter(self,position,extraction=False): + + self.parse_idc_response(self.serial_connection.write_and_read("idc\r")) + self.print_config() + + def parse_idc_response(self, response): + # Convert hexadecimal response to integer + config_value = int(response, 16) + + # Check each bit and set the corresponding variable + self.has_spinning_disk_motor = bool(config_value & 0x00000001) + self.has_spinning_disk_slider = bool(config_value & 0x00000002) + self.has_dichroic_filters_wheel = bool(config_value & 0x00000004) + self.has_emission_filters_wheel = bool(config_value & 0x00000008) + self.has_excitation_filters_wheel = bool(config_value & 0x00000080) + self.has_illumination_iris_diaphragm = bool(config_value & 0x00000200) + self.has_emission_iris_diaphragm = bool(config_value & 0x00000400) + self.has_dichroic_filter_slider = bool(config_value & 0x00000800) + self.has_ttl_control = bool(config_value & 0x00001000) + + def print_config(self): + self.log.info(( + "Machine Configuration:\n" + f" Spinning disk motor: 
{self.has_spinning_disk_motor}\n", + f" Spinning disk slider: {self.has_spinning_disk_slider}\n", + f" Dichroic filters wheel: {self.has_dichroic_filters_wheel}\n", + f" Emission filters wheel: {self.has_emission_filters_wheel}\n", + f" Excitation filters wheel: {self.has_excitation_filters_wheel}\n", + f" Illumination Iris diaphragm: {self.has_illumination_iris_diaphragm}\n", + f" Emission Iris diaphragm: {self.has_emission_iris_diaphragm}\n", + f" Dichroic filter slider: {self.has_dichroic_filter_slider}\n", + f" TTL control and combined commands subsystem: {self.has_ttl_control}")) + + def set_emission_filter(self,position,extraction=False,validate=True): + if self.disable_emission_filter_wheel: + print('emission filter wheel disabled') + return -1 if str(position) not in ["1","2","3","4","5","6","7","8"]: raise ValueError("Invalid emission filter wheel position!") position_to_write = str(position) @@ -161,12 +262,18 @@ def set_emission_filter(self,position,extraction=False): if extraction: position_to_write+="m" - current_pos = self.serial_connection.write_and_check("B"+position_to_write+"\r","B"+position_to_read) - self.emission_wheel_pos = int(current_pos[1]) + if validate: + current_pos = self.serial_connection.write_and_check("B"+position_to_write+"\r","B"+position_to_read,read_delay=0.01) + self.emission_wheel_pos = int(current_pos[1]) + else: + self.serial_connection.write("B"+position_to_write+"\r") + time.sleep(self.sleep_time_for_wheel) + self.emission_wheel_pos = position + return self.emission_wheel_pos def get_emission_filter(self): - current_pos = self.serial_connection.write_and_check("rB\r","rB") + current_pos = self.serial_connection.write_and_check("rB\r","rB",read_delay=0.01) self.emission_wheel_pos = int(current_pos[2]) return self.emission_wheel_pos @@ -178,17 +285,15 @@ def set_dichroic(self, position,extraction=False): if extraction: position_to_write+="m" - current_pos = 
self.serial_connection.write_and_check("C"+position_to_write+"\r","C"+position_to_read) + current_pos = self.serial_connection.write_and_check("C"+position_to_write+"\r","C"+position_to_read,read_delay=0.01) self.dichroic_wheel_pos = int(current_pos[1]) return self.dichroic_wheel_pos - def get_dichroic(self): - current_pos = self.serial_connection.write_and_check("rC\r","rC") + current_pos = self.serial_connection.write_and_check("rC\r","rC",read_delay=0.01) self.dichroic_wheel_pos = int(current_pos[2]) return self.dichroic_wheel_pos - def set_disk_position(self,position): if str(position) not in ["0","1","2","wide field","confocal"]: raise ValueError("Invalid disk position!") @@ -201,12 +306,35 @@ def set_disk_position(self,position): position_to_write = str(position) position_to_read = str(position) - current_pos = self.serial_connection.write_and_check("D"+position_to_write+"\r","D"+position_to_read) + current_pos = self.serial_connection.write_and_check("D"+position_to_write+"\r","D"+position_to_read,read_delay=5) self.spinning_disk_pos = int(current_pos[1]) return self.spinning_disk_pos + def set_illumination_iris(self,value): + # value: 0 - 100 + self.illumination_iris = value + value = str(int(10*value)) + self.serial_connection.write_and_check("J"+value+"\r","J"+value,read_delay=3) + return self.illumination_iris + + def set_emission_iris(self,value): + # value: 0 - 100 + self.emission_iris = value + value = str(int(10*value)) + self.serial_connection.write_and_check("V"+value+"\r","V"+value,read_delay=3) + return self.emission_iris + + def set_filter_slider(self,position): + if str(position) not in ["0","1","2","3"]: + raise ValueError("Invalid slider position!") + self.slider_position = position + position_to_write = str(position) + position_to_read = str(position) + self.serial_connection.write_and_check("P"+position_to_write+"\r","V"+position_to_read,read_delay=5) + return self.slider_position + def get_disk_position(self): - current_pos = 
self.serial_connection.write_and_check("rD\r","rD") + current_pos = self.serial_connection.write_and_check("rD\r","rD",read_delay=0.01) self.spinning_disk_pos = int(current_pos[2]) return self.spinning_disk_pos @@ -217,13 +345,13 @@ def set_disk_motor_state(self, state): else: state_to_write = "0" - current_pos = self.serial_connection.write_and_check("N"+state_to_write+"\r","N"+state_to_write) + current_pos = self.serial_connection.write_and_check("N"+state_to_write+"\r","N"+state_to_write,read_delay=2.5) self.disk_motor_state = bool(int(current_pos[1])) def get_disk_motor_state(self): """Return True for on, Off otherwise""" - current_pos = self.serial_connection.write_and_check("rN\r","rN") + current_pos = self.serial_connection.write_and_check("rN\r","rN",read_delay=0.01) self.disk_motor_state = bool(int(current_pos[2])) return self.disk_motor_state @@ -231,26 +359,37 @@ class LDI: """Wrapper for communicating with LDI over serial""" def __init__(self, SN="00000001"): """ - Provide serial number (default is that of the device - cephla already has) for device-finding purposes. 
Otherwise, all - XLight devices should use the same serial protocol + Provide serial number """ + self.log = squid.logging.get_logger(self.__class__.__name__) self.serial_connection = SerialDevice(SN=SN,baudrate=9600, bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, - parity=serial.PARITY_NONE, + parity=serial.PARITY_NONE, xonxoff=False,rtscts=False,dsrdtr=False) self.serial_connection.open_ser() - + self.intensity_mode = 'PC' + self.shutter_mode = 'PC' + def run(self): self.serial_connection.write_and_check("run!\r","ok") + def set_shutter_mode(self,mode): + if mode in ['EXT','PC']: + self.serial_connection.write_and_check('SH_MODE='+mode+'\r',"ok") + self.intensity_mode = mode + + def set_intensity_mode(self,mode): + if mode in ['EXT','PC']: + self.serial_connection.write_and_check('INT_MODE='+mode+'\r',"ok") + self.intensity_mode = mode + def set_intensity(self,channel,intensity): channel = str(channel) intensity = "{:.2f}".format(intensity) - print('set:'+channel+'='+intensity+'\r') + self.log.debug('set:'+channel+'='+intensity+'\r') self.serial_connection.write_and_check('set:'+channel+'='+intensity+'\r',"ok") - print('active channel: ' + str(self.active_channel)) - + self.log.debug('active channel: ' + str(self.active_channel)) + def set_shutter(self,channel,state): channel = str(channel) state = str(state) @@ -261,10 +400,675 @@ def get_shutter_state(self): def set_active_channel(self,channel): self.active_channel = channel - print('[set active channel to ' + str(channel) + ']') + self.log.debug('[set active channel to ' + str(channel) + ']') + + def set_active_channel_shutter(self,state): + channel = str(self.active_channel) + state = str(state) + self.log.debug('shutter:'+channel+'='+state+'\r') + self.serial_connection.write_and_check('shutter:'+channel+'='+state+'\r',"ok") + + +class LDI_Simulation: + """Wrapper for communicating with LDI over serial""" + def __init__(self, SN="00000001"): + """ + Provide serial number + """ + self.log = 
squid.logging.get_logger(self.__class__.__name__) + self.intensity_mode = 'PC' + self.shutter_mode = 'PC' + + def run(self): + pass + + def set_shutter_mode(self,mode): + if mode in ['EXT','PC']: + self.intensity_mode = mode + + def set_intensity_mode(self,mode): + if mode in ['EXT','PC']: + self.intensity_mode = mode + + def set_intensity(self,channel,intensity): + channel = str(channel) + intensity = "{:.2f}".format(intensity) + self.log.debug('set:'+channel+'='+intensity+'\r') + self.log.debug('active channel: ' + str(self.active_channel)) + + def set_shutter(self,channel,state): + channel = str(channel) + state = str(state) + + def get_shutter_state(self): + return 0 + + def set_active_channel(self,channel): + self.active_channel = channel + self.log.debug('[set active channel to ' + str(channel) + ']') def set_active_channel_shutter(self,state): channel = str(self.active_channel) state = str(state) - print('shutter:'+channel+'='+state+'\r') - self.serial_connection.write_and_check('shutter:'+channel+'='+state+'\r',"ok") \ No newline at end of file + self.log.debug('shutter:'+channel+'='+state+'\r') + +class SciMicroscopyLEDArray: + """Wrapper for communicating with SciMicroscopy over serial""" + def __init__(self, SN, array_distance = 50, turn_on_delay = 0.03): + """ + Provide serial number + """ + self.serial_connection = SerialDevice(SN=SN,baudrate=115200, + bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, + parity=serial.PARITY_NONE, + xonxoff=False,rtscts=False,dsrdtr=False) + self.serial_connection.open_ser() + self.check_about() + self.set_distance(array_distance) + self.set_brightness(1) + + self.illumination = None + self.NA = 0.5 + self.turn_on_delay = turn_on_delay + + def write(self,command): + self.serial_connection.write_and_check(command+'\r','',read_delay=0.01,print_response=True) + + def check_about(self): + self.serial_connection.write_and_check('about'+'\r','=',read_delay=0.01,print_response=True) + + def 
set_distance(self,array_distance): + # array distance in mm + array_distance = str(int(array_distance)) + self.serial_connection.write_and_check('sad.'+array_distance+'\r','Current array distance from sample is '+array_distance+'mm',read_delay=0.01,print_response=False) + + def set_NA(self,NA): + self.NA = NA + NA = str(int(NA*100)) + self.serial_connection.write_and_check('na.'+NA+'\r','Current NA is 0.'+NA,read_delay=0.01,print_response=False) + + def set_color(self,color): + # (r,g,b), 0-1 + r = int(255*color[0]) + g = int(255*color[1]) + b = int(255*color[2]) + self.serial_connection.write_and_check(f'sc.{r}.{g}.{b}\r',f'Current color balance values are {r}.{g}.{b}',read_delay=0.01,print_response=False) + + def set_brightness(self, brightness): + # 0 to 100 + brightness = str(int(255*(brightness/100.0))) + self.serial_connection.write_and_check(f'sb.{brightness}\r',f'Current brightness value is {brightness}.',read_delay=0.01,print_response=False) + + def turn_on_bf(self): + self.serial_connection.write_and_check(f'bf\r','-==-',read_delay=0.01,print_response=False) + + def turn_on_dpc(self,quadrant): + self.serial_connection.write_and_check(f'dpc.{quadrant[0]}\r','-==-',read_delay=0.01,print_response=False) + + def turn_on_df(self): + self.serial_connection.write_and_check(f'df\r','-==-',read_delay=0.01,print_response=False) + + def set_illumination(self,illumination): + self.illumination = illumination + + def clear(self): + self.serial_connection.write_and_check('x\r','-==-',read_delay=0.01,print_response=False) + + def turn_on_illumination(self): + if self.illumination is not None: + self.serial_connection.write_and_check(f'{self.illumination}\r','-==-',read_delay=0.01,print_response=False) + time.sleep(self.turn_on_delay) + def turn_off_illumination(self): + self.clear() + +class SciMicroscopyLEDArray_Simulation: + """Wrapper for communicating with SciMicroscopy over serial""" + def __init__(self, SN, array_distance = 50, turn_on_delay = 0.03): + """ + 
Provide serial number + """ + self.serial_connection.open_ser() + self.check_about() + self.set_distance(array_distance) + self.set_brightness(1) + + self.illumination = None + self.NA = 0.5 + self.turn_on_delay = turn_on_delay + + def write(self,command): + pass + + def check_about(self): + pass + + def set_distance(self,array_distance): + # array distance in mm + array_distance = str(int(array_distance)) + + def set_NA(self,NA): + self.NA = NA + NA = str(int(NA*100)) + + def set_color(self,color): + # (r,g,b), 0-1 + r = int(255*color[0]) + g = int(255*color[1]) + b = int(255*color[2]) + + def set_brightness(self, brightness): + # 0 to 100 + brightness = str(int(255*(brightness/100.0))) + + def turn_on_bf(self): + pass + + def turn_on_dpc(self,quadrant): + pass + + def turn_on_df(self): + pass + + def set_illumination(self,illumination): + pass + + def clear(self): + pass + + def turn_on_illumination(self): + pass + + def turn_off_illumination(self): + pass + +class CellX: + + VALID_MODULATIONS = ['INT','EXT Digital','EXT Analog','EXT Mixed'] + + """Wrapper for communicating with LDI over serial""" + def __init__(self, SN=""): + self.serial_connection = SerialDevice(SN=SN,baudrate=115200, + bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, + parity=serial.PARITY_NONE, + xonxoff=False,rtscts=False,dsrdtr=False) + self.serial_connection.open_ser() + self.power = {} + + def turn_on(self, channel): + self.serial_connection.write_and_check('SOUR'+str(channel)+':AM:STAT ON\r','OK',read_delay=0.01,print_response=False) + + def turn_off(self, channel): + self.serial_connection.write_and_check('SOUR'+str(channel)+':AM:STAT OFF\r','OK',read_delay=0.01,print_response=False) + + def set_laser_power(self, channel, power): + if not (power >= 1 and power <= 100): + raise ValueError(f"Power={power} not in the range 1 to 100") + + if channel not in self.power.keys() or power != self.power[channel]: + 
self.serial_connection.write_and_check('SOUR'+str(channel)+':POW:LEV:IMM:AMPL '+str(power/1000)+'\r','OK',read_delay=0.01,print_response=False) + self.power[channel] = power + else: + pass # power is the same + + def set_modulation(self, channel, modulation): + if modulation not in CellX.VALID_MODULATIONS: + raise ValueError(f"Modulation '{modulation}' not in valid modulations: {CellX.VALID_MODULATIONS}") + self.serial_connection.write_and_check('SOUR'+str(channel)+':AM:' + modulation +'\r','OK',read_delay=0.01,print_response=False) + + def close(self): + self.serial_connection.close() + +class CellX_Simulation: + """Wrapper for communicating with LDI over serial""" + def __init__(self, SN=""): + self.serial_connection = SerialDevice(SN=SN,baudrate=115200, + bytesize=serial.EIGHTBITS,stopbits=serial.STOPBITS_ONE, + parity=serial.PARITY_NONE, + xonxoff=False,rtscts=False,dsrdtr=False) + self.serial_connection.open_ser() + self.power = {} + + def turn_on(self, channel): + pass + + def turn_off(self, channel): + pass + + def set_laser_power(self, channel, power): + if not (power >= 1 and power <= 100): + raise ValueError(f"Power={power} not in the range 1 to 100") + + if channel not in self.power.keys() or power != self.power[channel]: + self.power[channel] = power + else: + pass # power is the same + + def set_modulation(self, channel, modulation): + if modulation not in CellX.VALID_MODULATIONS: + raise ValueError(f"modulation '{modulation}' not in valid choices: {CellX.VALID_MODULATIONS}") + self.serial_connection.write_and_check('SOUR'+str(channel)+'AM:' + modulation +'\r','OK',read_delay=0.01,print_response=False) + + def close(self): + pass + +class FilterDeviceInfo: + """ + keep filter device information + """ + # default: 7.36 + firmware_version = '' + # default: 250000 + maxspeed = 0 + # default: 900 + accel = 0 + +class FilterController_Simulation: + """ + controller of filter device + """ + def __init__(self, _baudrate, _bytesize, _parity, _stopbits): + 
self.each_hole_microsteps = 4800 + self.current_position = 0 + self.current_index = 1 + ''' + the variable be used to keep current offset of wheel + it could be used by get the index of wheel position, the index could be '1', '2', '3' ... + ''' + self.offset_position = 0 + + self.deviceinfo = FilterDeviceInfo() + + def __del__(self): + pass + + def do_homing(self): + self.current_position = 0 + self.offset_position = 1100 + + def wait_homing_finish(self): + pass + + def set_emission_filter(self, index): + self.current_index = index + pass + + def get_emission_filter(self): + return 1 + + def start_homing(self): + pass + + def complete_homing_sequence(self): + pass + + def wait_for_homing_complete(self): + pass + +class FilterControllerError(Exception): + """Custom exception for FilterController errors.""" + pass + +class FilterController: + """Controller for filter device.""" + + MICROSTEPS_PER_HOLE = 4800 + OFFSET_POSITION = -8500 + VALID_POSITIONS = set(range(1, 8)) + MAX_RETRIES = 3 + COMMAND_TIMEOUT = 1 # seconds + + def __init__(self, serial_number: str, baudrate: int, bytesize: int, parity: str, stopbits: int): + self.log = squid.logging.get_logger(self.__class__.__name__) + self.current_position = 0 + self.current_index = 1 + self.serial = self._initialize_serial(serial_number, baudrate, bytesize, parity, stopbits) + self._configure_device() + + def _initialize_serial(self, serial_number: str, baudrate: int, bytesize: int, parity: str, stopbits: int) -> serial.Serial: + ports = [p.device for p in list_ports.comports() if serial_number == p.serial_number] + if not ports: + raise ValueError(f"No device found with serial number: {serial_number}") + return serial.Serial(ports[0], baudrate=baudrate, bytesize=bytesize, parity=parity, stopbits=stopbits, timeout=self.COMMAND_TIMEOUT) + + def _configure_device(self): + time.sleep(0.2) + self.firmware_version = self._get_device_info('/get version') + self._send_command_with_reply('/set maxspeed 250000') + 
self._send_command_with_reply('/set accel 900') + self.maxspeed = self._get_device_info('/get maxspeed') + self.accel = self._get_device_info('/get accel') + + def __del__(self): + if hasattr(self, 'serial') and self.serial.is_open: + self._send_command('/stop') + time.sleep(0.5) + self.serial.close() + + def _send_command(self, cmd: str) -> Tuple[bool, str]: + """ + Send a command to the device and handle the response. + + Args: + cmd (str): The command to send. + + Returns: + Tuple[bool, str]: A tuple containing a success flag and the response message. + + Raises: + FilterControllerError: If the command fails after maximum retries. + """ + if not self.serial.is_open: + raise RuntimeError("Serial port is not open") + + for attempt in range(self.MAX_RETRIES): + try: + self.serial.write(f"{cmd}\n".encode('utf-8')) + response = self.serial.readline().decode('utf-8').strip() + success, message = self._parse_response(response) + + if success: + return True, message + elif message.startswith('BUSY'): + time.sleep(0.1) # Wait a bit if the device is busy + continue + else: + # Log the error and retry + self.log.error(f"Command failed (attempt {attempt + 1}): {message}") + except serial.SerialTimeoutException: + self.log.error(f"Command timed out (attempt {attempt + 1})") + + time.sleep(0.5) # Wait before retrying + + raise FilterControllerError(f"Command '{cmd}' failed after {self.MAX_RETRIES} attempts") + + def _parse_response(self, response: str) -> Tuple[bool, str]: + """ + Parse the response from the device. + + Args: + response (str): The response string from the device. + + Returns: + Tuple[bool, str]: A tuple containing a success flag and the parsed message. 
+ """ + if not response: + return False, "No response received" + + parts = response.split() + if len(parts) < 4: + return False, f"Invalid response format: {response}" + + if parts[0].startswith('@'): + if parts[2] == 'OK': + return True, ' '.join(parts[3:]) + else: + return False, ' '.join(parts[2:]) + elif parts[0].startswith('!'): + return False, f"Alert: {' '.join(parts[1:])}" + elif parts[0].startswith('#'): + return True, f"Info: {' '.join(parts[1:])}" + else: + return False, f"Unknown response format: {response}" + + def _send_command_with_reply(self, cmd: str) -> bool: + success, message = self._send_command(cmd) + return success and (message == 'IDLE' or message.startswith('BUSY')) + + def _get_device_info(self, cmd: str) -> Optional[str]: + success, message = self._send_command(cmd) + return message if success else None + + def get_current_position(self) -> Tuple[bool, int]: + success, message = self._send_command('/get pos') + if success: + try: + return True, int(message.split()[-1]) + except (ValueError, IndexError): + return False, 0 + return False, 0 + + def calculate_filter_index(self) -> int: + return (self.current_position - self.OFFSET_POSITION) // self.MICROSTEPS_PER_HOLE + + def move_to_offset_position(self): + self._move_to_absolute_position(self.OFFSET_POSITION) + + def _move_to_absolute_position(self, target_position: int, timeout: int = 5): + success, _ = self._send_command(f'/move abs {target_position}') + if not success: + raise FilterControllerError("Failed to initiate filter movement") + self._wait_for_position(target_position, target_index=None, timeout=timeout) + + def set_emission_filter(self, index: int, blocking: bool = True, timeout: int = 5): + """ + Set the emission filter to the specified position. + + Args: + position (int): The desired filter position (1-7). + blocking (bool): If True, wait for the movement to complete. If False, return immediately. 
+ timeout (int): Maximum time to wait for the movement to complete (in seconds). + + Raises: + ValueError: If the position is invalid. + FilterControllerError: If the command fails to initiate movement. + TimeoutError: If the movement doesn't complete within the specified timeout (only in blocking mode). + """ + if index not in self.VALID_POSITIONS: + raise ValueError(f"Invalid emission filter wheel index position: {index}") + + target_position = self.OFFSET_POSITION + (index - 1) * self.MICROSTEPS_PER_HOLE + success, _ = self._send_command(f'/move abs {target_position}') + + if not success: + raise FilterControllerError("Failed to initiate filter movement") + + if blocking: + self._wait_for_position(target_position, index, timeout) + else: + # Update the current position without waiting + self.current_position = target_position + self.current_index = index + + def _wait_for_position(self, target_position: int, target_index : int, timeout: int): + """ + Wait for the filter to reach the target position. + + Args: + target_position (int): The expected final position. + timeout (int): Maximum time to wait (in seconds). + + Raises: + TimeoutError: If the movement doesn't complete within the specified timeout. + """ + start_time = time.time() + while time.time() - start_time < timeout: + time.sleep(0.003) + success, position = self.get_current_position() + if success and position == target_position: + self.current_position = target_position + self.current_index = target_index + return + raise TimeoutError(f"Filter move to position {target_position} timed out") + + def get_emission_filter_position(self) -> int: + return self.calculate_filter_index() + 1 + + def start_homing(self): + """ + Start the homing sequence for the filter device. + + This function initiates the homing process but does not wait for it to complete. + Use wait_for_homing_complete() to wait for the homing process to finish. + + Raises: + FilterControllerError: If the homing command fails to initiate. 
+ """ + success, _ = self._send_command('/home') + if not success: + raise FilterControllerError("Failed to initiate homing sequence") + + def wait_for_homing_complete(self, timeout: int = 50) -> bool: + """ + Wait for the homing sequence to complete. + + Args: + timeout (int): Maximum time to wait for homing to complete, in seconds. + + Returns: + bool: True if homing completed successfully, False if it timed out. + + Raises: + FilterControllerError: If there's an error while checking the homing status. + """ + start_time = time.time() + while time.time() - start_time < timeout: + time.sleep(0.5) + success, position = self.get_current_position() + if not success: + raise FilterControllerError("Failed to get current position during homing") + if position == 0: + self.current_position = 0 + self.move_to_offset_position() + return True + return False + + def complete_homing_sequence(self, timeout: int = 50): + """ + Perform a complete homing sequence. + + This method starts the homing sequence and waits for it to complete. + + Args: + timeout (int): Maximum time to wait for homing to complete, in seconds. + + Raises: + FilterControllerError: If homing fails to start or complete. + TimeoutError: If homing doesn't complete within the specified timeout. 
+ """ + self.start_homing() + if not self.wait_for_homing_complete(timeout): + raise TimeoutError("Filter device homing failed") + + +class Optospin: + def __init__(self, SN, baudrate=115200, timeout=1, max_retries=3, retry_delay=0.5): + self.log = squid.logging.get_logger(self.__class__.__name__) + + optospin_port = [p.device for p in serial.tools.list_ports.comports() if SN == p.serial_number] + self.ser = serial.Serial(optospin_port[0], baudrate=baudrate, timeout=timeout) + self.max_retries = max_retries + self.retry_delay = retry_delay + self.current_index = 1 + + def _send_command(self, command, data=None): + if data is None: + data = [] + full_command = struct.pack('>H', command) + bytes(data) + + for attempt in range(self.max_retries): + try: + self.ser.write(full_command) + response = self.ser.read(2) + + if len(response) != 2: + raise serial.SerialTimeoutException("Timeout: No response from device") + + status, length = struct.unpack('>BB', response) + + if status != 0xFF: + raise Exception(f"Command failed with status: {status}") + + if length > 0: + additional_data = self.ser.read(length) + if len(additional_data) != length: + raise serial.SerialTimeoutException("Timeout: Incomplete additional data") + return additional_data + return None + + except (serial.SerialTimeoutException, Exception) as e: + self.log.error(f"Attempt {attempt + 1} failed: {str(e)}") + if attempt < self.max_retries - 1: + self.log.error(f"Retrying in {self.retry_delay} seconds...") + time.sleep(self.retry_delay) + else: + raise Exception(f"Command failed after {self.max_retries} attempts: {str(e)}") + + + def get_version(self): + result = self._send_command(0x0040) + return struct.unpack('>BB', result) + + def set_speed(self, speed): + speed_int = int(speed * 100) + self._send_command(0x0048, struct.pack('> 4) & 0x07 + rotor3 = result[1] & 0x07 + rotor4 = (result[1] >> 4) & 0x07 + return rotor1, rotor2, rotor3, rotor4 + + def measure_temperatures(self): + self._send_command(0x00A8) 
+ + def read_temperatures(self): + result = self._send_command(0x00AC) + return struct.unpack('>BBBB', result) + + def close(self): + self.ser.close() + +class Optospin_Simulation: + def __init__(self, SN, baudrate=115200, timeout=1, max_retries=3, retry_delay=0.5): + self.current_index = 1 + pass + + def _send_command(self, command, data=None): + pass + + def get_version(self): + pass + + def set_speed(self, speed): + pass + + def spin_rotors(self): + pass + + def stop_rotors(self): + pass + + def _usb_go(self, rotor1_pos, rotor2_pos=0, rotor3_pos=0, rotor4_pos=0): + pass + + def set_emission_filter(self, index): + self.current_index = index + pass + + def get_rotor_positions(self): + return 0,0,0,0 + + def measure_temperatures(self): + pass + + def read_temperatures(self): + pass + + def close(self): + pass diff --git a/software/control/stage_prior.py b/software/control/stage_prior.py new file mode 100644 index 000000000..0a73d86eb --- /dev/null +++ b/software/control/stage_prior.py @@ -0,0 +1,272 @@ +import serial +import time +import re +import threading + +class PriorStage(): + def __init__(self, sn, baudrate=115200, timeout=0.1, parent=None): + port = [p.device for p in serial.tools.list_ports.comports() if sn == p.serial_number] + self.serial = serial.Serial(port[0], baudrate=baudrate, timeout=timeout) + self.current_baudrate = baudrate + + # Position information + self.x_pos = 0 + self.y_pos = 0 + self.z_pos = 0 # Always 0 for Prior stage + self.theta_pos = 0 # Always 0 for Prior stage + + # Button and switch state + self.button_and_switch_state = 0 + self.joystick_button_pressed = 0 + self.signal_joystick_button_pressed_event = False + self.switch_state = 0 + self.joystick_enabled = False + + # Prior-specific properties + self.stage_microsteps_per_mm = 100000 # Stage property + self.user_unit = None + self.stage_model = None + self.stage_limits = None + self.resolution = 0.1 + self.x_direction = 1 # 1 or -1 + self.y_direction = 1 # 1 or -1 + self.speed = 
200 # Default value + self.acceleration = 500 # Default value + + # Position updating callback + self.pos_callback_external = None + self.serial_lock = threading.Lock() + self.position_updating_event = threading.Event() + self.position_updating_thread = threading.Thread(target=self.return_position_info, daemon=True) + + self.set_baudrate(baudrate) + + self.initialize() + self.position_updating_thread.start() + + def set_baudrate(self, baud): + allowed_baudrates = {9600: '96', 19200: '19', 38400: '38', 115200: '115'} + if baud not in allowed_baudrates: + print('Baudrate not allowed. Setting baudrate to 9600') + baud_command = "BAUD 96" + else: + baud_command = "BAUD " + allowed_baudrates[baud] + print(baud_command) + + for bd in allowed_baudrates: + self.serial.baudrate = bd + self.serial.write(b'\r') + time.sleep(0.1) + self.serial.flushInput() + + self.send_command(baud_command) + + self.serial.baudrate = baud + + try: + test_response = self.send_command("$") # Send a simple query command + if not test_response: + raise Exception("No response received after changing baud rate") + else: + self.current_baudrate = baud + print(f"Baud rate successfully changed to {baud}") + return + except Exception as e: + # If verification fails, try to revert to the original baud rate + self.serial.baudrate = self.current_baudrate + print(f"Serial baudrate: {bd}") + print(f"Failed to verify communication at new baud rate: {e}") + + raise Exception("Failed to set baudrate.") + + def initialize(self): + self.send_command("COMP 0") # Set to standard mode + self.send_command("BLSH 1") # Enable backlash correction + self.send_command("RES,S," + str(self.resolution)) # Set resolution + response = self.send_command("H 0") # Joystick enabled + self.joystick_enabled = True + self.user_unit = self.stage_microsteps_per_mm * self.resolution + self.get_stage_info() + self.set_acceleration(self.acceleration) + self.set_max_speed(self.speed) + + def send_command(self, command): + with 
self.serial_lock: + self.serial.write(f"{command}\r".encode()) + response = self.serial.readline().decode().strip() + if response.startswith('E'): + raise Exception(f"Error from controller: {response}") + return response + + def get_stage_info(self): + stage_info = self.send_command("STAGE") + self.stage_model = re.search(r'STAGE\s*=\s*(\S+)', stage_info).group(1) + print("Stage model: ", self.stage_model) + + def mm_to_steps(self, mm): + return int(mm * self.user_unit) + + def steps_to_mm(self, steps): + return steps / self.user_unit + + def set_max_speed(self, speed=1000): + """Set the maximum speed of the stage. Range is 1 to 1000.""" + if 1 <= speed <= 1000: + response = self.send_command(f"SMS {speed}") + print(f"Maximum speed set to {speed}. Response: {response}") + else: + raise ValueError("Speed must be between 1 and 1000") + + def get_max_speed(self): + """Get the current maximum speed setting.""" + response = self.send_command("SMS") + print(f"Current maximum speed: {response}") + return int(response) + + def set_acceleration(self, acceleration=1000): + """Set the acceleration of the stage. Range is 1 to 1000.""" + if 1 <= acceleration <= 1000: + response = self.send_command(f"SAS {acceleration}") + self.acceleration = acceleration + print(f"Acceleration set to {acceleration}. 
Response: {response}") + else: + raise ValueError("Acceleration must be between 1 and 1000") + + def get_acceleration(self): + """Get the current acceleration setting.""" + response = self.send_command("SAS") + print(f"Current acceleration: {response}") + return int(response) + + def set_callback(self, function): + self.pos_callback_external = function + + def return_position_info(self): + while not self.position_updating_event.is_set(): + if self.pos_callback_external is not None: + self.pos_callback_external(self) + + def home_xy(self): + """Home the XY stage.""" + self.send_command("M") # 'M' command moves stage to (0,0,0) + self.wait_for_stop() + self.x_pos = 0 + self.y_pos = 0 + print('finished homing') + + def home_x(self): + self.move_relative(-self.x_pos, 0) + self.x_pos = 0 + + def home_y(self): + self.move_relative(0, -self.y_pos) + self.y_pos = 0 + + def zero_xy(self): + self.send_command("Z") + self.x_pos = 0 + self.y_pos = 0 + + def zero_x(self): + self.set_pos(0, self.y_pos) + + def zero_y(self): + self.set_pos(self.x_pos, 0) + + def get_pos(self): + response = self.send_command("P") + x, y, z = map(int, response.split(',')) + self.x_pos = x + self.y_pos = y + return x, y, 0, 0 # Z and theta are 0 + + def set_pos(self, x, y, z=0): + self.send_command(f"P {x},{y},{z}") + self.x_pos = x + self.y_pos = y + + def move_relative_mm(self, x_mm, y_mm): + x_steps = self.mm_to_steps(x_mm) + y_steps = self.mm_to_steps(y_mm) + return self.move_relative(x_steps, y_steps) + + def move_absolute_mm(self, x_mm, y_mm): + x_steps = self.mm_to_steps(x_mm) + y_steps = self.mm_to_steps(y_mm) + return self.move_absolute(x_steps, y_steps) + + def move_absolute_x_mm(self, x_mm): + x_steps = self.mm_to_steps(x_mm) + return self.move_absolute_x(x_steps) + + def move_absolute_y_mm(self, y_mm): + y_steps = self.mm_to_steps(y_mm) + return self.move_absolute_y(y_steps) + + def move_relative(self, x, y, blocking=True): + x = x * self.x_direction + y = y * self.y_direction + 
self.send_command(f"GR {x},{y}") + if blocking: + self.wait_for_stop() + else: + threading.Thread(target=self.wait_for_stop, daemon=True).start() + + def move_absolute(self, x, y, blocking=True): + x = x * self.x_direction + y = y * self.y_direction + self.send_command(f"G {x},{y}") + if blocking: + self.wait_for_stop() + else: + threading.Thread(target=self.wait_for_stop, daemon=True).start() + + def move_absolute_x(self, x, blocking=True): + x = x * self.x_direction + self.send_command(f"GX {x}") + if blocking: + self.wait_for_stop() + else: + threading.Thread(target=self.wait_for_stop, daemon=True).start() + + def move_absolute_y(self, y, blocking=True): + y = y * self.y_direction + self.send_command(f"GY {y}") + if blocking: + self.wait_for_stop() + else: + threading.Thread(target=self.wait_for_stop, daemon=True).start() + + def enable_joystick(self): + self.send_command("J") + self.joystick_enabled = True + + def disable_joystick(self): + self.send_command("H") + self.joystick_enabled = False + + def wait_for_stop(self): + while True: + status = int(self.send_command("$,S")) + if status == 0: + self.get_pos() + print('xy position: ', self.x_pos, self.y_pos) + break + time.sleep(0.05) + + def stop(self): + return self.send_command("K") + + def close(self): + self.disable_joystick() + self.position_updating_event.set() + self.position_updating_thread.join() + self.serial.close() + print('Stage closed') + + def home_z(self): + pass + + def zero_z(self): + pass + diff --git a/software/control/stitcher.py b/software/control/stitcher.py new file mode 100644 index 000000000..81a7138c2 --- /dev/null +++ b/software/control/stitcher.py @@ -0,0 +1,1779 @@ +# napari + stitching libs +import os +import sys +from control._def import * +from qtpy.QtCore import * + +import psutil +import shutil +import random +import json +import time +import math +from datetime import datetime +from lxml import etree +import numpy as np +import pandas as pd +import cv2 +import dask.array as 
    def __init__(self, input_folder, output_name='', output_format=".ome.zarr", apply_flatfield=0, use_registration=0, registration_channel='', registration_z_level=0, flexible=True):
        """Set up a stitching job over an acquisition folder.

        Args:
            input_folder: acquisition root containing per-timepoint subfolders
                named by integer, plus 'configurations.xml' and
                'acquisition parameters.json'.
            output_name: base name of the stitched output (extension appended).
            output_format: appended to output_name (e.g. ".ome.zarr").
            apply_flatfield: truthy to compute/apply BaSiC flatfield correction.
            use_registration: truthy to enable cross-correlation registration;
                only then are registration_channel/registration_z_level stored.
            flexible: passed through; gates complete-acquisition export in run().

        NOTE(review): calls both QThread.__init__ and QObject.__init__ even
        though QThread already derives from QObject — presumably intentional
        for this Qt binding; confirm before changing.
        """
        QThread.__init__(self)
        QObject.__init__(self)
        self.input_folder = input_folder
        self.image_folder = None
        self.output_name = output_name + output_format
        self.apply_flatfield = apply_flatfield
        self.use_registration = use_registration
        if use_registration:
            # Registration settings are only defined when registration is requested.
            self.registration_channel = registration_channel
            self.registration_z_level = registration_z_level

        # These three calls read the acquisition folder immediately (XML, JSON,
        # directory listing), so construction can raise on a malformed folder.
        self.selected_modes = self.extract_selected_modes(self.input_folder)
        self.acquisition_params = self.extract_acquisition_parameters(self.input_folder)
        self.time_points = self.get_time_points(self.input_folder)
        print("timepoints:", self.time_points)
        self.is_reversed = self.determine_directions(self.input_folder)  # init: top to bottom, left to right
        print(self.is_reversed)
        # determine_directions() may overwrite is_wellplate; this resets it to the
        # configured default from control._def (IS_HCS).
        self.is_wellplate = IS_HCS
        self.flexible = flexible
        self.pixel_size_um = 1.0
        self.init_stitching_parameters()
        # self.overlap_percent = Acquisition.OVERLAP_PERCENT
self.v_shift = self.h_shift = (0,0) + self.max_x_overlap = self.max_y_overlap = 0 + self.flatfields = {} + self.stitching_data = {} + self.tczyx_shape = (len(self.time_points),self.num_c,self.num_z,self.num_rows*self.input_height,self.num_cols*self.input_width) + self.stitched_images = None + self.chunks = None + self.dtype = np.uint16 + + def get_time_points(self, input_folder): + try: # detects directories named as integers, representing time points. + time_points = [d for d in os.listdir(input_folder) if os.path.isdir(os.path.join(input_folder, d)) and d.isdigit()] + time_points.sort(key=int) + return time_points + except Exception as e: + print(f"Error detecting time points: {e}") + return ['0'] + + def extract_selected_modes(self, input_folder): + try: + configs_path = os.path.join(input_folder, 'configurations.xml') + tree = etree.parse(configs_path) + root = tree.getroot() + selected_modes = {} + for mode in root.findall('.//mode'): + if mode.get('Selected') == '1': + mode_id = mode.get('ID') + selected_modes[mode_id] = { + 'Name': mode.get('Name'), + 'ExposureTime': mode.get('ExposureTime'), + 'AnalogGain': mode.get('AnalogGain'), + 'IlluminationSource': mode.get('IlluminationSource'), + 'IlluminationIntensity': mode.get('IlluminationIntensity') + } + return selected_modes + except Exception as e: + print(f"Error reading selected modes: {e}") + + def extract_acquisition_parameters(self, input_folder): + acquistion_params_path = os.path.join(input_folder, 'acquisition parameters.json') + with open(acquistion_params_path, 'r') as file: + acquisition_params = json.load(file) + return acquisition_params + + def extract_wavelength(self, name): + # Split the string and find the wavelength number immediately after "Fluorescence" + parts = name.split() + if 'Fluorescence' in parts: + index = parts.index('Fluorescence') + 1 + if index < len(parts): + return parts[index].split()[0] # Assuming '488 nm Ex' and taking '488' + for color in ['R', 'G', 'B']: + if color in 
parts: + return color + return None + + def determine_directions(self, input_folder): + # return {'rows': self.acquisition_params.get("row direction", False), + # 'cols': self.acquisition_params.get("col direction", False), + # 'z-planes': False} + coordinates = pd.read_csv(os.path.join(input_folder, self.time_points[0], 'coordinates.csv')) + try: + first_region = coordinates['region'].unique()[0] + coordinates = coordinates[coordinates['region'] == first_region] + self.is_wellplate = True + except Exception as e: + print("no coordinates.csv well data:", e) + self.is_wellplate = False + i_rev = not coordinates.sort_values(by='i')['y (mm)'].is_monotonic_increasing + j_rev = not coordinates.sort_values(by='j')['x (mm)'].is_monotonic_increasing + k_rev = not coordinates.sort_values(by='z_level')['z (um)'].is_monotonic_increasing + return {'rows': i_rev, 'cols': j_rev, 'z-planes': k_rev} + + def parse_filenames(self, time_point): + # Initialize directories and read files + self.image_folder = os.path.join(self.input_folder, str(time_point)) + print("stitching image folder:", self.image_folder) + self.init_stitching_parameters() + + all_files = os.listdir(self.image_folder) + sorted_input_files = sorted( + [filename for filename in all_files if filename.endswith((".bmp", ".tiff")) and 'focus_camera' not in filename] + ) + if not sorted_input_files: + raise Exception("No valid files found in directory.") + + first_filename = sorted_input_files[0] + try: + first_region, first_i, first_j, first_k, channel_name = os.path.splitext(first_filename)[0].split('_', 4) + first_k = int(first_k) + print("region_i_j_k_channel_name: ", os.path.splitext(first_filename)[0]) + self.is_wellplate = True + except ValueError as ve: + first_i, first_j, first_k, channel_name = os.path.splitext(first_filename)[0].split('_', 3) + print("i_j_k_channel_name: ", os.path.splitext(first_filename)[0]) + self.is_wellplate = False + + input_extension = os.path.splitext(sorted_input_files[0])[1] + max_i, 
max_j, max_k = 0, 0, 0 + regions, channel_names = set(), set() + + for filename in sorted_input_files: + if self.is_wellplate: + region, i, j, k, channel_name = os.path.splitext(filename)[0].split('_', 4) + else: + region = '0' + i, j, k, channel_name = os.path.splitext(filename)[0].split('_', 3) + + channel_name = channel_name.replace("_", " ").replace("full ", "full_") + i, j, k = int(i), int(j), int(k) + + regions.add(region) + channel_names.add(channel_name) + max_i, max_j, max_k = max(max_i, i), max(max_j, j), max(max_k, k) + + tile_info = { + 'filepath': os.path.join(self.image_folder, filename), + 'region': region, + 'channel': channel_name, + 'z_level': k, + 'row': i, + 'col': j + } + self.stitching_data.setdefault(region, {}).setdefault(channel_name, {}).setdefault(k, {}).setdefault((i, j), tile_info) + + self.regions = sorted(regions) + self.channel_names = sorted(channel_names) + self.num_z, self.num_cols, self.num_rows = max_k + 1, max_j + 1, max_i + 1 + + first_coord = f"{self.regions[0]}_{first_i}_{first_j}_{first_k}_" if self.is_wellplate else f"{first_i}_{first_j}_{first_k}_" + found_dims = False + mono_channel_names = [] + + for channel in self.channel_names: + filename = first_coord + channel.replace(" ", "_") + input_extension + image = dask_imread(os.path.join(self.image_folder, filename))[0] + + if not found_dims: + self.dtype = np.dtype(image.dtype) + self.input_height, self.input_width = image.shape[:2] + self.chunks = (1, 1, 1, self.input_height//2, self.input_width//2) + found_dims = True + print("chunks", self.chunks) + + if len(image.shape) == 3: + self.is_rgb[channel] = True + channel = channel.split('_')[0] + mono_channel_names.extend([f"{channel}_R", f"{channel}_G", f"{channel}_B"]) + else: + self.is_rgb[channel] = False + mono_channel_names.append(channel) + + self.mono_channel_names = mono_channel_names + self.num_c = len(mono_channel_names) + self.channel_colors = [CHANNEL_COLORS_MAP.get(self.extract_wavelength(name), {'hex': 
0xFFFFFF})['hex'] for name in self.mono_channel_names] + print(self.mono_channel_names) + print(self.regions) + + def get_flatfields(self, progress_callback=None): + def process_images(images, channel_name): + images = np.array(images) + basic = BaSiC(get_darkfield=False, smoothness_flatfield=1) + basic.fit(images) + channel_index = self.mono_channel_names.index(channel_name) + self.flatfields[channel_index] = basic.flatfield + if progress_callback: + progress_callback(channel_index + 1, self.num_c) + + # Iterate only over the channels you need to process + for channel in self.channel_names: + all_tiles = [] + # Collect tiles from all roi and z-levels for the current channel + for roi in self.regions: + for z_level in self.stitching_data[roi][channel]: + for row_col, tile_info in self.stitching_data[roi][channel][z_level].items(): + all_tiles.append(tile_info) + + # Shuffle and select a subset of tiles for flatfield calculation + random.shuffle(all_tiles) + selected_tiles = all_tiles[:min(32, len(all_tiles))] + + if self.is_rgb[channel]: + # Process each color channel if the channel is RGB + images_r = [dask_imread(tile['filepath'])[0][:, :, 0] for tile in selected_tiles] + images_g = [dask_imread(tile['filepath'])[0][:, :, 1] for tile in selected_tiles] + images_b = [dask_imread(tile['filepath'])[0][:, :, 2] for tile in selected_tiles] + channel = channel.split('_')[0] + process_images(images_r, channel + '_R') + process_images(images_g, channel + '_G') + process_images(images_b, channel + '_B') + else: + # Process monochrome images + images = [dask_imread(tile['filepath'])[0] for tile in selected_tiles] + process_images(images, channel) + + def normalize_image(self, img): + img_min, img_max = img.min(), img.max() + img_normalized = (img - img_min) / (img_max - img_min) + scale_factor = np.iinfo(self.dtype).max if np.issubdtype(self.dtype, np.integer) else 1 + return (img_normalized * scale_factor).astype(self.dtype) + + def visualize_image(self, img1, img2, 
title): + if title == 'horizontal': + combined_image = np.hstack((img1, img2)) + else: + combined_image = np.vstack((img1, img2)) + cv2.imwrite(f"{self.input_folder}/{title}.png", combined_image) + + def calculate_horizontal_shift(self, img1_path, img2_path, max_overlap, margin_ratio=0.2): + try: + img1 = dask_imread(img1_path)[0].compute() + img2 = dask_imread(img2_path)[0].compute() + img1 = self.normalize_image(img1) + img2 = self.normalize_image(img2) + + margin = int(self.input_height * margin_ratio) + img1_overlap = (img1[margin:-margin, -max_overlap:]).astype(self.dtype) + img2_overlap = (img2[margin:-margin, :max_overlap]).astype(self.dtype) + + self.visualize_image(img1_overlap, img2_overlap, "horizontal") + shift, error, diffphase = phase_cross_correlation(img1_overlap, img2_overlap, upsample_factor=10) + return round(shift[0]), round(shift[1] - img1_overlap.shape[1]) + except Exception as e: + print(f"Error calculating horizontal shift: {e}") + return (0, 0) + + def calculate_vertical_shift(self, img1_path, img2_path, max_overlap, margin_ratio=0.2): + try: + img1 = dask_imread(img1_path)[0].compute() + img2 = dask_imread(img2_path)[0].compute() + img1 = self.normalize_image(img1) + img2 = self.normalize_image(img2) + + margin = int(self.input_width * margin_ratio) + img1_overlap = (img1[-max_overlap:, margin:-margin]).astype(self.dtype) + img2_overlap = (img2[:max_overlap, margin:-margin]).astype(self.dtype) + + self.visualize_image(img1_overlap, img2_overlap, "vertical") + shift, error, diffphase = phase_cross_correlation(img1_overlap, img2_overlap, upsample_factor=10) + return round(shift[0] - img1_overlap.shape[0]), round(shift[1]) + except Exception as e: + print(f"Error calculating vertical shift: {e}") + return (0, 0) + + def calculate_shifts(self, roi=""): + roi = self.regions[0] if roi not in self.regions else roi + self.registration_channel = self.registration_channel if self.registration_channel in self.channel_names else self.channel_names[0] 
+ + # Calculate estimated overlap from acquisition parameters + dx_mm = self.acquisition_params['dx(mm)'] + dy_mm = self.acquisition_params['dy(mm)'] + obj_mag = self.acquisition_params['objective']['magnification'] + obj_tube_lens_mm = self.acquisition_params['objective']['tube_lens_f_mm'] + sensor_pixel_size_um = self.acquisition_params['sensor_pixel_size_um'] + tube_lens_mm = self.acquisition_params['tube_lens_mm'] + + obj_focal_length_mm = obj_tube_lens_mm / obj_mag + actual_mag = tube_lens_mm / obj_focal_length_mm + self.pixel_size_um = sensor_pixel_size_um / actual_mag + print("pixel_size_um:", self.pixel_size_um) + + dx_pixels = dx_mm * 1000 / self.pixel_size_um + dy_pixels = dy_mm * 1000 / self.pixel_size_um + print("dy_pixels", dy_pixels, ", dx_pixels:", dx_pixels) + + self.max_x_overlap = round(abs(self.input_width - dx_pixels) / 2) + self.max_y_overlap = round(abs(self.input_height - dy_pixels) / 2) + print("objective calculated - vertical overlap:", self.max_y_overlap, ", horizontal overlap:", self.max_x_overlap) + + col_left, col_right = (self.num_cols - 1) // 2, (self.num_cols - 1) // 2 + 1 + if self.is_reversed['cols']: + col_left, col_right = col_right, col_left + + row_top, row_bottom = (self.num_rows - 1) // 2, (self.num_rows - 1) // 2 + 1 + if self.is_reversed['rows']: + row_top, row_bottom = row_bottom, row_top + + img1_path = img2_path_vertical = img2_path_horizontal = None + for (row, col), tile_info in self.stitching_data[roi][self.registration_channel][self.registration_z_level].items(): + if col == col_left and row == row_top: + img1_path = tile_info['filepath'] + elif col == col_left and row == row_bottom: + img2_path_vertical = tile_info['filepath'] + elif col == col_right and row == row_top: + img2_path_horizontal = tile_info['filepath'] + + if img1_path is None: + raise Exception( + f"No input file found for c:{self.registration_channel} k:{self.registration_z_level} " + f"j:{col_left} i:{row_top}" + ) + + self.v_shift = ( + 
    def calculate_dynamic_shifts(self, roi, channel, z_level, row, col):
        """Refine the precomputed tile shifts using this tile's actual neighbors.

        Starts from the global self.h_shift / self.v_shift and re-registers the
        tile at (row, col) against its left and top neighbor tiles when they
        exist. A freshly measured shift is adopted only if the precomputed
        shift is (0, 0) or the new value agrees with it within +/-5% per
        component (the code checks 0.95..1.05 of the magnitude).

        Returns:
            (h_shift, v_shift) tuples to use for this tile's placement.
        """
        h_shift, v_shift = self.h_shift, self.v_shift

        # Check for left neighbor
        if (row, col - 1) in self.stitching_data[roi][channel][z_level]:
            left_tile_path = self.stitching_data[roi][channel][z_level][row, col - 1]['filepath']
            current_tile_path = self.stitching_data[roi][channel][z_level][row, col]['filepath']
            # Calculate horizontal shift
            new_h_shift = self.calculate_horizontal_shift(left_tile_path, current_tile_path, abs(self.h_shift[1]))

            # Adopt the new horizontal shift only when it is within +/-5% of the
            # precomputed one, or when there is no precomputed shift at all.
            if self.h_shift == (0,0) or (0.95 * abs(self.h_shift[1]) <= abs(new_h_shift[1]) <= 1.05 * abs(self.h_shift[1]) and
                0.95 * abs(self.h_shift[0]) <= abs(new_h_shift[0]) <= 1.05 * abs(self.h_shift[0])):
                print("new h shift", new_h_shift, h_shift)
                h_shift = new_h_shift

        # Check for top neighbor
        if (row - 1, col) in self.stitching_data[roi][channel][z_level]:
            top_tile_path = self.stitching_data[roi][channel][z_level][row - 1, col]['filepath']
            current_tile_path = self.stitching_data[roi][channel][z_level][row, col]['filepath']
            # Calculate vertical shift
            new_v_shift = self.calculate_vertical_shift(top_tile_path, current_tile_path, abs(self.v_shift[0]))

            # Same +/-5% agreement rule, applied to the vertical direction.
            if self.v_shift == (0,0) or (0.95 * abs(self.v_shift[0]) <= abs(new_v_shift[0]) <= 1.05 * abs(self.v_shift[0]) and
                0.95 * abs(self.v_shift[1]) <= abs(new_v_shift[1]) <= 1.05 * abs(self.v_shift[1])):
                print("new v shift", new_v_shift, v_shift)
                v_shift = new_v_shift

        return h_shift, v_shift
    def stitch_single_image(self, tile, z_level, channel_idx, row, col):
        """Flat-field correct (optionally), crop, and paste one tile into the mosaic.

        Interior tile edges are cropped by half the registration overlap so
        that neighboring tiles butt together; outer edges of the grid are
        left uncropped. The paste origin combines the regular grid pitch with
        the cross-axis components of the registration shifts.
        NOTE(review): assumes self.stitched_images was sized by init_output()
        for these shifts — confirm they are not changed in between.
        """
        #print(tile.shape)
        if self.apply_flatfield:
            # Divide by the per-channel flatfield, clamped into the dtype's range.
            tile = (tile / self.flatfields[channel_idx]).clip(min=np.iinfo(self.dtype).min,
                                                              max=np.iinfo(self.dtype).max).astype(self.dtype)
        # Determine crop for tile edges: half the (negative) overlap shift plus
        # half the cross-axis shift magnitude; zero on the grid's outer borders.
        top_crop = max(0, (-self.v_shift[0] // 2) - abs(self.h_shift[0]) // 2) if row > 0 else 0
        bottom_crop = max(0, (-self.v_shift[0] // 2) - abs(self.h_shift[0]) // 2) if row < self.num_rows - 1 else 0
        left_crop = max(0, (-self.h_shift[1] // 2) - abs(self.v_shift[1]) // 2) if col > 0 else 0
        right_crop = max(0, (-self.h_shift[1] // 2) - abs(self.v_shift[1]) // 2) if col < self.num_cols - 1 else 0

        tile = tile[top_crop:tile.shape[0]-bottom_crop, left_crop:tile.shape[1]-right_crop]

        # Initialize starting coordinates based on tile position and shift.
        y = row * (self.input_height + self.v_shift[0]) + top_crop
        if self.h_shift[0] < 0:
            y -= (self.num_cols - 1 - col) * self.h_shift[0]  # Moves up if negative
        else:
            y += col * self.h_shift[0]  # Moves down if positive

        x = col * (self.input_width + self.h_shift[1]) + left_crop
        if self.v_shift[1] < 0:
            x -= (self.num_rows - 1 - row) * self.v_shift[1]  # Moves left if negative
        else:
            x += row * self.v_shift[1]  # Moves right if positive

        # Place cropped tile on the stitched image canvas
        self.stitched_images[0, channel_idx, z_level, y:y+tile.shape[0], x:x+tile.shape[1]] = tile
        # print(f" col:{col}, \trow:{row},\ty:{y}-{y+tile.shape[0]}, \tx:{x}-{x+tile.shape[-1]}")
    def create_complete_ome_zarr(self):
        """Create a single OME-ZARR covering all time points, with channel metadata.

        With exactly one time point the already-stitched zarr is simply copied
        to the final path; otherwise the per-timepoint zarrs are merged along
        the time axis and written with OMERO channel metadata. Emits
        finished_saving(final_path, dtype) when done.
        """
        final_path = os.path.join(self.input_folder, self.output_name.replace(".ome.zarr","") + "_complete_acquisition.ome.zarr")
        if len(self.time_points) == 1:
            # Single time point: the stitched zarr already is the complete
            # acquisition, so a copy suffices.
            zarr_path = os.path.join(self.input_folder, f"0_stitched", self.output_name)
            #final_path = zarr_path
            shutil.copytree(zarr_path, final_path)
        else:
            store = ome_zarr.io.parse_url(final_path, mode="w").store
            root_group = zarr.group(store=store)
            intensity_min = np.iinfo(self.dtype).min
            intensity_max = np.iinfo(self.dtype).max

            # Merge all timepoints (padded to a common shape) along axis 0.
            data = self.load_and_merge_timepoints()
            ome_zarr.writer.write_image(
                image=data,
                group=root_group,
                axes="tczyx",
                channel_names=self.mono_channel_names,
                storage_options=dict(chunks=self.chunks)
            )

            # Per-channel display metadata: label, hex color, and the full
            # intensity window of the output dtype.
            channel_info = [{
                "label": self.mono_channel_names[i],
                "color": f"{self.channel_colors[i]:06X}",
                "window": {"start": intensity_min, "end": intensity_max},
                "active": True
            } for i in range(self.num_c)]

            # Assign the channel metadata to the image group
            root_group.attrs["omero"] = {"channels": channel_info}

        print(f"Data saved in OME-ZARR format at: {final_path}")
        root = zarr.open(final_path, mode='r')
        print(root.tree())
        print(dict(root.attrs))
        self.finished_saving.emit(final_path, self.dtype)
well_group.create_dataset("data", data=data, chunks=(1, 1, 1, self.input_height, self.input_width), dtype=data.dtype) + field_paths = ["0"] # Assuming single field of view + ome_zarr.writer.write_well_metadata(well_group, field_paths) + for fi, field in enumerate(field_paths): + image_group = well_group.require_group(str(field)) + ome_zarr.writer.write_image(image=data, + group=image_group, + axes="tczyx", + channel_names=self.mono_channel_names, + storage_options=dict(chunks=self.chunks) + ) + channel_info = [{ + "label": self.mono_channel_names[c], + "color": f"{self.channel_colors[c]:06X}", + "window": {"start": intensity_min, "end": intensity_max}, + "active": True + } for c in range(self.num_c)] + + image_group.attrs["omero"] = {"channels": channel_info} + + def pad_to_largest(self, array, target_shape): + if array.shape == target_shape: + return array + pad_widths = [(0, max(0, ts - s)) for s, ts in zip(array.shape, target_shape)] + return da.pad(array, pad_widths, mode='constant', constant_values=0) + + def load_and_merge_timepoints(self, well_id=''): + """Load and merge data for a well from Zarr files for each timepoint.""" + t_data = [] + t_shapes = [] + for t in self.time_points: + if self.is_wellplate: + filepath = f"{well_id}_{self.output_name}" + else: + filepath = f"{self.output_name}" + zarr_path = os.path.join(self.input_folder, f"{t}_stitched", filepath) + print(f"t:{t} well:{well_id}, \t{zarr_path}") + z = zarr.open(zarr_path, mode='r') + # Ensure that '0' contains the data and it matches expected dimensions + x_max = self.input_width + ((self.num_cols - 1) * (self.input_width + self.h_shift[1])) + abs((self.num_rows - 1) * self.v_shift[1]) + y_max = self.input_height + ((self.num_rows - 1) * (self.input_height + self.v_shift[0])) + abs((self.num_cols - 1) * self.h_shift[0]) + t_array = da.from_zarr(z['0'], chunks=self.chunks) + t_data.append(t_array) + t_shapes.append(t_array.shape) + + # Concatenate arrays along the existing time axis if 
multiple timepoints are present + if len(t_data) > 1: + max_shape = tuple(max(s) for s in zip(*t_shapes)) + padded_data = [self.pad_to_largest(t, max_shape) for t in t_data] + data = da.concatenate(padded_data, axis=0) + print(f"(merged timepoints, well:{well_id}) output shape: {data.shape}") + return data + elif len(t_data) == 1: + data = t_data[0] + return data + else: + raise ValueError("no data loaded from timepoints.") + + def get_rows_and_columns(self): + """Utility to extract rows and columns from well identifiers.""" + rows = set() + columns = set() + for well_id in self.regions: + rows.add(well_id[0]) # Assuming well_id like 'A1' + columns.add(int(well_id[1:])) + return sorted(rows), sorted(columns) + + def run(self): + # Main stitching logic + stime = time.time() + try: + for time_point in self.time_points: + ttime = time.time() + print(f"starting t:{time_point}...") + self.parse_filenames(time_point) + + if self.apply_flatfield: + print(f"getting flatfields...") + self.getting_flatfields.emit() + self.get_flatfields(progress_callback=self.update_progress.emit) + print("time to apply flatfields", time.time() - ttime) + + + if self.use_registration: + shtime = time.time() + print(f"calculating shifts...") + self.calculate_shifts() + print("time to calculate shifts", time.time() - shtime) + + for well in self.regions: + wtime = time.time() + self.starting_stitching.emit() + print(f"\nstarting stitching...") + self.stitch_images(time_point, well, progress_callback=self.update_progress.emit) + + sttime = time.time() + print("time to stitch well", sttime - wtime) + + self.starting_saving.emit(not STITCH_COMPLETE_ACQUISITION) + print(f"saving...") + if ".ome.tiff" in self.output_path: + self.save_as_ome_tiff() + else: + self.save_as_ome_zarr() + + print("time to save stitched well", time.time() - sttime) + print("time per well", time.time() - wtime) + if well != '0': + print(f"...done saving well:{well}") + print(f"...finished t:{time_point}") + print("time per 
timepoint", time.time() - ttime) + + if STITCH_COMPLETE_ACQUISITION and not self.flexible and ".ome.zarr" in self.output_name: + self.starting_saving.emit(True) + scatime = time.time() + if self.is_wellplate: + self.create_hcs_ome_zarr() + print(f"...done saving complete hcs successfully") + else: + self.create_complete_ome_zarr() + print(f"...done saving complete successfully") + print("time to save merged wells and timepoints", time.time() - scatime) + else: + self.finished_saving.emit(self.output_path, self.dtype) + print("total time to stitch + save:", time.time() - stime) + + except Exception as e: + print("time before error", time.time() - stime) + print(f"error While Stitching: {e}") + + + +class CoordinateStitcher(QThread, QObject): + update_progress = Signal(int, int) + getting_flatfields = Signal() + starting_stitching = Signal() + starting_saving = Signal(bool) + finished_saving = Signal(str, object) + + def __init__(self, input_folder, output_name='', output_format=".ome.zarr", apply_flatfield=0, use_registration=0, registration_channel='', registration_z_level=0, overlap_percent=0): + super().__init__() + self.input_folder = input_folder + self.output_name = output_name + output_format + self.output_format = output_format + self.apply_flatfield = apply_flatfield + self.use_registration = use_registration + if use_registration: + self.registration_channel = registration_channel + self.registration_z_level = registration_z_level + self.coordinates_df = None + self.pixel_size_um = None + self.acquisition_params = None + self.time_points = [] + self.regions = [] + self.overlap_percent = overlap_percent + self.scan_pattern = FOV_PATTERN + self.init_stitching_parameters() + + + def init_stitching_parameters(self): + self.is_rgb = {} + self.channel_names = [] + self.mono_channel_names = [] + self.channel_colors = [] + self.num_z = self.num_c = self.num_t = 1 + self.input_height = self.input_width = 0 + self.num_pyramid_levels = 5 + self.flatfields = {} + 
self.stitching_data = {} + self.dtype = np.uint16 + self.chunks = None + self.h_shift = (0, 0) + if self.scan_pattern == 'S-Pattern': + self.h_shift_rev = (0, 0) + self.h_shift_rev_odd = 0 # 0 reverse even rows, 1 reverse odd rows + self.v_shift = (0, 0) + self.x_positions = set() + self.y_positions = set() + + def get_time_points(self): + self.time_points = [d for d in os.listdir(self.input_folder) if os.path.isdir(os.path.join(self.input_folder, d)) and d.isdigit()] + self.time_points.sort(key=int) + return self.time_points + + def extract_acquisition_parameters(self): + acquistion_params_path = os.path.join(self.input_folder, 'acquisition parameters.json') + with open(acquistion_params_path, 'r') as file: + self.acquisition_params = json.load(file) + + def get_pixel_size_from_params(self): + obj_mag = self.acquisition_params['objective']['magnification'] + obj_tube_lens_mm = self.acquisition_params['objective']['tube_lens_f_mm'] + sensor_pixel_size_um = self.acquisition_params['sensor_pixel_size_um'] + tube_lens_mm = self.acquisition_params['tube_lens_mm'] + + obj_focal_length_mm = obj_tube_lens_mm / obj_mag + actual_mag = tube_lens_mm / obj_focal_length_mm + self.pixel_size_um = sensor_pixel_size_um / actual_mag + print("pixel_size_um:", self.pixel_size_um) + + def parse_filenames(self): + self.extract_acquisition_parameters() + self.get_pixel_size_from_params() + + self.stitching_data = {} + self.regions = set() + self.channel_names = set() + max_z = 0 + max_fov = 0 + + for t, time_point in enumerate(self.time_points): + image_folder = os.path.join(self.input_folder, str(time_point)) + coordinates_path = os.path.join(self.input_folder, time_point, 'coordinates.csv') + coordinates_df = pd.read_csv(coordinates_path) + + print(f"Processing timepoint {time_point}, image folder: {image_folder}") + + image_files = sorted([f for f in os.listdir(image_folder) if f.endswith(('.bmp', '.tiff')) and 'focus_camera' not in f]) + + if not image_files: + raise Exception(f"No 
valid files found in directory for timepoint {time_point}.") + + for file in image_files: + parts = file.split('_', 3) + region, fov, z_level, channel = parts[0], int(parts[1]), int(parts[2]), os.path.splitext(parts[3])[0] + channel = channel.replace("_", " ").replace("full ", "full_") + + coord_row = coordinates_df[(coordinates_df['region'] == region) & + (coordinates_df['fov'] == fov) & + (coordinates_df['z_level'] == z_level)] + + if coord_row.empty: + print(f"Warning: No matching coordinates found for file {file}") + continue + + coord_row = coord_row.iloc[0] + + key = (t, region, fov, z_level, channel) + self.stitching_data[key] = { + 'filepath': os.path.join(image_folder, file), + 'x': coord_row['x (mm)'], + 'y': coord_row['y (mm)'], + 'z': coord_row['z (um)'], + 'channel': channel, + 'z_level': z_level, + 'region': region, + 'fov_idx': fov, + 't': t + } + + self.regions.add(region) + self.channel_names.add(channel) + max_z = max(max_z, z_level) + max_fov = max(max_fov, fov) + + self.regions = sorted(self.regions) + self.channel_names = sorted(self.channel_names) + self.num_t = len(self.time_points) + self.num_z = max_z + 1 + self.num_fovs_per_region = max_fov + 1 + + # Set up image parameters based on the first image + first_key = list(self.stitching_data.keys())[0] + first_region = self.stitching_data[first_key]['region'] + first_fov = self.stitching_data[first_key]['fov_idx'] + first_z_level = self.stitching_data[first_key]['z_level'] + first_image = dask_imread(self.stitching_data[first_key]['filepath'])[0] + + self.dtype = first_image.dtype + if len(first_image.shape) == 2: + self.input_height, self.input_width = first_image.shape + elif len(first_image.shape) == 3: + self.input_height, self.input_width = first_image.shape[:2] + else: + raise ValueError(f"Unexpected image shape: {first_image.shape}") + self.chunks = (1, 1, 1, 512, 512) + + # Set up final monochrome channels + self.mono_channel_names = [] + for channel in self.channel_names: + channel_key 
= (t, first_region, first_fov, first_z_level, channel) + channel_image = dask_imread(self.stitching_data[channel_key]['filepath'])[0] + if len(channel_image.shape) == 3 and channel_image.shape[2] == 3: + self.is_rgb[channel] = True + channel = channel.split('_')[0] + self.mono_channel_names.extend([f"{channel}_R", f"{channel}_G", f"{channel}_B"]) + else: + self.is_rgb[channel] = False + self.mono_channel_names.append(channel) + self.num_c = len(self.mono_channel_names) + self.channel_colors = [self.get_channel_color(name) for name in self.mono_channel_names] + + print(f"FOV dimensions: {self.input_height}x{self.input_width}") + print(f"{self.num_z} Z levels, {self.num_t} Time points") + print(f"{self.num_c} Channels: {self.mono_channel_names}") + print(f"{len(self.regions)} Regions: {self.regions}") + + def get_channel_color(self, channel_name): + color_map = { + '405': 0x0000FF, # Blue + '488': 0x00FF00, # Green + '561': 0xFFCF00, # Yellow + '638': 0xFF0000, # Red + '730': 0x770000, # Dark Red" + '_B': 0x0000FF, # Blue + '_G': 0x00FF00, # Green + '_R': 0xFF0000 # Red + } + for key in color_map: + if key in channel_name: + return color_map[key] + return 0xFFFFFF # Default to white if no match found + + def calculate_output_dimensions(self, region): + region_data = [tile_info for key, tile_info in self.stitching_data.items() if key[1] == region] + + if not region_data: + raise ValueError(f"No data found for region {region}") + + self.x_positions = sorted(set(tile_info['x'] for tile_info in region_data)) + self.y_positions = sorted(set(tile_info['y'] for tile_info in region_data)) + + if self.use_registration: # Add extra space for shifts + num_cols = len(self.x_positions) + num_rows = len(self.y_positions) + + if self.scan_pattern == 'S-Pattern': + max_h_shift = (max(self.h_shift[0], self.h_shift_rev[0]), max(self.h_shift[1], self.h_shift_rev[1])) + else: + max_h_shift = self.h_shift + + width_pixels = int(self.input_width + ((num_cols - 1) * (self.input_width + 
max_h_shift[1]))) # horizontal width with overlap + width_pixels += abs((num_rows - 1) * self.v_shift[1]) # horizontal shift from vertical registration + height_pixels = int(self.input_height + ((num_rows - 1) * (self.input_height + self.v_shift[0]))) # vertical height with overlap + height_pixels += abs((num_cols - 1) * max_h_shift[0]) # vertical shift from horizontal registration + + else: # Use coordinates shifts + width_mm = max(self.x_positions) - min(self.x_positions) + (self.input_width * self.pixel_size_um / 1000) + height_mm = max(self.y_positions) - min(self.y_positions) + (self.input_height * self.pixel_size_um / 1000) + + width_pixels = int(np.ceil(width_mm * 1000 / self.pixel_size_um)) + height_pixels = int(np.ceil(height_mm * 1000 / self.pixel_size_um)) + + # Round up to the next multiple of 4 + width_pixels = ((width_pixels + 3) & ~3) + 4 + height_pixels = ((height_pixels + 3) & ~3) + 4 + + # Get the number of rows and columns + if len(self.regions) > 1: + rows, columns = self.get_rows_and_columns() + max_dimension = max(len(rows), len(columns)) + else: + max_dimension = 1 + + # Calculate the number of pyramid levels + self.num_pyramid_levels = math.ceil(np.log2(max(width_pixels, height_pixels) / 1024 * max_dimension)) + print("# Pyramid levels:", self.num_pyramid_levels) + return width_pixels, height_pixels + + def init_output(self, region): + width, height = self.calculate_output_dimensions(region) + self.output_shape = (self.num_t, self.num_c, self.num_z, height, width) + print(f"Output shape for region {region}: {self.output_shape}") + return da.zeros(self.output_shape, dtype=self.dtype, chunks=self.chunks) + + def get_flatfields(self, progress_callback=None): + def process_images(images, channel_name): + if images.size == 0: + print(f"WARNING: No images found for channel {channel_name}") + return + + if images.ndim != 3 and images.ndim != 4: + raise ValueError(f"Images must be 3 or 4-dimensional array, with dimension of (T, Y, X) or (T, Z, Y, 
X). Got shape {images.shape}") + + basic = BaSiC(get_darkfield=False, smoothness_flatfield=1) + basic.fit(images) + channel_index = self.mono_channel_names.index(channel_name) + self.flatfields[channel_index] = basic.flatfield + if progress_callback: + progress_callback(channel_index + 1, self.num_c) + + for channel in self.channel_names: + print(f"Calculating {channel} flatfield...") + images = [] + for t in self.time_points: + time_images = [dask_imread(tile['filepath'])[0] for key, tile in self.stitching_data.items() if tile['channel'] == channel and key[0] == int(t)] + if not time_images: + print(f"WARNING: No images found for channel {channel} at timepoint {t}") + continue + random.shuffle(time_images) + selected_tiles = time_images[:min(32, len(time_images))] + images.extend(selected_tiles) + + if not images: + print(f"WARNING: No images found for channel {channel} across all timepoints") + continue + + images = np.array(images) + + if images.ndim == 3: + # Images are in the shape (N, Y, X) + process_images(images, channel) + elif images.ndim == 4: + if images.shape[-1] == 3: + # Images are in the shape (N, Y, X, 3) for RGB images + images_r = images[..., 0] + images_g = images[..., 1] + images_b = images[..., 2] + channel = channel.split('_')[0] + process_images(images_r, channel + '_R') + process_images(images_g, channel + '_G') + process_images(images_b, channel + '_B') + else: + # Images are in the shape (N, Z, Y, X) + process_images(images, channel) + else: + raise ValueError(f"Unexpected number of dimensions in images array: {images.ndim}") + + def calculate_shifts(self, region): + region_data = [v for k, v in self.stitching_data.items() if k[1] == region] + + # Get unique x and y positions + x_positions = sorted(set(tile['x'] for tile in region_data)) + y_positions = sorted(set(tile['y'] for tile in region_data)) + + # Initialize shifts + self.h_shift = (0, 0) + self.v_shift = (0, 0) + + # Set registration channel if not already set + if not 
self.registration_channel: + self.registration_channel = self.channel_names[0] + elif self.registration_channel not in self.channel_names: + print(f"Warning: Specified registration channel '{self.registration_channel}' not found. Using {self.channel_names[0]}.") + self.registration_channel = self.channel_names[0] + + + max_x_overlap = round(self.input_width * self.overlap_percent / 2 / 100) + max_y_overlap = round(self.input_height * self.overlap_percent / 2 / 100) + print(f"Expected shifts - Horizontal: {(0, -max_x_overlap)}, Vertical: {(-max_y_overlap , 0)}") + + # Find center positions + center_x_index = (len(x_positions) - 1) // 2 + center_y_index = (len(y_positions) - 1) // 2 + + center_x = x_positions[center_x_index] + center_y = y_positions[center_y_index] + + right_x = None + bottom_y = None + + # Calculate horizontal shift + if center_x_index + 1 < len(x_positions): + right_x = x_positions[center_x_index + 1] + center_tile = self.get_tile(region, center_x, center_y, self.registration_channel, self.registration_z_level) + right_tile = self.get_tile(region, right_x, center_y, self.registration_channel, self.registration_z_level) + + if center_tile is not None and right_tile is not None: + self.h_shift = self.calculate_horizontal_shift(center_tile, right_tile, max_x_overlap) + else: + print(f"Warning: Missing tiles for horizontal shift calculation in region {region}.") + + # Calculate vertical shift + if center_y_index + 1 < len(y_positions): + bottom_y = y_positions[center_y_index + 1] + center_tile = self.get_tile(region, center_x, center_y, self.registration_channel, self.registration_z_level) + bottom_tile = self.get_tile(region, center_x, bottom_y, self.registration_channel, self.registration_z_level) + + if center_tile is not None and bottom_tile is not None: + self.v_shift = self.calculate_vertical_shift(center_tile, bottom_tile, max_y_overlap) + else: + print(f"Warning: Missing tiles for vertical shift calculation in region {region}.") + + if 
self.scan_pattern == 'S-Pattern' and right_x and bottom_y: + center_tile = self.get_tile(region, center_x, bottom_y, self.registration_channel, self.registration_z_level) + right_tile = self.get_tile(region, right_x, bottom_y, self.registration_channel, self.registration_z_level) + + if center_tile is not None and right_tile is not None: + self.h_shift_rev = self.calculate_horizontal_shift(center_tile, right_tile, max_x_overlap) + self.h_shift_rev_odd = center_y_index % 2 == 0 + print(f"Bi-Directional Horizontal Shift - Reverse Horizontal: {self.h_shift_rev}") + else: + print(f"Warning: Missing tiles for reverse horizontal shift calculation in region {region}.") + + print(f"Calculated Uni-Directional Shifts - Horizontal: {self.h_shift}, Vertical: {self.v_shift}") + + + def calculate_horizontal_shift(self, img1, img2, max_overlap): + img1 = self.normalize_image(img1) + img2 = self.normalize_image(img2) + + margin = int(img1.shape[0] * 0.2) # 20% margin + img1_overlap = img1[margin:-margin, -max_overlap:] + img2_overlap = img2[margin:-margin, :max_overlap] + + self.visualize_image(img1_overlap, img2_overlap, 'horizontal') + + shift, error, diffphase = phase_cross_correlation(img1_overlap, img2_overlap, upsample_factor=10) + return round(shift[0]), round(shift[1] - img1_overlap.shape[1]) + + def calculate_vertical_shift(self, img1, img2, max_overlap): + img1 = self.normalize_image(img1) + img2 = self.normalize_image(img2) + + margin = int(img1.shape[1] * 0.2) # 20% margin + img1_overlap = img1[-max_overlap:, margin:-margin] + img2_overlap = img2[:max_overlap, margin:-margin] + + self.visualize_image(img1_overlap, img2_overlap, 'vertical') + + shift, error, diffphase = phase_cross_correlation(img1_overlap, img2_overlap, upsample_factor=10) + return round(shift[0] - img1_overlap.shape[0]), round(shift[1]) + + def get_tile(self, region, x, y, channel, z_level): + for key, value in self.stitching_data.items(): + if (key[1] == region and + value['x'] == x and + value['y'] 
== y and + value['channel'] == channel and + value['z_level'] == z_level): + try: + return dask_imread(value['filepath'])[0] + except FileNotFoundError: + print(f"Warning: Tile file not found: {value['filepath']}") + return None + print(f"Warning: No matching tile found for region {region}, x={x}, y={y}, channel={channel}, z={z_level}") + return None + + def normalize_image(self, img): + img_min, img_max = img.min(), img.max() + img_normalized = (img - img_min) / (img_max - img_min) + scale_factor = np.iinfo(self.dtype).max if np.issubdtype(self.dtype, np.integer) else 1 + return (img_normalized * scale_factor).astype(self.dtype) + + def visualize_image(self, img1, img2, title): + try: + # Ensure images are numpy arrays + img1 = np.asarray(img1) + img2 = np.asarray(img2) + + if title == 'horizontal': + combined_image = np.hstack((img1, img2)) + else: + combined_image = np.vstack((img1, img2)) + + # Convert to uint8 for saving as PNG + combined_image_uint8 = (combined_image / np.iinfo(self.dtype).max * 255).astype(np.uint8) + + cv2.imwrite(f"{self.input_folder}/{title}.png", combined_image_uint8) + + print(f"Saved {title}.png successfully") + except Exception as e: + print(f"Error in visualize_image: {e}") + + def stitch_and_save_region(self, region, progress_callback=None): + stitched_images = self.init_output(region) # sets self.x_positions, self.y_positions + region_data = {k: v for k, v in self.stitching_data.items() if k[1] == region} + total_tiles = len(region_data) + processed_tiles = 0 + + x_min = min(self.x_positions) + y_min = min(self.y_positions) + + for key, tile_info in region_data.items(): + t, _, fov, z_level, channel = key + tile = dask_imread(tile_info['filepath'])[0] + if self.use_registration: + self.col_index = self.x_positions.index(tile_info['x']) + self.row_index = self.y_positions.index(tile_info['y']) + + if self.scan_pattern == 'S-Pattern' and self.row_index % 2 == self.h_shift_rev_odd: + h_shift = self.h_shift_rev + else: + h_shift = 
self.h_shift + + # Initialize starting coordinates based on tile position and shift + x_pixel = int(self.col_index * (self.input_width + h_shift[1])) + y_pixel = int(self.row_index * (self.input_height + self.v_shift[0])) + + # Apply horizontal shift effect on y-coordinate + if h_shift[0] < 0: + y_pixel += int((len(self.x_positions) - 1 - self.col_index) * abs(h_shift[0])) # Fov moves up as cols go right + else: + y_pixel += int(self.col_index * h_shift[0]) # Fov moves down as cols go right + + # Apply vertical shift effect on x-coordinate + if self.v_shift[1] < 0: + x_pixel += int((len(self.y_positions) - 1 - self.row_index) * abs(self.v_shift[1])) # Fov moves left as rows go down + else: + x_pixel += int(self.row_index * self.v_shift[1]) # Fov moves right as rows go down + + else: + # Calculate base position + x_pixel = int((tile_info['x'] - x_min) * 1000 / self.pixel_size_um) + y_pixel = int((tile_info['y'] - y_min) * 1000 / self.pixel_size_um) + + self.place_tile(stitched_images, tile, x_pixel, y_pixel, z_level, channel, t) + + processed_tiles += 1 + if progress_callback: + progress_callback(processed_tiles, total_tiles) + + self.starting_saving.emit(False) + if len(self.regions) > 1: + self.save_region_to_hcs_ome_zarr(region, stitched_images) + else: + # self.save_as_ome_zarr(region, stitched_images) + self.save_region_to_ome_zarr(region, stitched_images) # bugs: when starting to save, main gui lags and disconnects + + def place_tile(self, stitched_images, tile, x_pixel, y_pixel, z_level, channel, t): + if len(tile.shape) == 2: + # Handle 2D grayscale image + channel_idx = self.mono_channel_names.index(channel) + self.place_single_channel_tile(stitched_images, tile, x_pixel, y_pixel, z_level, channel_idx, t) + + elif len(tile.shape) == 3: + if tile.shape[2] == 3: + # Handle RGB image + channel = channel.split('_')[0] + for i, color in enumerate(['R', 'G', 'B']): + channel_idx = self.mono_channel_names.index(f"{channel}_{color}") + 
self.place_single_channel_tile(stitched_images, tile[:,:,i], x_pixel, y_pixel, z_level, channel_idx, t) + elif tile.shape[0] == 1: + channel_idx = self.mono_channel_names.index(channel) + self.place_single_channel_tile(stitched_images, tile[0], x_pixel, y_pixel, z_level, channel_idx, t) + else: + raise ValueError(f"Unexpected tile shape: {tile.shape}") + + def place_single_channel_tile(self, stitched_images, tile, x_pixel, y_pixel, z_level, channel_idx, t): + if len(stitched_images.shape) != 5: + raise ValueError(f"Unexpected stitched_images shape: {stitched_images.shape}. Expected 5D array (t, c, z, y, x).") + + if self.apply_flatfield: + tile = self.apply_flatfield_correction(tile, channel_idx) + + if self.use_registration: + if self.scan_pattern == 'S-Pattern' and self.row_index % 2 == self.h_shift_rev_odd: + h_shift = self.h_shift_rev + else: + h_shift = self.h_shift + + # Determine crop for tile edges + top_crop = max(0, (-self.v_shift[0] // 2) - abs(h_shift[0]) // 2) if self.row_index > 0 else 0 # if y + bottom_crop = max(0, (-self.v_shift[0] // 2) - abs(h_shift[0]) // 2) if self.row_index < len(self.y_positions) - 1 else 0 + left_crop = max(0, (-h_shift[1] // 2) - abs(self.v_shift[1]) // 2) if self.col_index > 0 else 0 + right_crop = max(0, (-h_shift[1] // 2) - abs(self.v_shift[1]) // 2) if self.col_index < len(self.x_positions) - 1 else 0 + + # Apply cropping to the tile + tile = tile[top_crop:tile.shape[0]-bottom_crop, left_crop:tile.shape[1]-right_crop] + + # Adjust x_pixel and y_pixel based on cropping + x_pixel += left_crop + y_pixel += top_crop + + y_end = min(y_pixel + tile.shape[0], stitched_images.shape[3]) + x_end = min(x_pixel + tile.shape[1], stitched_images.shape[4]) + + try: + stitched_images[t, channel_idx, z_level, y_pixel:y_end, x_pixel:x_end] = tile[:y_end-y_pixel, :x_end-x_pixel] + except Exception as e: + print(f"ERROR: Failed to place tile. 
Details: {str(e)}") + print(f"DEBUG: t:{t}, channel_idx:{channel_idx}, z_level:{z_level}, y:{y_pixel}-{y_end}, x:{x_pixel}-{x_end}") + print(f"DEBUG: tile slice shape: {tile[:y_end-y_pixel, :x_end-x_pixel].shape}") + raise + + def apply_flatfield_correction(self, tile, channel_idx): + if channel_idx in self.flatfields: + return (tile / self.flatfields[channel_idx]).clip(min=np.iinfo(self.dtype).min, + max=np.iinfo(self.dtype).max).astype(self.dtype) + return tile + + def generate_pyramid(self, image, num_levels): + pyramid = [image] + for level in range(1, num_levels): + scale_factor = 2 ** level + factors = {0: 1, 1: 1, 2: 1, 3: scale_factor, 4: scale_factor} + if isinstance(image, da.Array): + downsampled = da.coarsen(np.mean, image, factors, trim_excess=True) + else: + block_size = (1, 1, 1, scale_factor, scale_factor) + downsampled = downscale_local_mean(image, block_size) + pyramid.append(downsampled) + return pyramid + + def save_region_to_hcs_ome_zarr(self, region, stitched_images): + output_path = os.path.join(self.input_folder, self.output_name) + store = ome_zarr.io.parse_url(output_path, mode="a").store + root = zarr.group(store=store) + + row, col = region[0], region[1:] + row_group = root.require_group(row) + well_group = row_group.require_group(col) + + if 'well' not in well_group.attrs: + well_metadata = { + "images": [{"path": "0", "acquisition": 0}], + } + ome_zarr.writer.write_well_metadata(well_group, well_metadata["images"]) + + image_group = well_group.require_group("0") + + pyramid = self.generate_pyramid(stitched_images, self.num_pyramid_levels) + coordinate_transformations = [ + [{ + "type": "scale", + "scale": [1, 1, self.acquisition_params.get("dz(um)", 1), self.pixel_size_um * (2 ** i), self.pixel_size_um * (2 ** i)] + }] for i in range(self.num_pyramid_levels) + ] + + axes = [ + {"name": "t", "type": "time", "unit": "second"}, + {"name": "c", "type": "channel"}, + {"name": "z", "type": "space", "unit": "micrometer"}, + {"name": "y", 
"type": "space", "unit": "micrometer"}, + {"name": "x", "type": "space", "unit": "micrometer"} + ] + + # Prepare channels metadata + omero_channels = [{ + "label": name, + "color": f"{color:06X}", + "window": {"start": 0, "end": np.iinfo(self.dtype).max, "min": 0, "max": np.iinfo(self.dtype).max} + } for name, color in zip(self.mono_channel_names, self.channel_colors)] + + omero = { + "name": f"{region}", + "version": "0.4", + "channels": omero_channels + } + + image_group.attrs["omero"] = omero + + # Write the multiscale image data and metadata + ome_zarr.writer.write_multiscale( + pyramid=pyramid, + group=image_group, + chunks=self.chunks, + axes=axes, + coordinate_transformations=coordinate_transformations, + storage_options=dict(chunks=self.chunks), + name=f"{region}" + ) + + def save_as_ome_zarr(self, region, stitched_images): + output_path = os.path.join(self.input_folder, self.output_name) + dz_um = self.acquisition_params.get("dz(um)", None) + sensor_pixel_size_um = self.acquisition_params.get("sensor_pixel_size_um", None) + channel_minmax = [(np.iinfo(self.dtype).min, np.iinfo(self.dtype).max)] * self.num_c + for i in range(self.num_c): + print(f"Channel {i}:", self.mono_channel_names[i], " \tColor:", self.channel_colors[i], " \tPixel Range:", channel_minmax[i]) + + zarr_writer = OmeZarrWriter(output_path) + zarr_writer.build_ome( + size_z=self.num_z, + image_name=region, + channel_names=self.mono_channel_names, + channel_colors=self.channel_colors, + channel_minmax=channel_minmax + ) + zarr_writer.write_image( + image_data=stitched_images, + image_name=region, + physical_pixel_sizes=types.PhysicalPixelSizes(dz_um, self.pixel_size_um, self.pixel_size_um), + channel_names=self.mono_channel_names, + channel_colors=self.channel_colors, + dimension_order="TCZYX", + scale_num_levels=self.num_pyramid_levels, + chunk_dims=self.chunks + ) + + def save_region_to_ome_zarr(self, region, stitched_images): + output_path = os.path.join(self.input_folder, 
self.output_name) + store = ome_zarr.io.parse_url(output_path, mode="a").store + root = zarr.group(store=store) + + # Generate the pyramid + pyramid = self.generate_pyramid(stitched_images, self.num_pyramid_levels) + + datasets = [] + for i in range(self.num_pyramid_levels): + scale = 2**i + datasets.append({ + "path": str(i), + "coordinateTransformations": [{ + "type": "scale", + "scale": [1, 1, self.acquisition_params.get("dz(um)", 1), self.pixel_size_um * scale, self.pixel_size_um * scale] + }] + }) + + axes = [ + {"name": "t", "type": "time", "unit": "second"}, + {"name": "c", "type": "channel"}, + {"name": "z", "type": "space", "unit": "micrometer"}, + {"name": "y", "type": "space", "unit": "micrometer"}, + {"name": "x", "type": "space", "unit": "micrometer"} + ] + + ome_zarr.writer.write_multiscales_metadata(root, datasets, axes=axes, name="stitched_image") + + omero = { + "name": "stitched_image", + "version": "0.4", + "channels": [{ + "label": name, + "color": f"{color:06X}", + "window": {"start": 0, "end": np.iinfo(self.dtype).max, "min": 0, "max": np.iinfo(self.dtype).max} + } for name, color in zip(self.mono_channel_names, self.channel_colors)] + } + root.attrs["omero"] = omero + + coordinate_transformations = [ + dataset["coordinateTransformations"] for dataset in datasets + ] + + ome_zarr.writer.write_multiscale( + pyramid=pyramid, + group=root, + axes="tczyx", + coordinate_transformations=coordinate_transformations, + storage_options=dict(chunks=self.chunks) + ) + + def write_stitched_plate_metadata(self): + output_path = os.path.join(self.input_folder, self.output_name) + store = ome_zarr.io.parse_url(output_path, mode="a").store + root = zarr.group(store=store) + + rows, columns = self.get_rows_and_columns() + well_paths = [f"{well_id[0]}/{well_id[1:]}" for well_id in sorted(self.regions)] + + plate_metadata = { + "name": "Stitched Plate", + "rows": [{"name": row} for row in rows], + "columns": [{"name": col} for col in columns], + "wells": 
[{"path": path, "rowIndex": rows.index(path[0]), "columnIndex": columns.index(path[2:])} + for path in well_paths], + "field_count": 1, + "acquisitions": [{ + "id": 0, + "maximumfieldcount": 1, + "name": "Stitched Acquisition" + }] + } + + ome_zarr.writer.write_plate_metadata( + root, + rows=[row["name"] for row in plate_metadata["rows"]], + columns=[col["name"] for col in plate_metadata["columns"]], + wells=plate_metadata["wells"], + acquisitions=plate_metadata["acquisitions"], + name=plate_metadata["name"], + field_count=plate_metadata["field_count"] + ) + + def get_rows_and_columns(self): + rows = sorted(set(region[0] for region in self.regions)) + columns = sorted(set(region[1:] for region in self.regions)) + return rows, columns + + def create_ome_tiff(self, stitched_images): + output_path = os.path.join(self.input_folder, self.output_name) + + with TiffWriter(output_path, bigtiff=True, ome=True) as tif: + tif.write( + data=stitched_images, + shape=stitched_images.shape, + dtype=self.dtype, + photometric='minisblack', + planarconfig='separate', + metadata={ + 'axes': 'TCZYX', + 'Channel': {'Name': self.mono_channel_names}, + 'SignificantBits': stitched_images.dtype.itemsize * 8, + 'Pixels': { + 'PhysicalSizeX': self.pixel_size_um, + 'PhysicalSizeXUnit': 'µm', + 'PhysicalSizeY': self.pixel_size_um, + 'PhysicalSizeYUnit': 'µm', + 'PhysicalSizeZ': self.acquisition_params.get("dz(um)", 1.0), + 'PhysicalSizeZUnit': 'µm', + }, + } + ) + + print(f"Data saved in OME-TIFF format at: {output_path}") + self.finished_saving.emit(output_path, self.dtype) + + + def run(self): + stime = time.time() + # try: + self.get_time_points() + self.parse_filenames() + + if self.apply_flatfield: + print("Calculating flatfields...") + self.getting_flatfields.emit() + self.get_flatfields(progress_callback=self.update_progress.emit) + print("time to apply flatfields", time.time() - stime) + + if self.num_fovs_per_region > 1: + self.run_regions() + else: + self.run_fovs() # only displays 
one fov per region even though all fovs are saved in zarr with metadata + + # except Exception as e: + # print("time before error", time.time() - stime) + # print(f"Error while stitching: {e}") + # raise + + + def run_regions(self): + stime = time.time() + if len(self.regions) > 1: + self.write_stitched_plate_metadata() + + if self.use_registration: + print(f"\nCalculating shifts for region {self.regions[0]}...") + self.calculate_shifts(self.regions[0]) + + for region in self.regions: + wtime = time.time() + + # if self.use_registration: + # print(f"\nCalculating shifts for region {region}...") + # self.calculate_shifts(region) + + self.starting_stitching.emit() + print(f"\nstarting stitching for region {region}...") + self.stitch_and_save_region(region, progress_callback=self.update_progress.emit) + + sttime = time.time() + print(f"time to stitch and save region {region}", time.time() - wtime) + print(f"...done with region:{region}") + + if self.output_format.endswith('.ome.tiff'): + self.create_ome_tiff(self.stitched_images) + else: + output_path = os.path.join(self.input_folder, self.output_name) + print(f"Data saved in OME-ZARR format at: {output_path}") + self.print_zarr_structure(output_path) + + self.finished_saving.emit(os.path.join(self.input_folder, self.output_name), self.dtype) + print("total time to stitch + save:", time.time() - stime) + + +#________________________________________________________________________________________________________________________________ +# run_fovs: directly save fovs to final hcs ome zarr +# +# issue: +# only shows one fov per region when there are multiple fovs +# - (fix metadata? translation, scale, path, multiscale?) 
+# correct channels in napari, well + plate metadata, z-stack shape, time-point shape + + def run_fovs(self): + stime = time.time() + self.starting_stitching.emit() + + output_path = os.path.join(self.input_folder, self.output_name) + store = ome_zarr.io.parse_url(output_path, mode="a").store + root = zarr.group(store=store) + + self.write_fov_plate_metadata(root) + + total_fovs = sum(len(set([k[2] for k in self.stitching_data.keys() if k[1] == region])) for region in self.regions) + processed_fovs = 0 + + for region in self.regions: + region_data = {k: v for k, v in self.stitching_data.items() if k[1] == region} + well_group = self.write_fov_well_metadata(root, region) + + for fov_idx in range(self.num_fovs_per_region): + fov_data = {k: v for k, v in region_data.items() if k[2] == fov_idx} + + if not fov_data: + continue # Skip if no data for this FOV index + + tcz_fov = self.compile_single_fov_data(fov_data) + self.write_fov_to_zarr(well_group, tcz_fov, fov_idx, fov_data) + processed_fovs += 1 + self.update_progress.emit(processed_fovs, total_fovs) + + omero_channels = [{ + "label": name, + "color": f"{color:06X}", + "window": {"start": 0, "end": np.iinfo(self.dtype).max, "min": 0, "max": np.iinfo(self.dtype).max} + } for name, color in zip(self.mono_channel_names, self.channel_colors)] + + omero = { + "name": "hcs-acquisition", + "version": "0.4", + "channels": omero_channels + } + + root.attrs["omero"] = omero + + print(f"Data saved in OME-ZARR format at: {output_path}") + self.print_zarr_structure(output_path) + self.finished_saving.emit(output_path, self.dtype) + + print("total time to save FOVs:", time.time() - stime) + + def compile_single_fov_data(self, fov_data): + # Initialize a 5D array to hold all the data for this FOV + tcz_fov = np.zeros((self.num_t, self.num_c, self.num_z, self.input_height, self.input_width), dtype=self.dtype) + + for key, scan_info in fov_data.items(): + t, _, _, z_level, channel = key + image = 
dask_imread(scan_info['filepath'])[0] + + if self.apply_flatfield: + channel_idx = self.mono_channel_names.index(channel) + image = self.apply_flatfield_correction(image, channel_idx) + + if len(image.shape) == 3 and image.shape[2] == 3: # RGB image + channel = channel.split('_')[0] + for i, color in enumerate(['R', 'G', 'B']): + c_idx = self.mono_channel_names.index(f"{channel}_{color}") + tcz_fov[t, c_idx, z_level] = image[:, :, i] + else: # Grayscale image + c_idx = self.mono_channel_names.index(channel) + tcz_fov[t, c_idx, z_level] = image + + return da.from_array(tcz_fov, chunks=self.chunks) + + def write_fov_plate_metadata(self, root): + rows, columns = self.get_rows_and_columns() + well_paths = [f"{well_id[0]}/{well_id[1:]}" for well_id in sorted(self.regions)] + + plate_metadata = { + "name": "Sample", + "rows": [{"name": row} for row in rows], + "columns": [{"name": col} for col in columns], + "wells": [{"path": path, "rowIndex": rows.index(path[0]), "columnIndex": columns.index(path[2:])} + for path in well_paths], + "field_count": self.num_fovs_per_region * len(self.regions), + "acquisitions": [{ + "id": 0, + "maximumfieldcount": self.num_fovs_per_region, + "name": "Multipoint Acquisition" + }] + } + + ome_zarr.writer.write_plate_metadata( + root, + rows=[row["name"] for row in plate_metadata["rows"]], + columns=[col["name"] for col in plate_metadata["columns"]], + wells=plate_metadata["wells"], + acquisitions=plate_metadata["acquisitions"], + name=plate_metadata["name"], + field_count=plate_metadata["field_count"] + ) + + def write_fov_well_metadata(self, root, region): + row, col = region[0], region[1:] + row_group = root.require_group(row) + well_group = row_group.require_group(col) + + if 'well' not in well_group.attrs: + well_metadata = { + "images": [{"path": str(fov_idx), "acquisition": 0} for fov_idx in range(self.num_fovs_per_region)] + } + ome_zarr.writer.write_well_metadata(well_group, well_metadata["images"]) + return well_group + + def 
write_fov_to_zarr(self, well_group, tcz_fov, fov_idx, fov_data): + axes = [ + {"name": "t", "type": "time", "unit": "second"}, + {"name": "c", "type": "channel"}, + {"name": "z", "type": "space", "unit": "micrometer"}, + {"name": "y", "type": "space", "unit": "micrometer"}, + {"name": "x", "type": "space", "unit": "micrometer"} + ] + + # Generate pyramid levels + pyramid = self.generate_pyramid(tcz_fov, self.num_pyramid_levels) + + # Get the position of the FOV (use the first scan in fov_data) + first_scan = next(iter(fov_data.values())) + x_mm, y_mm = first_scan['x'], first_scan['y'] + + # Get the z positions + z_positions = sorted(set(scan_info['z'] for scan_info in fov_data.values())) + z_min = min(z_positions) + dz = self.acquisition_params.get("dz(um)", 1.0) + + # Create coordinate transformations for each pyramid level + coordinate_transformations = [] + for level in range(len(pyramid)): + scale_factor = 2 ** level + coordinate_transformations.append([ + { + "type": "scale", + "scale": [1, 1, dz, self.pixel_size_um * scale_factor, self.pixel_size_um * scale_factor] + }, + { + "type": "translation", + "translation": [0, 0, z_min, y_mm*1000, x_mm*1000] + } + ]) + + image_group = well_group.require_group(str(fov_idx)) + + # Prepare datasets for multiscales metadata + datasets = [ + { + "path": str(i), + "coordinateTransformations": coord_trans + } for i, coord_trans in enumerate(coordinate_transformations) + ] + + # Write multiscales metadata + ome_zarr.writer.write_multiscales_metadata( + group=image_group, + datasets=datasets, + axes=axes, + name=f"FOV_{fov_idx}" # This will be passed as part of **metadata + ) + + # Write the actual data + ome_zarr.writer.write_multiscale( + pyramid=pyramid, + group=image_group, + axes=axes, + coordinate_transformations=coordinate_transformations, + storage_options=dict(chunks=self.chunks), + ) + + # Add OMERO metadata + omero_channels = [{ + "label": name, + "color": f"{color:06X}", + "window": {"start": 0, "end": 
np.iinfo(self.dtype).max, "min": 0, "max": np.iinfo(self.dtype).max} + } for name, color in zip(self.mono_channel_names, self.channel_colors)] + + omero = { + "name": f"FOV_{fov_idx}", + "version": "0.4", + "channels": omero_channels + } + + image_group.attrs["omero"] = omero + + def print_zarr_structure(self, path, indent=""): + root = zarr.open(path, mode='r') + print(f"Zarr Tree and Metadata for: {path}") + print(root.tree()) + print(dict(root.attrs)) \ No newline at end of file diff --git a/software/control/toupcam.py b/software/control/toupcam.py index 0d727113c..6b20dba60 100644 --- a/software/control/toupcam.py +++ b/software/control/toupcam.py @@ -1,4 +1,4 @@ -"""Version: 54.23714.20231029 +"""Version: 57.26813.20241028 We use ctypes to call into the toupcam.dll/libtoupcam.so/libtoupcam.dylib API, the python class Toupcam is a thin wrapper class to the native api of toupcam.dll/libtoupcam.so/libtoupcam.dylib. So the manual en.html(English) and hans.html(Simplified Chinese) are also applicable for programming with toupcam.py. 
@@ -64,6 +64,9 @@ TOUPCAM_FLAG_CAMERALINK = 0x0008000000000000 # camera link TOUPCAM_FLAG_CXP = 0x0010000000000000 # CXP: CoaXPress TOUPCAM_FLAG_RAW12PACK = 0x0020000000000000 # pixel format, RAW 12bits packed +TOUPCAM_FLAG_SELFTRIGGER = 0x0040000000000000 # self trigger +TOUPCAM_FLAG_RAW11 = 0x0080000000000000 # pixel format, RAW 11bits +TOUPCAM_FLAG_GHOPTO = 0x0100000000000000 # ghopto sensor TOUPCAM_EVENT_EXPOSURE = 0x0001 # exposure time or gain changed TOUPCAM_EVENT_TEMPTINT = 0x0002 # white balance changed, Temp/Tint mode @@ -79,10 +82,10 @@ TOUPCAM_EVENT_LEVELRANGE = 0x000c # level range changed TOUPCAM_EVENT_AUTOEXPO_CONV = 0x000d # auto exposure convergence TOUPCAM_EVENT_AUTOEXPO_CONVFAIL = 0x000e # auto exposure once mode convergence failed +TOUPCAM_EVENT_FPNC = 0x000f # fix pattern noise correction status changed TOUPCAM_EVENT_ERROR = 0x0080 # generic error TOUPCAM_EVENT_DISCONNECTED = 0x0081 # camera disconnected TOUPCAM_EVENT_NOFRAMETIMEOUT = 0x0082 # no frame timeout error -TOUPCAM_EVENT_AFFEEDBACK = 0x0083 # auto focus information feedback TOUPCAM_EVENT_FOCUSPOS = 0x0084 # focus positon TOUPCAM_EVENT_NOPACKETTIMEOUT = 0x0085 # no packet timeout TOUPCAM_EVENT_EXPO_START = 0x4000 # hardware event: exposure start @@ -100,7 +103,7 @@ TOUPCAM_OPTION_RAW = 0x04 # raw data mode, read the sensor "raw" data. This can be set only while camea is NOT running. 
0 = rgb, 1 = raw, default value: 0 TOUPCAM_OPTION_HISTOGRAM = 0x05 # 0 = only one, 1 = continue mode TOUPCAM_OPTION_BITDEPTH = 0x06 # 0 = 8 bits mode, 1 = 16 bits mode -TOUPCAM_OPTION_FAN = 0x07 # 0 = turn off the cooling fan, [1, max] = fan speed +TOUPCAM_OPTION_FAN = 0x07 # 0 = turn off the cooling fan, [1, max] = fan speed, set to "-1" means to use default fan speed TOUPCAM_OPTION_TEC = 0x08 # 0 = turn off the thermoelectric cooler, 1 = turn on the thermoelectric cooler TOUPCAM_OPTION_LINEAR = 0x09 # 0 = turn off the builtin linear tone mapping, 1 = turn on the builtin linear tone mapping, default value: 1 TOUPCAM_OPTION_CURVE = 0x0a # 0 = turn off the builtin curve tone mapping, 1 = turn on the builtin polynomial curve tone mapping, 2 = logarithmic curve tone mapping, default value: 2 @@ -108,16 +111,18 @@ TOUPCAM_OPTION_RGB = 0x0c # 0 => RGB24; 1 => enable RGB48 format when bitdepth > 8; 2 => RGB32; 3 => 8 Bits Grey (only for mono camera); 4 => 16 Bits Grey (only for mono camera when bitdepth > 8); 5 => 64(RGB64) TOUPCAM_OPTION_COLORMATIX = 0x0d # enable or disable the builtin color matrix, default value: 1 TOUPCAM_OPTION_WBGAIN = 0x0e # enable or disable the builtin white balance gain, default value: 1 -TOUPCAM_OPTION_TECTARGET = 0x0f # get or set the target temperature of the thermoelectric cooler, in 0.1 degree Celsius. For example, 125 means 12.5 degree Celsius, -35 means -3.5 degree Celsius +TOUPCAM_OPTION_TECTARGET = 0x0f # get or set the target temperature of the thermoelectric cooler, in 0.1 degree Celsius. For example, 125 means 12.5 degree Celsius, -35 means -3.5 degree Celsius. 
Set "-2730" or below means using the default for that model TOUPCAM_OPTION_AUTOEXP_POLICY = 0x10 # auto exposure policy: - # 0: Exposure Only - # 1: Exposure Preferred - # 2: Gain Only - # 3: Gain Preferred - # default value: 1 + # 0: Exposure Only + # 1: Exposure Preferred + # 2: Gain Only + # 3: Gain Preferred + # default value: 1 # -TOUPCAM_OPTION_FRAMERATE = 0x11 # limit the frame rate, range=[0, 63], the default value 0 means no limit -TOUPCAM_OPTION_DEMOSAIC = 0x12 # demosaic method for both video and still image: BILINEAR = 0, VNG(Variable Number of Gradients) = 1, PPG(Patterned Pixel Grouping) = 2, AHD(Adaptive Homogeneity Directed) = 3, EA(Edge Aware) = 4, see https://en.wikipedia.org/wiki/Demosaicing, default value: 0 +TOUPCAM_OPTION_FRAMERATE = 0x11 # limit the frame rate, the default value 0 means no limit +TOUPCAM_OPTION_DEMOSAIC = 0x12 # demosaic method for both video and still image: BILINEAR = 0, VNG(Variable Number of Gradients) = 1, PPG(Patterned Pixel Grouping) = 2, AHD(Adaptive Homogeneity Directed) = 3, EA(Edge Aware) = 4, see https://en.wikipedia.org/wiki/Demosaicing + # In terms of CPU usage, EA is the lowest, followed by BILINEAR, and the others are higher. 
+ # default value: 0 TOUPCAM_OPTION_DEMOSAIC_VIDEO = 0x13 # demosaic method for video TOUPCAM_OPTION_DEMOSAIC_STILL = 0x14 # demosaic method for still image TOUPCAM_OPTION_BLACKLEVEL = 0x15 # black level @@ -130,18 +135,23 @@ # The final image size is rounded down to an even number, such as 640/3 to get 212 # TOUPCAM_OPTION_ROTATE = 0x18 # rotate clockwise: 0, 90, 180, 270 -TOUPCAM_OPTION_CG = 0x19 # Conversion Gain mode: 0 = LCG, 1 = HCG, 2 = HDR +TOUPCAM_OPTION_CG = 0x19 # Conversion Gain: + # 0 = LCG + # 1 = HCG + # 2 = HDR (for camera with flag TOUPCAM_FLAG_CGHDR) + # 2 = MCG (for camera with flag TOUPCAM_FLAG_GHOPTO) + # TOUPCAM_OPTION_PIXEL_FORMAT = 0x1a # pixel format TOUPCAM_OPTION_FFC = 0x1b # flat field correction - # set: - # 0: disable - # 1: enable - # -1: reset - # (0xff000000 | n): set the average number to n, [1~255] - # get: - # (val & 0xff): 0 => disable, 1 => enable, 2 => inited - # ((val & 0xff00) >> 8): sequence - # ((val & 0xff0000) >> 16): average number + # set: + # 0: disable + # 1: enable + # -1: reset + # (0xff000000 | n): set the average number to n, [1~255] + # get: + # (val & 0xff): 0 => disable, 1 => enable, 2 => inited + # ((val & 0xff00) >> 8): sequence + # ((val & 0xff0000) >> 16): average number # TOUPCAM_OPTION_DDR_DEPTH = 0x1c # the number of the frames that DDR can cache # 1: DDR cache only one frame @@ -176,9 +186,8 @@ # default: 1 (win), 0 (linux/macos) # TOUPCAM_OPTION_FOCUSPOS = 0x24 # focus positon -TOUPCAM_OPTION_AFMODE = 0x25 # auto focus mode (0:manul focus; 1:auto focus; 2:once focus; 3:conjugate calibration) -TOUPCAM_OPTION_AFZONE = 0x26 # auto focus zone -TOUPCAM_OPTION_AFFEEDBACK = 0x27 # auto focus information feedback; 0:unknown; 1:focused; 2:focusing; 3:defocus; 4:up; 5:down +TOUPCAM_OPTION_AFMODE = 0x25 # auto focus mode, see ToupcamAFMode +TOUPCAM_OPTION_AFSTATUS = 0x27 # auto focus status, see ToupcamAFStaus TOUPCAM_OPTION_TESTPATTERN = 0x28 # test pattern: # 0: off # 3: monochrome diagonal stripes @@ -190,10 
+199,10 @@ TOUPCAM_OPTION_BYTEORDER = 0x2a # Byte order, BGR or RGB: 0 => RGB, 1 => BGR, default value: 1(Win), 0(macOS, Linux, Android) TOUPCAM_OPTION_NOPACKET_TIMEOUT = 0x2b # no packet timeout: 0 => disable, positive value (>= NOPACKET_TIMEOUT_MIN) => timeout milliseconds. default: disable TOUPCAM_OPTION_MAX_PRECISE_FRAMERATE = 0x2c # get the precise frame rate maximum value in 0.1 fps, such as 115 means 11.5 fps. E_NOTIMPL means not supported -TOUPCAM_OPTION_PRECISE_FRAMERATE = 0x2d # precise frame rate current value in 0.1 fps, range:[1~maximum] +TOUPCAM_OPTION_PRECISE_FRAMERATE = 0x2d # precise frame rate current value in 0.1 fps. use TOUPCAM_OPTION_MAX_PRECISE_FRAMERATE, TOUPCAM_OPTION_MIN_PRECISE_FRAMERATE to get the range. if the set value is out of range, E_INVALIDARG will be returned TOUPCAM_OPTION_BANDWIDTH = 0x2e # bandwidth, [1-100]% TOUPCAM_OPTION_RELOAD = 0x2f # reload the last frame in trigger mode -TOUPCAM_OPTION_CALLBACK_THREAD = 0x30 # dedicated thread for callback +TOUPCAM_OPTION_CALLBACK_THREAD = 0x30 # dedicated thread for callback: 0 => disable, 1 => enable, default: 0 TOUPCAM_OPTION_FRONTEND_DEQUE_LENGTH = 0x31 # frontend (raw) frame buffer deque length, range: [2, 1024], default: 4 # All the memory will be pre-allocated when the camera starts, so, please attention to memory usage # @@ -254,7 +263,7 @@ # 1~99: peak percent average # 0 or 100: full roi average, means "disabled" # -TOUPCAM_OPTION_ANTI_SHUTTER_EFFECT = 0x4b # anti shutter effect: 1 => disable, 0 => disable; default: 1 +TOUPCAM_OPTION_ANTI_SHUTTER_EFFECT = 0x4b # anti shutter effect: 1 => disable, 0 => disable; default: 0 TOUPCAM_OPTION_CHAMBER_HT = 0x4c # get chamber humidity & temperature: # high 16 bits: humidity, in 0.1%, such as: 325 means humidity is 32.5% # low 16 bits: temperature, in 0.1 degrees Celsius, such as: 32 means 3.2 degrees Celsius @@ -267,23 +276,23 @@ TOUPCAM_OPTION_LINE_PRE_DELAY = 0x52 # specified line signal pre-delay, microsecond 
TOUPCAM_OPTION_LINE_POST_DELAY = 0x53 # specified line signal post-delay, microsecond TOUPCAM_OPTION_TEC_VOLTAGE_MAX_RANGE = 0x54 # get the tec maximum voltage range: - # high 16 bits: max - # low 16 bits: min + # high 16 bits: max + # low 16 bits: min TOUPCAM_OPTION_HIGH_FULLWELL = 0x55 # high fullwell capacity: 0 => disable, 1 => enable TOUPCAM_OPTION_DYNAMIC_DEFECT = 0x56 # dynamic defect pixel correction: - # threshold, t1: high 16 bits: [10, 100], means: [1.0, 10.0] - # value, t2: low 16 bits: [0, 100], means: [0.00, 1.00] + # dead pixel ratio, t1: high 16 bits: [0, 100], means: [0.0, 1.0] + # hot pixel ratio, t2: low 16 bits: [0, 100], means: [0.0, 1.0] TOUPCAM_OPTION_HDR_KB = 0x57 # HDR synthesize - # K (high 16 bits): [1, 25500] - # B (low 16 bits): [0, 65535] - # 0xffffffff => set to default + # K (high 16 bits): [1, 25500] + # B (low 16 bits): [0, 65535] + # 0xffffffff => set to default TOUPCAM_OPTION_HDR_THRESHOLD = 0x58 # HDR synthesize - # threshold: [1, 4094] - # 0xffffffff => set to default + # threshold: [1, 4094] + # 0xffffffff => set to default TOUPCAM_OPTION_GIGETIMEOUT = 0x5a # For GigE cameras, the application periodically sends heartbeat signals to the camera to keep the connection to the camera alive. # If the camera doesn't receive heartbeat signals within the time period specified by the heartbeat timeout counter, the camera resets the connection. 
- # When the application is stopped by the debugger, the application cannot create the heartbeat signals - # 0 => auto: when the camera is opened, disable if debugger is present or enable if no debugger is present + # When the application is stopped by the debugger, the application cannot send the heartbeat signals + # 0 => auto: when the camera is opened, enable if no debugger is present or disable if debugger is present # 1 => enable # 2 => disable # default: auto @@ -291,21 +300,79 @@ TOUPCAM_OPTION_OVERCLOCK_MAX = 0x5c # get overclock range: [0, max] TOUPCAM_OPTION_OVERCLOCK = 0x5d # overclock, default: 0 TOUPCAM_OPTION_RESET_SENSOR = 0x5e # reset sensor - -TOUPCAM_OPTION_ADC = 0x08000000 # Analog-Digital Conversion: - # get: - # (option | 'C'): get the current value - # (option | 'N'): get the supported ADC number - # (option | n): get the nth supported ADC value, such as 11bits, 12bits, etc; the first value is the default - # set: val = ADC value, such as 11bits, 12bits, etc TOUPCAM_OPTION_ISP = 0x5f # Enable hardware ISP: 0 => auto (disable in RAW mode, otherwise enable), 1 => enable, -1 => disable; default: 0 -TOUPCAM_OPTION_AUTOEXP_EXPOTIME_STEP = 0x60 # Auto exposure: time step (thousandths) -TOUPCAM_OPTION_AUTOEXP_GAIN_STEP = 0x61 # Auto exposure: gain step (thousandths) +TOUPCAM_OPTION_AUTOEXP_EXPOTIME_DAMP = 0x60 # Auto exposure damp: damping coefficient (thousandths). The larger the damping coefficient, the smoother and slower the exposure time changes +TOUPCAM_OPTION_AUTOEXP_GAIN_DAMP = 0x61 # Auto exposure damp: damping coefficient (thousandths). 
The larger the damping coefficient, the smoother and slower the gain changes TOUPCAM_OPTION_MOTOR_NUMBER = 0x62 # range: [1, 20] TOUPCAM_OPTION_MOTOR_POS = 0x10000000 # range: [1, 702] TOUPCAM_OPTION_PSEUDO_COLOR_START = 0x63 # Pseudo: start color, BGR format TOUPCAM_OPTION_PSEUDO_COLOR_END = 0x64 # Pseudo: end color, BGR format -TOUPCAM_OPTION_PSEUDO_COLOR_ENABLE = 0x65 # Pseudo: 1 => enable, 0 => disable +TOUPCAM_OPTION_PSEUDO_COLOR_ENABLE = 0x65 # Pseudo: -1 => custom: use startcolor & endcolor to generate the colormap + # 0 => disable + # 1 => spot + # 2 => spring + # 3 => summer + # 4 => autumn + # 5 => winter + # 6 => bone + # 7 => jet + # 8 => rainbow + # 9 => deepgreen + # 10 => ocean + # 11 => cool + # 12 => hsv + # 13 => pink + # 14 => hot + # 15 => parula + # 16 => magma + # 17 => inferno + # 18 => plasma + # 19 => viridis + # 20 => cividis + # 21 => twilight + # 22 => twilight_shifted + # 23 => turbo + # 24 => red + # 25 => green + # 26 => blue + # +TOUPCAM_OPTION_LOW_POWERCONSUMPTION = 0x66 # Low Power Consumption: 0 => disable, 1 => enable +TOUPCAM_OPTION_FPNC = 0x67 # Fix Pattern Noise Correction + # set: + # 0: disable + # 1: enable + # -1: reset + # (0xff000000 | n): set the average number to n, [1~255] + # get: + # (val & 0xff): 0 => disable, 1 => enable, 2 => inited + # ((val & 0xff00) >> 8): sequence + # ((val & 0xff0000) >> 16): average number + # +TOUPCAM_OPTION_OVEREXP_POLICY = 0x68 # Auto exposure over exposure policy: when overexposed, + # 0 => directly reduce the exposure time/gain to the minimum value; or + # 1 => reduce exposure time/gain in proportion to current and target brightness. + # n(n>1) => first adjust the exposure time to (maximum automatic exposure time * maximum automatic exposure gain) * n / 1000, and then adjust according to the strategy of 1 + # The advantage of policy 0 is that the convergence speed is faster, but there is black screen. + # Policy 1 avoids the black screen, but the convergence speed is slower. 
+ # Default: 0 +TOUPCAM_OPTION_READOUT_MODE = 0x69 # Readout mode: 0 = IWR (Integrate While Read), 1 = ITR (Integrate Then Read) + # The working modes of the detector readout circuit can be divided into two types: ITR and IWR. Using the IWR readout mode can greatly increase the frame rate. In the ITR mode, the integration of the (n+1)th frame starts after all the data of the nth frame are read out, while in the IWR mode, the data of the nth frame is read out at the same time when the (n+1)th frame is integrated +TOUPCAM_OPTION_TAILLIGHT = 0x6a # Turn on/off tail Led light: 0 => off, 1 => on; default: on +TOUPCAM_OPTION_LENSSTATE = 0x6b # Load/Save lens state to EEPROM: 0 => load, 1 => save +TOUPCAM_OPTION_AWB_CONTINUOUS = 0x6c # Auto White Balance: continuous mode + # 0: disable (default) + # n>0: every n millisecond(s) + # n<0: every -n frame +TOUPCAM_OPTION_TECTARGET_RANGE = 0x6d # TEC target range: min(low 16 bits) = (short)(val & 0xffff), max(high 16 bits) = (short)((val >> 16) & 0xffff) +TOUPCAM_OPTION_CDS = 0x6e # Correlated Double Sampling +TOUPCAM_OPTION_LOW_POWER_EXPOTIME = 0x6f # Low Power Consumption: Enable if exposure time is greater than the set value +TOUPCAM_OPTION_ZERO_OFFSET = 0x70 # Sensor output offset to zero: 0 => disable, 1 => eanble; default: 0 +TOUPCAM_OPTION_GVCP_TIMEOUT = 0x71 # GVCP Timeout: millisecond, range = [3, 75], default: 15 + # Unless in very special circumstances, generally no modification is required, just use the default value +TOUPCAM_OPTION_GVCP_RETRY = 0x72 # GVCP Retry: range = [2, 8], default: 4 + # Unless in very special circumstances, generally no modification is required, just use the default value +TOUPCAM_OPTION_GVSP_WAIT_PERCENT = 0x73 # GVSP wait percent: range = [0, 100], default = (trigger mode: 100, realtime: 0, other: 1) +TOUPCAM_OPTION_RESET_SEQ_TIMESTAMP = 0x74 # Reset to 0: 1 => seq; 2 => timestamp; 3 => both TOUPCAM_PIXELFORMAT_RAW8 = 0x00 TOUPCAM_PIXELFORMAT_RAW10 = 0x01 @@ -320,6 +387,12 @@ 
TOUPCAM_PIXELFORMAT_GMCY12 = 0x0a # map to RGGB 12 bits TOUPCAM_PIXELFORMAT_UYVY = 0x0b TOUPCAM_PIXELFORMAT_RAW12PACK = 0x0c +TOUPCAM_PIXELFORMAT_RAW11 = 0x0d +TOUPCAM_PIXELFORMAT_HDR8HL = 0x0e # HDR, Bitdepth: 8, Conversion Gain: High + Low +TOUPCAM_PIXELFORMAT_HDR10HL = 0x0f # HDR, Bitdepth: 10, Conversion Gain: High + Low +TOUPCAM_PIXELFORMAT_HDR11HL = 0x10 # HDR, Bitdepth: 11, Conversion Gain: High + Low +TOUPCAM_PIXELFORMAT_HDR12HL = 0x11 # HDR, Bitdepth: 12, Conversion Gain: High + Low +TOUPCAM_PIXELFORMAT_HDR14HL = 0x12 # HDR, Bitdepth: 14, Conversion Gain: High + Low TOUPCAM_FRAMEINFO_FLAG_SEQ = 0x00000001 # frame sequence number TOUPCAM_FRAMEINFO_FLAG_TIMESTAMP = 0x00000002 # timestamp @@ -327,88 +400,120 @@ TOUPCAM_FRAMEINFO_FLAG_EXPOGAIN = 0x00000008 # exposure gain TOUPCAM_FRAMEINFO_FLAG_BLACKLEVEL = 0x00000010 # black level TOUPCAM_FRAMEINFO_FLAG_SHUTTERSEQ = 0x00000020 # sequence shutter counter +TOUPCAM_FRAMEINFO_FLAG_GPS = 0x00000040 # GPS +TOUPCAM_FRAMEINFO_FLAG_AUTOFOCUS = 0x00000080 # auto focus: uLum & uFV +TOUPCAM_FRAMEINFO_FLAG_COUNT = 0x00000100 # timecount, framecount, tricount TOUPCAM_FRAMEINFO_FLAG_STILL = 0x00008000 # still image -TOUPCAM_IOCONTROLTYPE_GET_SUPPORTEDMODE = 0x01 # 0x01 => Input, 0x02 => Output, (0x01 | 0x02) => support both Input and Output -TOUPCAM_IOCONTROLTYPE_GET_GPIODIR = 0x03 # 0x01 => Input, 0x02 => Output -TOUPCAM_IOCONTROLTYPE_SET_GPIODIR = 0x04 -TOUPCAM_IOCONTROLTYPE_GET_FORMAT = 0x05 # 0x00 => not connected - # 0x01 => Tri-state: Tri-state mode (Not driven) - # 0x02 => TTL: TTL level signals - # 0x03 => LVDS: LVDS level signals - # 0x04 => RS422: RS422 level signals - # 0x05 => Opto-coupled -TOUPCAM_IOCONTROLTYPE_SET_FORMAT = 0x06 -TOUPCAM_IOCONTROLTYPE_GET_OUTPUTINVERTER = 0x07 # boolean, only support output signal -TOUPCAM_IOCONTROLTYPE_SET_OUTPUTINVERTER = 0x08 -TOUPCAM_IOCONTROLTYPE_GET_INPUTACTIVATION = 0x09 # 0x00 => Rising edge, 0x01 => Falling edge, 0x02 => Level high, 0x03 => Level low 
-TOUPCAM_IOCONTROLTYPE_SET_INPUTACTIVATION = 0x0a -TOUPCAM_IOCONTROLTYPE_GET_DEBOUNCERTIME = 0x0b # debouncer time in microseconds, range: [0, 20000] -TOUPCAM_IOCONTROLTYPE_SET_DEBOUNCERTIME = 0x0c -TOUPCAM_IOCONTROLTYPE_GET_TRIGGERSOURCE = 0x0d # 0x00 => Opto-isolated input - # 0x01 => GPIO0 - # 0x02 => GPIO1 - # 0x03 => Counter - # 0x04 => PWM - # 0x05 => Software -TOUPCAM_IOCONTROLTYPE_SET_TRIGGERSOURCE = 0x0e -TOUPCAM_IOCONTROLTYPE_GET_TRIGGERDELAY = 0x0f # Trigger delay time in microseconds, range: [0, 5000000] -TOUPCAM_IOCONTROLTYPE_SET_TRIGGERDELAY = 0x10 -TOUPCAM_IOCONTROLTYPE_GET_BURSTCOUNTER = 0x11 # Burst Counter, range: [1 ~ 65535] -TOUPCAM_IOCONTROLTYPE_SET_BURSTCOUNTER = 0x12 -TOUPCAM_IOCONTROLTYPE_GET_COUNTERSOURCE = 0x13 # 0x00 => Opto-isolated input, 0x01 => GPIO0, 0x02 => GPIO1 -TOUPCAM_IOCONTROLTYPE_SET_COUNTERSOURCE = 0x14 -TOUPCAM_IOCONTROLTYPE_GET_COUNTERVALUE = 0x15 # Counter Value, range: [1 ~ 65535] -TOUPCAM_IOCONTROLTYPE_SET_COUNTERVALUE = 0x16 -TOUPCAM_IOCONTROLTYPE_SET_RESETCOUNTER = 0x18 -TOUPCAM_IOCONTROLTYPE_GET_PWM_FREQ = 0x19 -TOUPCAM_IOCONTROLTYPE_SET_PWM_FREQ = 0x1a -TOUPCAM_IOCONTROLTYPE_GET_PWM_DUTYRATIO = 0x1b -TOUPCAM_IOCONTROLTYPE_SET_PWM_DUTYRATIO = 0x1c -TOUPCAM_IOCONTROLTYPE_GET_PWMSOURCE = 0x1d # 0x00 => Opto-isolated input, 0x01 => GPIO0, 0x02 => GPIO1 -TOUPCAM_IOCONTROLTYPE_SET_PWMSOURCE = 0x1e -TOUPCAM_IOCONTROLTYPE_GET_OUTPUTMODE = 0x1f # 0x00 => Frame Trigger Wait - # 0x01 => Exposure Active - # 0x02 => Strobe - # 0x03 => User output - # 0x04 => Counter Output - # 0x05 => Timer Output -TOUPCAM_IOCONTROLTYPE_SET_OUTPUTMODE = 0x20 -TOUPCAM_IOCONTROLTYPE_GET_STROBEDELAYMODE = 0x21 # boolean, 1 => delay, 0 => pre-delay; compared to exposure active signal -TOUPCAM_IOCONTROLTYPE_SET_STROBEDELAYMODE = 0x22 -TOUPCAM_IOCONTROLTYPE_GET_STROBEDELAYTIME = 0x23 # Strobe delay or pre-delay time in microseconds, range: [0, 5000000] -TOUPCAM_IOCONTROLTYPE_SET_STROBEDELAYTIME = 0x24 -TOUPCAM_IOCONTROLTYPE_GET_STROBEDURATION = 0x25 # 
Strobe duration time in microseconds, range: [0, 5000000] -TOUPCAM_IOCONTROLTYPE_SET_STROBEDURATION = 0x26 -TOUPCAM_IOCONTROLTYPE_GET_USERVALUE = 0x27 # bit0 => Opto-isolated output - # bit1 => GPIO0 output - # bit2 => GPIO1 output -TOUPCAM_IOCONTROLTYPE_SET_USERVALUE = 0x28 -TOUPCAM_IOCONTROLTYPE_GET_UART_ENABLE = 0x29 # enable: 1 => on; 0 => off -TOUPCAM_IOCONTROLTYPE_SET_UART_ENABLE = 0x2a -TOUPCAM_IOCONTROLTYPE_GET_UART_BAUDRATE = 0x2b # baud rate: 0 => 9600; 1 => 19200; 2 => 38400; 3 => 57600; 4 => 115200 -TOUPCAM_IOCONTROLTYPE_SET_UART_BAUDRATE = 0x2c -TOUPCAM_IOCONTROLTYPE_GET_UART_LINEMODE = 0x2d # line mode: 0 => TX(GPIO_0)/RX(GPIO_1); 1 => TX(GPIO_1)/RX(GPIO_0) -TOUPCAM_IOCONTROLTYPE_SET_UART_LINEMODE = 0x2e -TOUPCAM_IOCONTROLTYPE_GET_EXPO_ACTIVE_MODE = 0x2f # exposure time signal: 0 => specified line, 1 => common exposure time -TOUPCAM_IOCONTROLTYPE_SET_EXPO_ACTIVE_MODE = 0x30 -TOUPCAM_IOCONTROLTYPE_GET_EXPO_START_LINE = 0x31 # exposure start line, default: 0 -TOUPCAM_IOCONTROLTYPE_SET_EXPO_START_LINE = 0x32 -TOUPCAM_IOCONTROLTYPE_GET_EXPO_END_LINE = 0x33 # exposure end line, default: 0 - # end line must be no less than start line -TOUPCAM_IOCONTROLTYPE_SET_EXPO_END_LINE = 0x34 -TOUPCAM_IOCONTROLTYPE_GET_EXEVT_ACTIVE_MODE = 0x35 # exposure event: 0 => specified line, 1 => common exposure time -TOUPCAM_IOCONTROLTYPE_SET_EXEVT_ACTIVE_MODE = 0x36 -TOUPCAM_IOCONTROLTYPE_GET_OUTPUTCOUNTERVALUE = 0x37 # Output Counter Value, range: [0 ~ 65535] -TOUPCAM_IOCONTROLTYPE_SET_OUTPUTCOUNTERVALUE = 0x38 -TOUPCAM_IOCONTROLTYPE_SET_OUTPUT_PAUSE = 0x3a # Output pause: 1 => puase, 0 => unpause - +TOUPCAM_IOCONTROLTYPE_GET_SUPPORTEDMODE = 0x01 # 0x01 => Input, 0x02 => Output, (0x01 | 0x02) => support both Input and Output +TOUPCAM_IOCONTROLTYPE_GET_GPIODIR = 0x03 # 0x01 => Input, 0x02 => Output +TOUPCAM_IOCONTROLTYPE_SET_GPIODIR = 0x04 +TOUPCAM_IOCONTROLTYPE_GET_FORMAT = 0x05 # 0x00 => not connected + # 0x01 => Tri-state: Tri-state mode (Not driven) + # 0x02 => TTL: TTL 
level signals + # 0x03 => LVDS: LVDS level signals + # 0x04 => RS422: RS422 level signals + # 0x05 => Opto-coupled +TOUPCAM_IOCONTROLTYPE_SET_FORMAT = 0x06 +TOUPCAM_IOCONTROLTYPE_GET_OUTPUTINVERTER = 0x07 # boolean, only support output signal +TOUPCAM_IOCONTROLTYPE_SET_OUTPUTINVERTER = 0x08 +TOUPCAM_IOCONTROLTYPE_GET_INPUTACTIVATION = 0x09 # 0x00 => Rising edge, 0x01 => Falling edge, 0x02 => Level high, 0x03 => Level low +TOUPCAM_IOCONTROLTYPE_SET_INPUTACTIVATION = 0x0a +TOUPCAM_IOCONTROLTYPE_GET_DEBOUNCERTIME = 0x0b # debouncer time in microseconds, range: [0, 20000] +TOUPCAM_IOCONTROLTYPE_SET_DEBOUNCERTIME = 0x0c +TOUPCAM_IOCONTROLTYPE_GET_TRIGGERSOURCE = 0x0d # 0x00 => Opto-isolated input + # 0x01 => GPIO0 + # 0x02 => GPIO1 + # 0x03 => Counter + # 0x04 => PWM + # 0x05 => Software +TOUPCAM_IOCONTROLTYPE_SET_TRIGGERSOURCE = 0x0e +TOUPCAM_IOCONTROLTYPE_GET_TRIGGERDELAY = 0x0f # Trigger delay time in microseconds, range: [0, 5000000] +TOUPCAM_IOCONTROLTYPE_SET_TRIGGERDELAY = 0x10 +TOUPCAM_IOCONTROLTYPE_GET_BURSTCOUNTER = 0x11 # Burst Counter, range: [1 ~ 65535] +TOUPCAM_IOCONTROLTYPE_SET_BURSTCOUNTER = 0x12 +TOUPCAM_IOCONTROLTYPE_GET_COUNTERSOURCE = 0x13 # 0x00 => Opto-isolated input, 0x01 => GPIO0, 0x02 => GPIO1 +TOUPCAM_IOCONTROLTYPE_SET_COUNTERSOURCE = 0x14 +TOUPCAM_IOCONTROLTYPE_GET_COUNTERVALUE = 0x15 # Counter Value, range: [1 ~ 65535] +TOUPCAM_IOCONTROLTYPE_SET_COUNTERVALUE = 0x16 +TOUPCAM_IOCONTROLTYPE_SET_RESETCOUNTER = 0x18 +TOUPCAM_IOCONTROLTYPE_GET_PWM_FREQ = 0x19 +TOUPCAM_IOCONTROLTYPE_SET_PWM_FREQ = 0x1a +TOUPCAM_IOCONTROLTYPE_GET_PWM_DUTYRATIO = 0x1b +TOUPCAM_IOCONTROLTYPE_SET_PWM_DUTYRATIO = 0x1c +TOUPCAM_IOCONTROLTYPE_GET_PWMSOURCE = 0x1d # 0x00 => Opto-isolated input, 0x01 => GPIO0, 0x02 => GPIO1 +TOUPCAM_IOCONTROLTYPE_SET_PWMSOURCE = 0x1e +TOUPCAM_IOCONTROLTYPE_GET_OUTPUTMODE = 0x1f # 0x00 => Frame Trigger Wait + # 0x01 => Exposure Active + # 0x02 => Strobe + # 0x03 => User output + # 0x04 => Counter Output + # 0x05 => Timer Output 
+TOUPCAM_IOCONTROLTYPE_SET_OUTPUTMODE = 0x20 +TOUPCAM_IOCONTROLTYPE_GET_STROBEDELAYMODE = 0x21 # boolean, 1 => delay, 0 => pre-delay; compared to exposure active signal +TOUPCAM_IOCONTROLTYPE_SET_STROBEDELAYMODE = 0x22 +TOUPCAM_IOCONTROLTYPE_GET_STROBEDELAYTIME = 0x23 # Strobe delay or pre-delay time in microseconds, range: [0, 5000000] +TOUPCAM_IOCONTROLTYPE_SET_STROBEDELAYTIME = 0x24 +TOUPCAM_IOCONTROLTYPE_GET_STROBEDURATION = 0x25 # Strobe duration time in microseconds, range: [0, 5000000] +TOUPCAM_IOCONTROLTYPE_SET_STROBEDURATION = 0x26 +TOUPCAM_IOCONTROLTYPE_GET_USERVALUE = 0x27 # bit0 => Opto-isolated output + # bit1 => GPIO0 output + # bit2 => GPIO1 output +TOUPCAM_IOCONTROLTYPE_SET_USERVALUE = 0x28 +TOUPCAM_IOCONTROLTYPE_GET_UART_ENABLE = 0x29 # enable: 1 => on; 0 => off +TOUPCAM_IOCONTROLTYPE_SET_UART_ENABLE = 0x2a +TOUPCAM_IOCONTROLTYPE_GET_UART_BAUDRATE = 0x2b # baud rate: 0 => 9600; 1 => 19200; 2 => 38400; 3 => 57600; 4 => 115200 +TOUPCAM_IOCONTROLTYPE_SET_UART_BAUDRATE = 0x2c +TOUPCAM_IOCONTROLTYPE_GET_UART_LINEMODE = 0x2d # line mode: 0 => TX(GPIO_0)/RX(GPIO_1); 1 => TX(GPIO_1)/RX(GPIO_0) +TOUPCAM_IOCONTROLTYPE_SET_UART_LINEMODE = 0x2e +TOUPCAM_IOCONTROLTYPE_GET_EXPO_ACTIVE_MODE = 0x2f # exposure time signal: 0 => specified line, 1 => common exposure time +TOUPCAM_IOCONTROLTYPE_SET_EXPO_ACTIVE_MODE = 0x30 +TOUPCAM_IOCONTROLTYPE_GET_EXPO_START_LINE = 0x31 # exposure start line, default: 0 +TOUPCAM_IOCONTROLTYPE_SET_EXPO_START_LINE = 0x32 +TOUPCAM_IOCONTROLTYPE_GET_EXPO_END_LINE = 0x33 # exposure end line, default: 0 + # end line must be no less than start line +TOUPCAM_IOCONTROLTYPE_SET_EXPO_END_LINE = 0x34 +TOUPCAM_IOCONTROLTYPE_GET_EXEVT_ACTIVE_MODE = 0x35 # exposure event: 0 => specified line, 1 => common exposure time +TOUPCAM_IOCONTROLTYPE_SET_EXEVT_ACTIVE_MODE = 0x36 +TOUPCAM_IOCONTROLTYPE_GET_OUTPUTCOUNTERVALUE = 0x37 # Output Counter Value, range: [0 ~ 65535] +TOUPCAM_IOCONTROLTYPE_SET_OUTPUTCOUNTERVALUE = 0x38 
+TOUPCAM_IOCONTROLTYPE_SET_OUTPUT_PAUSE = 0x3a # Output pause: 1 => puase, 0 => unpause +TOUPCAM_IOCONTROLTYPE_GET_INPUT_STATE = 0x3b # Input state: 0 (low level) or 1 (high level) +TOUPCAM_IOCONTROLTYPE_GET_USER_PULSE_HIGH = 0x3d # User pulse high level time: us +TOUPCAM_IOCONTROLTYPE_SET_USER_PULSE_HIGH = 0x3e +TOUPCAM_IOCONTROLTYPE_GET_USER_PULSE_LOW = 0x3f # User pulse low level time: us +TOUPCAM_IOCONTROLTYPE_SET_USER_PULSE_LOW = 0x40 +TOUPCAM_IOCONTROLTYPE_GET_USER_PULSE_NUMBER = 0x41 # User pulse number: default 0 +TOUPCAM_IOCONTROLTYPE_SET_USER_PULSE_NUMBER = 0x42 +TOUPCAM_IOCONTROLTYPE_GET_EXTERNAL_TRIGGER_NUMBER = 0x43 # External trigger number +TOUPCAM_IOCONTROLTYPE_GET_DEBOUNCER_TRIGGER_NUMBER = 0x45 # Trigger signal number after debounce +TOUPCAM_IOCONTROLTYPE_GET_EFFECTIVE_TRIGGER_NUMBER = 0x47 # Effective trigger signal number + +TOUPCAM_IOCONTROL_DELAYTIME_MAX = 5 * 1000 * 1000 + +TOUPCAM_AFMODE_CALIBRATE = 0x0 # lens calibration mode +TOUPCAM_AFMODE_MANUAL = 0x1 # manual focus mode +TOUPCAM_AFMODE_ONCE = 0x2 # onepush focus mode +TOUPCAM_AFMODE_AUTO = 0x3 # autofocus mode +TOUPCAM_AFMODE_NONE = 0x4 # no active selection of focus mode +TOUPCAM_AFMODE_IDLE = 0x5 + +TOUPCAM_AFSTATUS_NA = 0x0 # Not available +TOUPCAM_AFSTATUS_PEAKPOINT = 0x1 # Focus completed, find the focus position +TOUPCAM_AFSTATUS_DEFOCUS = 0x2 # End of focus, defocus +TOUPCAM_AFSTATUS_NEAR = 0x3 # Focusing ended, object too close +TOUPCAM_AFSTATUS_FAR = 0x4 # Focusing ended, object too far +TOUPCAM_AFSTATUS_ROICHANGED = 0x5 # Focusing ends, roi changes +TOUPCAM_AFSTATUS_SCENECHANGED = 0x6 # Focusing ends, scene changes +TOUPCAM_AFSTATUS_MODECHANGED = 0x7 # The end of focusing and the change in focusing mode is usually determined by the user moderator +TOUPCAM_AFSTATUS_UNFINISH = 0x8 # The focus is not complete. 
At the beginning of focusing, it will be set as incomplete + # AAF: Astro Auto Focuser TOUPCAM_AAF_SETPOSITION = 0x01 TOUPCAM_AAF_GETPOSITION = 0x02 TOUPCAM_AAF_SETZERO = 0x03 -TOUPCAM_AAF_GETZERO = 0x04 TOUPCAM_AAF_SETDIRECTION = 0x05 +TOUPCAM_AAF_GETDIRECTION = 0x06 TOUPCAM_AAF_SETMAXINCREMENT = 0x07 TOUPCAM_AAF_GETMAXINCREMENT = 0x08 TOUPCAM_AAF_SETFINE = 0x09 @@ -420,11 +525,12 @@ TOUPCAM_AAF_SETBACKLASH = 0x0f TOUPCAM_AAF_GETBACKLASH = 0x10 TOUPCAM_AAF_GETAMBIENTTEMP = 0x12 -TOUPCAM_AAF_GETTEMP = 0x14 +TOUPCAM_AAF_GETTEMP = 0x14 # in 0.1 degrees Celsius, such as: 32 means 3.2 degrees Celsius TOUPCAM_AAF_ISMOVING = 0x16 TOUPCAM_AAF_HALT = 0x17 TOUPCAM_AAF_SETMAXSTEP = 0x1b TOUPCAM_AAF_GETMAXSTEP = 0x1c +TOUPCAM_AAF_GETSTEPSIZE = 0x1e TOUPCAM_AAF_RANGEMIN = 0xfd # Range: min value TOUPCAM_AAF_RANGEMAX = 0xfe # Range: max value TOUPCAM_AAF_RANGEDEF = 0xff # Range: default value @@ -446,18 +552,18 @@ # HRESULT: error code S_OK = 0x00000000 # Success -S_FALSE = 0x00000001 # Yet another success -E_UNEXPECTED = 0x8000ffff # Catastrophic failure -E_NOTIMPL = 0x80004001 # Not supported or not implemented -E_ACCESSDENIED = 0x80070005 # Permission denied +S_FALSE = 0x00000001 # Yet another success # Remark: Different from S_OK, such as internal values and user-set values have coincided, equivalent to noop +E_UNEXPECTED = 0x8000ffff # Catastrophic failure # Remark: Generally indicates that the conditions are not met, such as calling put_Option setting some options that do not support modification when the camera is running, and so on +E_NOTIMPL = 0x80004001 # Not supported or not implemented # Remark: This feature is not supported on this model of camera +E_ACCESSDENIED = 0x80070005 # Permission denied # Remark: The program on Linux does not have permission to open the USB device, please enable udev rules file or run as root E_OUTOFMEMORY = 0x8007000e # Out of memory E_INVALIDARG = 0x80070057 # One or more arguments are not valid -E_POINTER = 0x80004003 # Pointer that is 
not valid +E_POINTER = 0x80004003 # Pointer that is not valid # Remark: Pointer is NULL E_FAIL = 0x80004005 # Generic failure E_WRONG_THREAD = 0x8001010e # Call function in the wrong thread -E_GEN_FAILURE = 0x8007001f # Device not functioning -E_BUSY = 0x800700aa # The requested resource is in use -E_PENDING = 0x8000000a # The data necessary to complete this operation is not yet available +E_GEN_FAILURE = 0x8007001f # Device not functioning # Remark: It is generally caused by hardware errors, such as cable problems, USB port problems, poor contact, camera hardware damage, etc +E_BUSY = 0x800700aa # The requested resource is in use # Remark: The camera is already in use, such as duplicated opening/starting the camera, or being used by other application, etc +E_PENDING = 0x8000000a # The data necessary to complete this operation is not yet available # Remark: No data is available at this time E_TIMEOUT = 0x8001011f # This operation returned because the timeout period expired TOUPCAM_EXPOGAIN_DEF = 100 # exposure gain, default value @@ -475,11 +581,11 @@ TOUPCAM_SATURATION_MIN = 0 # saturation TOUPCAM_SATURATION_MAX = 255 # saturation TOUPCAM_BRIGHTNESS_DEF = 0 # brightness -TOUPCAM_BRIGHTNESS_MIN = -64 # brightness -TOUPCAM_BRIGHTNESS_MAX = 64 # brightness +TOUPCAM_BRIGHTNESS_MIN = -255 # brightness +TOUPCAM_BRIGHTNESS_MAX = 255 # brightness TOUPCAM_CONTRAST_DEF = 0 # contrast -TOUPCAM_CONTRAST_MIN = -100 # contrast -TOUPCAM_CONTRAST_MAX = 100 # contrast +TOUPCAM_CONTRAST_MIN = -255 # contrast +TOUPCAM_CONTRAST_MAX = 255 # contrast TOUPCAM_GAMMA_DEF = 100 # gamma TOUPCAM_GAMMA_MIN = 20 # gamma TOUPCAM_GAMMA_MAX = 180 # gamma @@ -492,6 +598,7 @@ TOUPCAM_BLACKLEVEL_MIN = 0 # minimum black level TOUPCAM_BLACKLEVEL8_MAX = 31 # maximum black level for bitdepth = 8 TOUPCAM_BLACKLEVEL10_MAX = 31 * 4 # maximum black level for bitdepth = 10 +TOUPCAM_BLACKLEVEL11_MAX = 31 * 8 # maximum black level for bitdepth = 11 TOUPCAM_BLACKLEVEL12_MAX = 31 * 16 # maximum black level for 
bitdepth = 12 TOUPCAM_BLACKLEVEL14_MAX = 31 * 64 # maximum black level for bitdepth = 14 TOUPCAM_BLACKLEVEL16_MAX = 31 * 256 # maximum black level for bitdepth = 16 @@ -507,18 +614,15 @@ TOUPCAM_AUTOEXPO_THRESHOLD_DEF = 5 # auto exposure threshold TOUPCAM_AUTOEXPO_THRESHOLD_MIN = 2 # auto exposure threshold TOUPCAM_AUTOEXPO_THRESHOLD_MAX = 15 # auto exposure threshold -TOUPCAM_AUTOEXPO_STEP_DEF = 1000 # auto exposure step: thousandths -TOUPCAM_AUTOEXPO_STEP_MIN = 1 # auto exposure step: thousandths -TOUPCAM_AUTOEXPO_STEP_MAX = 1000 # auto exposure step: thousandths +TOUPCAM_AUTOEXPO_DAMP_DEF = 0 # auto exposure damping coefficient: thousandths +TOUPCAM_AUTOEXPO_DAMP_MIN = 0 # auto exposure damping coefficient: thousandths +TOUPCAM_AUTOEXPO_DAMP_MAX = 1000 # auto exposure damping coefficient: thousandths TOUPCAM_BANDWIDTH_DEF = 100 # bandwidth TOUPCAM_BANDWIDTH_MIN = 1 # bandwidth TOUPCAM_BANDWIDTH_MAX = 100 # bandwidth TOUPCAM_DENOISE_DEF = 0 # denoise TOUPCAM_DENOISE_MIN = 0 # denoise TOUPCAM_DENOISE_MAX = 100 # denoise -TOUPCAM_TEC_TARGET_MIN = -500 # TEC target: -50.0 degrees Celsius -TOUPCAM_TEC_TARGET_DEF = 100 # TEC target: 0.0 degrees Celsius -TOUPCAM_TEC_TARGET_MAX = 400 # TEC target: 40.0 degrees Celsius TOUPCAM_HEARTBEAT_MIN = 100 # millisecond TOUPCAM_HEARTBEAT_MAX = 10000 # millisecond TOUPCAM_AE_PERCENT_MIN = 0 # auto exposure percent; 0 or 100 => full roi average, means "disabled" @@ -526,18 +630,20 @@ TOUPCAM_AE_PERCENT_DEF = 10 # auto exposure percent: enabled, percentage = 10% TOUPCAM_NOPACKET_TIMEOUT_MIN = 500 # no packet timeout minimum: 500ms TOUPCAM_NOFRAME_TIMEOUT_MIN = 500 # no frame timeout minimum: 500ms -TOUPCAM_DYNAMIC_DEFECT_T1_MIN = 10 # dynamic defect pixel correction, threshold, means: 1.0 -TOUPCAM_DYNAMIC_DEFECT_T1_MAX = 100 # means: 10.0 -TOUPCAM_DYNAMIC_DEFECT_T1_DEF = 13 # means: 1.3 -TOUPCAM_DYNAMIC_DEFECT_T2_MIN = 0 # dynamic defect pixel correction, value, means: 0.00 -TOUPCAM_DYNAMIC_DEFECT_T2_MAX = 100 # means: 1.00 
-TOUPCAM_DYNAMIC_DEFECT_T2_DEF = 100 +TOUPCAM_DYNAMIC_DEFECT_T1_MIN = 0 # dynamic defect pixel correction, dead pixel ratio: the smaller the dead ratio is, the more stringent the conditions for processing dead pixels are, and fewer pixels will be processed +TOUPCAM_DYNAMIC_DEFECT_T1_MAX = 100 # means: 1.0 +TOUPCAM_DYNAMIC_DEFECT_T1_DEF = 90 # means: 0.9 +TOUPCAM_DYNAMIC_DEFECT_T2_MIN = 0 # dynamic defect pixel correction, hot pixel ratio: the smaller the hot ratio is, the more stringent the conditions for processing hot pixels are, and fewer pixels will be processed +TOUPCAM_DYNAMIC_DEFECT_T2_MAX = 100 +TOUPCAM_DYNAMIC_DEFECT_T2_DEF = 90 TOUPCAM_HDR_K_MIN = 1 # HDR synthesize TOUPCAM_HDR_K_MAX = 25500 TOUPCAM_HDR_B_MIN = 0 TOUPCAM_HDR_B_MAX = 65535 TOUPCAM_HDR_THRESHOLD_MIN = 0 TOUPCAM_HDR_THRESHOLD_MAX = 4094 +TOUPCAM_CDS_MIN = 0 +TOUPCAM_CDS_MAX = 4094 def TDIBWIDTHBYTES(bits): return ((bits + 31) // 32 * 4) @@ -548,13 +654,13 @@ def TDIBWIDTHBYTES(bits): |-----------------------------------------------------------------| | Auto Exposure Target | 16~235 | 120 | | Exposure Gain | 100~ | 100 | -| Temp | 2000~15000 | 6503 | -| Tint | 200~2500 | 1000 | +| Temp | 1000~25000 | 6503 | +| Tint | 100~2500 | 1000 | | LevelRange | 0~255 | Low = 0, High = 255 | -| Contrast | -100~100 | 0 | +| Contrast | -255~255 | 0 | | Hue | -180~180 | 0 | | Saturation | 0~255 | 128 | -| Brightness | -64~64 | 0 | +| Brightness | -255~255 | 0 | | Gamma | 20~180 | 100 | | WBGain | -127~127 | 0 | ------------------------------------------------------------------| @@ -565,7 +671,7 @@ def __init__(self, w, h): self.width = w self.height = h -class ToupcamAfParam: +class ToupcamFocusMotor: def __init__(self, imax, imin, idef, imaxabs, iminabs, zoneh, zonev): self.imax = imax # maximum auto focus sensor board positon self.imin = imin # minimum auto focus sensor board positon @@ -587,6 +693,27 @@ def __init__(self): self.expogain = 0 # expogain self.blacklevel = 0 # black level +class ToupcamGps: + 
def __init__(self): + self.utcstart = 0 # exposure start time: nanosecond since epoch (00:00:00 UTC on Thursday, 1 January 1970, see https://en.wikipedia.org/wiki/Unix_time) + self.utcend = 0 # exposure end time + self.longitude = 0 # millionth of a degree, 0.000001 degree + self.latitude = 0 + self.altitude = 0 # millimeter + self.satellite = 0 # number of satellite + self.reserved = 0 # not used + +class ToupcamFrameInfoV4: + def __init__(self): + self.v3 = ToupcamFrameInfoV3() + self.reserved = 0 + self.uLum = 0 + self.uFV = 0 + self.timecount = 0 + self.framecount = 0 + self.tricount = 0 + self.gps = ToupcamGps() + class ToupcamFrameInfoV2: def __init__(self): self.width = 0 @@ -614,6 +741,29 @@ def __init__(self, displayname, id, model): self.id = id # unique and opaque id of a connected camera, for Toupcam_Open self.model = model # ToupcamModelV2 +class ToupcamSelfTrigger: + def __init__(self, sensingLeft, sensingTop, sensingWidth, sensingHeight, hThreshold, lThreshold, expoTime, expoGain, hCount, lCount, reserved): + self.sensingLeft = sensingLeft + self.sensingTop = sensingTop + self.sensingWidth = sensingWidth + self.sensingHeight = sensingHeight # Sensing Area + self.hThreshold = hThreshold + self.lThreshold = lThreshold # threshold High side, threshold Low side + self.expoTime = expoTime # Exposure Time + self.expoGain = expoGain # Exposure Gain + self.hCount = hCount + self.lCount = lCount # Count threshold High side, Count threshold Low side, thousandths of Sensing Area + self.reserved = reserved + +class ToupcamAFState: + def __init__(self, AF_Mode, AF_Status, AF_LensAP_Update_Flag, AF_LensManual_Flag, Reserved0, Reserved1): + self.AF_Mode = AF_Mode + self.AF_Status = AF_Status + self.AF_LensAP_Update_Flag = AF_LensAP_Update_Flag # mark for whether the lens aperture is calibrated + self.AF_LensManual_Flag = AF_LensManual_Flag # if true, allows manual operation + self.Reserved0 = Reserved0 + self.Reserved1 = Reserved1 + if sys.platform == 'win32': 
class HRESULTException(OSError): def __init__(self, hr): @@ -624,51 +774,24 @@ class HRESULTException(Exception): def __init__(self, hr): self.hr = hr -class _Resolution(ctypes.Structure): - _fields_ = [('width', ctypes.c_uint), - ('height', ctypes.c_uint)] - -if sys.platform == 'win32': - class _ModelV2(ctypes.Structure): # camera model v2 win32 - _fields_ = [('name', ctypes.c_wchar_p), # model name, in Windows, we use unicode - ('flag', ctypes.c_ulonglong), # TOUPCAM_FLAG_xxx, 64 bits - ('maxspeed', ctypes.c_uint), # number of speed level, same as Toupcam_get_MaxSpeed(), the speed range = [0, maxspeed], closed interval - ('preview', ctypes.c_uint), # number of preview resolution, same as Toupcam_get_ResolutionNumber() - ('still', ctypes.c_uint), # number of still resolution, same as Toupcam_get_StillResolutionNumber() - ('maxfanspeed', ctypes.c_uint), # maximum fan speed, fan speed range = [0, max], closed interval - ('ioctrol', ctypes.c_uint), # number of input/output control - ('xpixsz', ctypes.c_float), # physical pixel size in micrometer - ('ypixsz', ctypes.c_float), # physical pixel size in micrometer - ('res', _Resolution * 16)] - class _DeviceV2(ctypes.Structure): # win32 - _fields_ = [('displayname', ctypes.c_wchar * 64), # display name - ('id', ctypes.c_wchar * 64), # unique and opaque id of a connected camera, for Toupcam_Open - ('model', ctypes.POINTER(_ModelV2))] -else: - class _ModelV2(ctypes.Structure): # camera model v2 linux/mac - _fields_ = [('name', ctypes.c_char_p), # model name - ('flag', ctypes.c_ulonglong), # TOUPCAM_FLAG_xxx, 64 bits - ('maxspeed', ctypes.c_uint), # number of speed level, same as Toupcam_get_MaxSpeed(), the speed range = [0, maxspeed], closed interval - ('preview', ctypes.c_uint), # number of preview resolution, same as Toupcam_get_ResolutionNumber() - ('still', ctypes.c_uint), # number of still resolution, same as Toupcam_get_StillResolutionNumber() - ('maxfanspeed', ctypes.c_uint), # maximum fan speed - ('ioctrol', 
ctypes.c_uint), # number of input/output control - ('xpixsz', ctypes.c_float), # physical pixel size in micrometer - ('ypixsz', ctypes.c_float), # physical pixel size in micrometer - ('res', _Resolution * 16)] - class _DeviceV2(ctypes.Structure): # linux/mac - _fields_ = [('displayname', ctypes.c_char * 64), # display name - ('id', ctypes.c_char * 64), # unique and opaque id of a connected camera, for Toupcam_Open - ('model', ctypes.POINTER(_ModelV2))] - class Toupcam: + class __Resolution(ctypes.Structure): + _fields_ = [('width', ctypes.c_uint), + ('height', ctypes.c_uint)] + class __RECT(ctypes.Structure): _fields_ = [('left', ctypes.c_int), ('top', ctypes.c_int), ('right', ctypes.c_int), ('bottom', ctypes.c_int)] - class __AfParam(ctypes.Structure): + class __ModelV2(ctypes.Structure): + pass + + class __DeviceV2(ctypes.Structure): + pass + + class __FocusMotor(ctypes.Structure): _fields_ = [('imax', ctypes.c_int), # maximum auto focus sensor board positon ('imin', ctypes.c_int), # minimum auto focus sensor board positon ('idef', ctypes.c_int), # conjugate calibration positon @@ -688,6 +811,18 @@ class __FrameInfoV3(ctypes.Structure): ('expogain', ctypes.c_ushort), # expogain ('blacklevel', ctypes.c_ushort)] # black level + class __Gps(ctypes.Structure): + _fields_ = [('utcstart', ctypes.c_longlong), # UTC: exposure start time + ('utcend', ctypes.c_longlong), # exposure end time + ('longitude', ctypes.c_int), # millionth of a degree, 0.000001 degree + ('latitude', ctypes.c_int), + ('altitude', ctypes.c_int), # millimeter + ('satellite', ctypes.c_ushort), # number of satellite + ('reserved', ctypes.c_ushort)] # not used + + class __FrameInfoV4(ctypes.Structure): + pass + class __FrameInfoV2(ctypes.Structure): _fields_ = [('width', ctypes.c_uint), ('height', ctypes.c_uint), @@ -695,6 +830,26 @@ class __FrameInfoV2(ctypes.Structure): ('seq', ctypes.c_uint), # frame sequence number ('timestamp', ctypes.c_longlong)] # microsecond + class 
__SelfTrigger(ctypes.Structure): + _fields_ = [('sensingLeft', ctypes.c_uint), + ('sensingTop', ctypes.c_uint), + ('sensingWidth', ctypes.c_uint), + ('sensingHeight', ctypes.c_uint), # Sensing Area + ('hThreshold', ctypes.c_uint), + ('lThreshold', ctypes.c_uint), # threshold High side, threshold Low side + ('expoTime', ctypes.c_uint), # Exposure Time + ('expoGain', ctypes.c_ushort), # Exposure Gain + ('hCount', ctypes.c_ushort), + ('lCount', ctypes.c_ushort), # Count threshold High side, Count threshold Low side, thousandths of Sensing Area + ('reserved', ctypes.c_ushort)] + + class __AFState(ctypes.Structure): + _fields_ = [('AF_Status', ctypes.c_uint), + ('AF_LensAP_Update_Flag', ctypes.c_byte), # mark for whether the lens aperture is calibrated + ('AF_LensManual_Flag', ctypes.c_byte), # if true, allows manual operation + ('Reserved0', ctypes.c_byte), + ('Reserved1', ctypes.c_byte)] + if sys.platform == 'win32': __EVENT_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_uint, ctypes.py_object) __PROGRESS_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.py_object) @@ -732,7 +887,7 @@ def __convertStr(x): @classmethod def Version(cls): - """get the version of this dll, which is: 54.23714.20231029""" + """get the version of this dll, which is: 57.26813.20241028""" cls.__initlib() return cls.__lib.Toupcam_Version() @@ -777,7 +932,7 @@ def __hotplugCallbackFun(ctx): @classmethod def HotPlug(cls, fun, ctx): """ - USB hotplug is only available on macOS and Linux, it's unnecessary on Windows & Android. To process the device plug in / pull out: + This function is only available on macOS and Linux, it's unnecessary on Windows & Android. 
To process the device plug in / pull out: (1) On Windows, please refer to the MSDN (a) Device Management, https://docs.microsoft.com/en-us/windows/win32/devio/device-management (b) Detecting Media Insertion or Removal, https://docs.microsoft.com/en-us/windows/win32/devio/detecting-media-insertion-or-removal @@ -801,7 +956,7 @@ def HotPlug(cls, fun, ctx): @classmethod def EnumV2(cls): cls.__initlib() - a = (_DeviceV2 * TOUPCAM_MAX)() + a = (cls.__DeviceV2 * TOUPCAM_MAX)() n = cls.__lib.Toupcam_EnumV2(a) arr = [] for i in range(0, n): @@ -811,7 +966,7 @@ def EnumV2(cls): @classmethod def EnumWithName(cls): cls.__initlib() - a = (_DeviceV2 * TOUPCAM_MAX)() + a = (cls.__DeviceV2 * TOUPCAM_MAX)() n = cls.__lib.Toupcam_EnumWithName(a) arr = [] for i in range(0, n): @@ -906,6 +1061,27 @@ def __convertFrameInfoV3(pInfo, x): pInfo.expotime = x.expotime pInfo.expogain = x.expogain pInfo.blacklevel = x.blacklevel + + @staticmethod + def __convertGps(gps, x): + gps.utcstart = x.utcstart + gps.utcend = x.utcend + gps.longitude = x.longitude + gps.latitude = x.latitude + gps.altitude = x.altitude + gps.satellite = x.satellite + gps.reserved = x.reserved + + @staticmethod + def __convertFrameInfoV4(pInfo, x): + __class__.__convertFrameInfoV3(pInfo.v3, x.v3) + pInfo.reserved = x.reserved + pInfo.uLum = x.uLum + pInfo.uFV = x.uFV + pInfo.timecount = x.timecount + pInfo.framecount = x.framecount + pInfo.tricount = x.tricount + __class__.__convertGps(pInfo.gps, x.gps) @staticmethod def __convertFrameInfoV2(pInfo, x): @@ -917,7 +1093,7 @@ def __convertFrameInfoV2(pInfo, x): """ nWaitMS: The timeout interval, in milliseconds. If a non-zero value is specified, the function either successfully fetches the image or waits for a timeout. - If nWaitMS is zero, the function does not wait when there are no images to fetch; It always returns immediately; this is equal to PullImageV3. 
+ If nWaitMS is zero, the function does not wait when there are no images to fetch; It always returns immediately; this is equal to PullImageV4. bStill: to pull still image, set to 1, otherwise 0 bits: 24 (RGB24), 32 (RGB32), 48 (RGB48), 8 (Grey), 16 (Grey), 64 (RGB64). In RAW mode, this parameter is ignored. @@ -957,6 +1133,22 @@ def __convertFrameInfoV2(pInfo, x): | | 10/12/14/16bits Mode | Width * 2 | Width * 2 | |-----------|------------------------|-------------------------------|-----------------------| """ + def PullImageV4(self, pImageData, bStill, bits, rowPitch, pInfo): + if pInfo is None: + self.__lib.Toupcam_PullImageV4(self.__h, pImageData, bStill, bits, rowPitch, None) + else: + x = self.__FrameInfoV4() + self.__lib.Toupcam_PullImageV4(self.__h, pImageData, bStill, bits, rowPitch, ctypes.byref(x)) + self.__convertFrameInfoV4(pInfo, x) + + def WaitImageV4(self, nWaitMS, pImageData, bStill, bits, rowPitch, pInfo): + if pInfo is None: + self.__lib.Toupcam_WaitImageV4(self.__h, nWaitMS, pImageData, bStill, bits, rowPitch, None) + else: + x = self.__FrameInfoV4() + self.__lib.Toupcam_WaitImageV4(self.__h, nWaitMS, pImageData, bStill, bits, rowPitch, ctypes.byref(x)) + self.__convertFrameInfoV4(pInfo, x) + def PullImageV3(self, pImageData, bStill, bits, rowPitch, pInfo): if pInfo is None: self.__lib.Toupcam_PullImageV3(self.__h, pImageData, bStill, bits, rowPitch, None) @@ -1093,18 +1285,32 @@ def Trigger(self, nNumber): """ self.__lib.Toupcam_Trigger(self.__h, ctypes.c_ushort(nNumber)) - def TriggerSync(self, nTimeout, pImageData, bits, rowPitch, pInfo): + def TriggerSyncV4(self, nWaitMS, pImageData, bits, rowPitch, pInfo): """ trigger synchronously - nTimeout: 0: by default, exposure * 102% + 4000 milliseconds + nWaitMS: 0: by default, exposure * 102% + 4000 milliseconds 0xffffffff: wait infinite - other: milliseconds to wait + other: milliseconds to wait """ if pInfo is None: - self.__lib.TriggerSync(self.__h, nTimeout, pImageData, bits, rowPitch, None) 
+ self.__lib.Toupcam_TriggerSyncV4(self.__h, nWaitMS, pImageData, bits, rowPitch, None) + else: + x = self.__FrameInfoV4() + self.__lib.Toupcam_TriggerSyncV4(self.__h, nWaitMS, pImageData, bits, rowPitch, ctypes.byref(x)) + self.__convertFrameInfoV4(pInfo, x) + + def TriggerSync(self, nWaitMS, pImageData, bits, rowPitch, pInfo): + """ + trigger synchronously + nWaitMS: 0: by default, exposure * 102% + 4000 milliseconds + 0xffffffff: wait infinite + other: milliseconds to wait + """ + if pInfo is None: + self.__lib.Toupcam_TriggerSync(self.__h, nWaitMS, pImageData, bits, rowPitch, None) else: x = self.__FrameInfoV3() - self.__lib.TriggerSync(self.__h, nTimeout, pImageData, bits, rowPitch, ctypes.byref(x)) + self.__lib.Toupcam_TriggerSync(self.__h, nWaitMS, pImageData, bits, rowPitch, ctypes.byref(x)) self.__convertFrameInfoV3(pInfo, x) def put_Size(self, nWidth, nHeight): @@ -1184,7 +1390,8 @@ def get_RawFormat(self): def put_RealTime(self, val): """ - 0: stop grab frame when frame buffer deque is full, until the frames in the queue are pulled away and the queue is not full + 0: no realtime + stop grab frame when frame buffer deque is full, until the frames in the queue are pulled away and the queue is not full 1: realtime use minimum frame buffer. When new frame arrive, drop all the pending frame regardless of whether the frame buffer is full. If DDR present, also limit the DDR frame buffer to only one frame. @@ -1232,6 +1439,7 @@ def put_AutoExpoTarget(self, Target): self.__lib.Toupcam_put_AutoExpoTarget(self.__h, ctypes.c_int(Target)) def put_AutoExpoRange(self, maxTime, minTime, maxGain, minGain): + ''' set the maximum/minimal auto exposure time and agin. 
The default maximum auto exposure time is 350ms ''' return self.__lib.Toupcam_put_AutoExpoRange(self.__h, ctypes.c_uint(maxTime), ctypes.c_uint(minTime), ctypes.c_ushort(maxGain), ctypes.c_ushort(minGain)) def get_AutoExpoRange(self): @@ -1266,6 +1474,12 @@ def get_ExpoTime(self): self.__lib.Toupcam_get_ExpoTime(self.__h, ctypes.byref(x)) return x.value + def get_RealExpoTime(self): + """in microseconds""" + x = ctypes.c_uint(0) + self.__lib.Toupcam_get_RealExpoTime(self.__h, ctypes.byref(x)) + return x.value + def put_ExpoTime(self, Time): self.__lib.Toupcam_put_ExpoTime(self.__h, ctypes.c_uint(Time)) @@ -1574,6 +1788,17 @@ def get_Option(self, iOption): self.__lib.Toupcam_get_Option(self.__h, ctypes.c_uint(iOption), ctypes.byref(x)) return x.value + def get_PixelFormatSupport(self, cmd): + """" + cmd: + 0xff: query the number + 0~number: query the nth pixel format + output: TOUPCAM_PIXELFORMAT_xxxx + """ + x = ctypes.c_int(0) + self.__lib.Toupcam_get_PixelFormatSupport(self.__h, cmd, ctypes.byref(x)) + return x.value + def put_Linear(self, v8, v16): self.__lib.Toupcam_put_Linear(self.__h, v8, v16) @@ -1594,6 +1819,17 @@ def put_InitWBGain(self, v): else: raise HRESULTException(0x80070057) + def get_TecTargetRange(self): + """TEC Target range: [min, max]""" + x = ctypes.c_int(0) + self.__lib.Toupcam_get_Option(self.__h, TOUPCAM_OPTION_TECTARGET_RANGE, ctypes.byref(x)) + low, high = x.value & 0xffff, (x.value >> 16) & 0xffff + if low > 32767: + low = low - 65536 + if high > 32767: + high = high - 65536 + return (low, high) + def get_Temperature(self): """get the temperature of the sensor, in 0.1 degrees Celsius (32 means 3.2 degrees Celsius, -35 means -3.5 degree Celsius)""" x = ctypes.c_short(0) @@ -1601,7 +1837,10 @@ def get_Temperature(self): return x.value def put_Temperature(self, nTemperature): - """set the target temperature of the sensor or TEC, in 0.1 degrees Celsius (32 means 3.2 degrees Celsius, -35 means -3.5 degree Celsius)""" + """ + set the target 
temperature of the sensor or TEC, in 0.1 degrees Celsius (32 means 3.2 degrees Celsius, -35 means -3.5 degree Celsius) + set "-2730" or below means using the default value of this model + """ self.__lib.Toupcam_put_Temperature(self.__h, ctypes.c_short(nTemperature)) def put_Roi(self, xOffset, yOffset, xWidth, yHeight): @@ -1617,6 +1856,55 @@ def get_Roi(self): self.__lib.Toupcam_get_Roi(self.__h, ctypes.byref(x), ctypes.byref(y), ctypes.byref(w), ctypes.byref(h)) return (x.value, y.value, w.value, h.value) + def put_RoiN(self, xOffset, yOffset, xWidth, yHeight): + '''multiple Roi''' + Num = len(xOffset) + pxOffset = (ctypes.c_uint * Num)(*xOffset) + pyOffset = (ctypes.c_uint * Num)(*yOffset) + pxWidth = (ctypes.c_uint * Num)(*xWidth) + pyHeight = (ctypes.c_uint * Num)(*yHeight) + self.__lib.Toupcam_put_RoiN(self.__h, pxOffset, pyOffset, pxWidth, pyHeight, Num) + + def put_XY(self, x, y): + self.__lib.Toupcam_put_XY(self.__h, ctypes.c_int(x), ctypes.c_int(y)) + + def put_SelfTrigger(self, pSt): + x = self.__SelfTrigger() + x.sensingLeft = pSt.sensingLeft + x.sensingTop = pSt.sensingTop + x.sensingWidth = pSt.sensingWidth + x.sensingHeight = pSt.sensingHeight + x.hThreshold = pSt.hThreshold + x.lThreshold = pSt.lThreshold + x.expoTime = pSt.expoTime + x.expoGain = pSt.expoGain + x.hCount = pSt.hCount + x.lCount = pSt.lCount + x.reserved = pSt.reserved + self.__lib.Toupcam_put_SelfTrigger(self.__h, ctypes.byref(x)) + + def get_SelfTrigger(self, pSt): + x = self.__SelfTrigger() + self.__lib.Toupcam_get_SelfTrigger(self.__h, ctypes.byref(x)) + return ToupcamSelfTrigger(x.sensingLeft.value, x.sensingTop.value, x.sensingWidth.value, x.sensingHeight.value, x.hThreshold.value, x.lThreshold.value, x.expoTime.value, x.expoGain.value, x.hCount.value, x.lCount.value, x.reserved.value) + + def get_AFState(self): + x = ctypes.c_uint(0) + self.__lib.Toupcam_get_AFState(self.__h, ctypes.byref(x)) + return x.value + + def put_AFMode(self, mode): + 
self.__lib.Toupcam_put_AFRoi(self.__h, ctypes.c_uint(mode)) + + def put_AFRoi(self, xOffset, yOffset, xWidth, yHeight): + self.__lib.Toupcam_put_AFRoi(self.__h, ctypes.c_uint(xOffset), ctypes.c_uint(yOffset), ctypes.c_uint(xWidth), ctypes.c_uint(yHeight)) + + def put_AFAperture(self, iAperture): + self.__lib.Toupcam_put_AFAperture(self.__h, ctypes.c_int(iAperture)) + + def put_AFFMPos(self, iFMPos): + self.__lib.Toupcam_put_AFFMPos(self.__h, ctypes.c_int(iFMPos)) + def get_FrameRate(self): """ get the frame rate: framerate (fps) = Frame * 1000.0 / nTime @@ -1660,29 +1948,44 @@ def DfcOnce(self): def DfcOnePush(self): DfcOnce(self) - def DfcExport(self, filepath): + def FpncOnce(self): + self.__lib.Toupcam_FpncOnce(self.__h) + + def DfcExport(self, filePath): + if sys.platform == 'win32': + self.__lib.Toupcam_DfcExport(self.__h, filePath) + else: + self.__lib.Toupcam_DfcExport(self.__h, filePath.encode()) + + def FfcExport(self, filePath): + if sys.platform == 'win32': + self.__lib.Toupcam_FfcExport(self.__h, filePath) + else: + self.__lib.Toupcam_FfcExport(self.__h, filePath.encode()) + + def DfcImport(self, filePath): if sys.platform == 'win32': - self.__lib.Toupcam_DfcExport(self.__h, filepath) + self.__lib.Toupcam_DfcImport(self.__h, filePath) else: - self.__lib.Toupcam_DfcExport(self.__h, filepath.encode()) + self.__lib.Toupcam_DfcImport(self.__h, filePath.encode()) - def FfcExport(self, filepath): + def FfcImport(self, filePath): if sys.platform == 'win32': - self.__lib.Toupcam_FfcExport(self.__h, filepath) + self.__lib.Toupcam_FfcImport(self.__h, filePath) else: - self.__lib.Toupcam_FfcExport(self.__h, filepath.encode()) + self.__lib.Toupcam_FfcImport(self.__h, filePath.encode()) - def DfcImport(self, filepath): + def FpncExport(self, filePath): if sys.platform == 'win32': - self.__lib.Toupcam_DfcImport(self.__h, filepath) + self.__lib.Toupcam_FpncExport(self.__h, filePath) else: - self.__lib.Toupcam_DfcImport(self.__h, filepath.encode()) + 
self.__lib.Toupcam_FpncExport(self.__h, filePath.encode()) - def FfcImport(self, filepath): + def FpncImport(self, filePath): if sys.platform == 'win32': - self.__lib.Toupcam_FfcImport(self.__h, filepath) + self.__lib.Toupcam_FpncImport(self.__h, filePath) else: - self.__lib.Toupcam_FfcImport(self.__h, filepath.encode()) + self.__lib.Toupcam_FpncImport(self.__h, filePath.encode()) def IoControl(self, ioLineNumber, eType, outVal): x = ctypes.c_int(0) @@ -1694,10 +1997,18 @@ def AAF(self, action, outVal): self.__lib.Toupcam_AAF(self.__h, ctypes.c_int(action), ctypes.c_int(outVal), ctypes.byref(x)) return x.value - def get_AfParam(self): - x = self.__AfParam() - self.__lib.Toupcam_get_AfParam(self.__h, ctypes.byref(x)) - return ToupcamAfParam(x.imax.value, x.imin.value, x.idef.value, x.imaxabs.value, x.iminabs.value, x.zoneh.value, x.zonev.value) + def get_FocusMotor(self): + x = self.__FocusMotor() + self.__lib.Toupcam_get_FocusMotor(self.__h, ctypes.byref(x)) + return ToupcamFocusMotor(x.imax.value, x.imin.value, x.idef.value, x.imaxabs.value, x.iminabs.value, x.zoneh.value, x.zonev.value) + + def set_Name(self, name): + self.__lib.Toupcam_set_Name(self.__h, name.encode()) + + def query_Name(self): + str = (ctypes.c_char * 64)() + self.__lib.Toupcam_query_Name(self.__h, str) + return str.value.decode() @staticmethod def __histogramCallbackFun(aHist, nFlag, ctx): @@ -1717,6 +2028,29 @@ def GetHistogram(self, fun, ctx): self.__cbhistogram = __class__.__HISTOGRAM_CALLBACK(__class__.__histogramCallbackFun) self.__lib.Toupcam_GetHistogramV2(self.__h, self.__cbhistogram, ctypes.py_object(self)) + @classmethod + def put_Name(cls, camId, name): + cls.__initlib() + if sys.platform == 'win32': + return cls.__lib.Toupcam_put_Name(camId, name) + else: + return cls.__lib.Toupcam_put_Name(camId.encode('ascii'), name) + + @classmethod + def get_Name(cls, camId): + cls.__initlib() + str = (ctypes.c_char * 64)() + if sys.platform == 'win32': + cls.__lib.Toupcam_get_Name(camId, str) 
+ else: + cls.__lib.Toupcam_get_Name(camId.encode('ascii'), str) + return str.value.decode() + + @classmethod + def PixelFormatName(cls, pixelFormat): + cls.__initlib() + return cls.__lib.Toupcam_get_PixelFormatName(pixelFormat) + @classmethod def Replug(cls, camId): """ @@ -1779,48 +2113,102 @@ def TempTint2Gain(cls, temp, tint): def __initlib(cls): if cls.__lib is None: try: # Firstly try to load the library in the directory where this file is located - #dir = os.path.dirname(os.path.realpath(__file__)) + # dir = os.path.dirname(os.path.realpath(__file__)) dir_ = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','drivers and libraries','toupcam')) - # to do: auto detect platform and architecture if sys.platform == 'win32': - cls.__lib = ctypes.windll.LoadLibrary(os.path.join(dir_, 'win', - 'x64','toupcam.dll')) + cls.__lib = ctypes.windll.LoadLibrary(os.path.join(dir_,'win','x64','toupcam.dll')) elif sys.platform.startswith('linux'): cls.__lib = ctypes.cdll.LoadLibrary(os.path.join(dir_,'linux','x64','libtoupcam.so')) else: - print("----------" + os.path.join(dir_,'mac','libtoupcam.dylib') + "----------") cls.__lib = ctypes.cdll.LoadLibrary(os.path.join(dir_,'mac','libtoupcam.dylib')) except OSError: pass if cls.__lib is None: if sys.platform == 'win32': - cls.__lib = ctypes.windll.LoadLibrary('toupcam.dll') + cls.__lib = ctypes.windll.LoadLibrary("drivers and libraries/toupcam/windows/toupcam.dll") elif sys.platform.startswith('linux'): cls.__lib = ctypes.cdll.LoadLibrary('libtoupcam.so') else: cls.__lib = ctypes.cdll.LoadLibrary('libtoupcam.dylib') + cls.__FrameInfoV4._fields_ = [ + ('v3', cls.__FrameInfoV3), + ('reserved', ctypes.c_uint), + ('uLum', ctypes.c_uint), + ('uFV', ctypes.c_longlong), + ('timecount', ctypes.c_longlong), + ('framecount', ctypes.c_uint), + ('tricount', ctypes.c_uint), + ('gps', cls.__Gps)] + + if sys.platform == 'win32': + cls.__ModelV2._fields_ = [ # camera model v2 win32 + ('name', ctypes.c_wchar_p), # model name, in 
Windows, we use unicode + ('flag', ctypes.c_ulonglong), # TOUPCAM_FLAG_xxx, 64 bits + ('maxspeed', ctypes.c_uint), # number of speed level, same as Toupcam_get_MaxSpeed(), the speed range = [0, maxspeed], closed interval + ('preview', ctypes.c_uint), # number of preview resolution, same as Toupcam_get_ResolutionNumber() + ('still', ctypes.c_uint), # number of still resolution, same as Toupcam_get_StillResolutionNumber() + ('maxfanspeed', ctypes.c_uint), # maximum fan speed, fan speed range = [0, max], closed interval + ('ioctrol', ctypes.c_uint), # number of input/output control + ('xpixsz', ctypes.c_float), # physical pixel size in micrometer + ('ypixsz', ctypes.c_float), # physical pixel size in micrometer + ('res', cls.__Resolution * 16)] + cls.__DeviceV2._fields_ = [ # win32 + ('displayname', ctypes.c_wchar * 64), # display name + ('id', ctypes.c_wchar * 64), # unique and opaque id of a connected camera, for Toupcam_Open + ('model', ctypes.POINTER(cls.__ModelV2))] + else: + cls.__ModelV2._fields_ = [ # camera model v2 linux/mac + ('name', ctypes.c_char_p), # model name + ('flag', ctypes.c_ulonglong), # TOUPCAM_FLAG_xxx, 64 bits + ('maxspeed', ctypes.c_uint), # number of speed level, same as Toupcam_get_MaxSpeed(), the speed range = [0, maxspeed], closed interval + ('preview', ctypes.c_uint), # number of preview resolution, same as Toupcam_get_ResolutionNumber() + ('still', ctypes.c_uint), # number of still resolution, same as Toupcam_get_StillResolutionNumber() + ('maxfanspeed', ctypes.c_uint), # maximum fan speed + ('ioctrol', ctypes.c_uint), # number of input/output control + ('xpixsz', ctypes.c_float), # physical pixel size in micrometer + ('ypixsz', ctypes.c_float), # physical pixel size in micrometer + ('res', cls.__Resolution * 16)] + cls.__DeviceV2._fields_ = [ # linux/mac + ('displayname', ctypes.c_char * 64), # display name + ('id', ctypes.c_char * 64), # unique and opaque id of a connected camera, for Toupcam_Open + ('model', 
ctypes.POINTER(cls.__ModelV2))] + cls.__lib.Toupcam_Version.argtypes = None cls.__lib.Toupcam_EnumV2.restype = ctypes.c_uint - cls.__lib.Toupcam_EnumV2.argtypes = [_DeviceV2 * TOUPCAM_MAX] + cls.__lib.Toupcam_EnumV2.argtypes = [cls.__DeviceV2 * TOUPCAM_MAX] cls.__lib.Toupcam_EnumWithName.restype = ctypes.c_uint - cls.__lib.Toupcam_EnumWithName.argtypes = [_DeviceV2 * TOUPCAM_MAX] + cls.__lib.Toupcam_EnumWithName.argtypes = [cls.__DeviceV2 * TOUPCAM_MAX] + cls.__lib.Toupcam_put_Name.restype = ctypes.c_int + cls.__lib.Toupcam_get_Name.restype = ctypes.c_int cls.__lib.Toupcam_Open.restype = ctypes.c_void_p cls.__lib.Toupcam_Replug.restype = ctypes.c_int cls.__lib.Toupcam_Update.restype = ctypes.c_int if sys.platform == 'win32': cls.__lib.Toupcam_Version.restype = ctypes.c_wchar_p + cls.__lib.Toupcam_put_Name.argtypes = [ctypes.c_wchar_p, ctypes.c_char_p] + cls.__lib.Toupcam_get_Name.argtypes = [ctypes.c_wchar_p, ctypes.c_char * 64] cls.__lib.Toupcam_Open.argtypes = [ctypes.c_wchar_p] cls.__lib.Toupcam_Replug.argtypes = [ctypes.c_wchar_p] cls.__lib.Toupcam_Update.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, cls.__PROGRESS_CALLBACK, ctypes.py_object] else: cls.__lib.Toupcam_Version.restype = ctypes.c_char_p + cls.__lib.Toupcam_put_Name.argtypes = [ctypes.c_char_p, ctypes.c_char_p] + cls.__lib.Toupcam_get_Name.argtypes = [ctypes.c_char_p, ctypes.c_char * 64] cls.__lib.Toupcam_Open.argtypes = [ctypes.c_char_p] cls.__lib.Toupcam_Replug.argtypes = [ctypes.c_char_p] cls.__lib.Toupcam_Update.argtypes = [ctypes.c_char_p, ctypes.c_char_p, cls.__PROGRESS_CALLBACK, ctypes.py_object] + cls.__lib.Toupcam_put_Name.errcheck = cls.__errcheck + cls.__lib.Toupcam_get_Name.errcheck = cls.__errcheck cls.__lib.Toupcam_Replug.errcheck = cls.__errcheck cls.__lib.Toupcam_Update.errcheck = cls.__errcheck + cls.__lib.Toupcam_set_Name.restype = ctypes.c_int + cls.__lib.Toupcam_set_Name.argtypes = [ctypes.c_void_p, ctypes.c_char_p] + cls.__lib.Toupcam_set_Name.errcheck = cls.__errcheck + 
cls.__lib.Toupcam_query_Name.restype = ctypes.c_int + cls.__lib.Toupcam_query_Name.argtypes = [ctypes.c_void_p, ctypes.c_char * 64] + cls.__lib.Toupcam_query_Name.errcheck = cls.__errcheck cls.__lib.Toupcam_OpenByIndex.restype = ctypes.c_void_p cls.__lib.Toupcam_OpenByIndex.argtypes = [ctypes.c_uint] cls.__lib.Toupcam_Close.restype = None @@ -1828,12 +2216,18 @@ def __initlib(cls): cls.__lib.Toupcam_StartPullModeWithCallback.restype = ctypes.c_int cls.__lib.Toupcam_StartPullModeWithCallback.errcheck = cls.__errcheck cls.__lib.Toupcam_StartPullModeWithCallback.argtypes = [ctypes.c_void_p, cls.__EVENT_CALLBACK, ctypes.py_object] + cls.__lib.Toupcam_PullImageV4.restype = ctypes.c_int + cls.__lib.Toupcam_PullImageV4.errcheck = cls.__errcheck + cls.__lib.Toupcam_PullImageV4.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV4)] + cls.__lib.Toupcam_WaitImageV4.restype = ctypes.c_int + cls.__lib.Toupcam_WaitImageV4.errcheck = cls.__errcheck + cls.__lib.Toupcam_WaitImageV4.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV4)] cls.__lib.Toupcam_PullImageV3.restype = ctypes.c_int cls.__lib.Toupcam_PullImageV3.errcheck = cls.__errcheck cls.__lib.Toupcam_PullImageV3.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV3)] cls.__lib.Toupcam_WaitImageV3.restype = ctypes.c_int cls.__lib.Toupcam_WaitImageV3.errcheck = cls.__errcheck - cls.__lib.Toupcam_WaitImageV3.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV3)] + cls.__lib.Toupcam_WaitImageV3.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV3)] cls.__lib.Toupcam_PullImageV2.restype = ctypes.c_int cls.__lib.Toupcam_PullImageV2.errcheck = 
cls.__errcheck cls.__lib.Toupcam_PullImageV2.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV2)] @@ -1864,9 +2258,12 @@ def __initlib(cls): cls.__lib.Toupcam_Trigger.restype = ctypes.c_int cls.__lib.Toupcam_Trigger.errcheck = cls.__errcheck cls.__lib.Toupcam_Trigger.argtypes = [ctypes.c_void_p, ctypes.c_ushort] + cls.__lib.Toupcam_TriggerSyncV4.restype = ctypes.c_int + cls.__lib.Toupcam_TriggerSyncV4.errcheck = cls.__errcheck + cls.__lib.Toupcam_TriggerSyncV4.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV4)] cls.__lib.Toupcam_TriggerSync.restype = ctypes.c_int cls.__lib.Toupcam_TriggerSync.errcheck = cls.__errcheck - cls.__lib.Toupcam_TriggerSync.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV3)] + cls.__lib.Toupcam_TriggerSync.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int, ctypes.POINTER(cls.__FrameInfoV3)] cls.__lib.Toupcam_put_Size.restype = ctypes.c_int cls.__lib.Toupcam_put_Size.errcheck = cls.__errcheck cls.__lib.Toupcam_put_Size.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] @@ -1981,6 +2378,9 @@ def __initlib(cls): cls.__lib.Toupcam_DfcOnce.restype = ctypes.c_int cls.__lib.Toupcam_DfcOnce.errcheck = cls.__errcheck cls.__lib.Toupcam_DfcOnce.argtypes = [ctypes.c_void_p] + cls.__lib.Toupcam_FpncOnce.restype = ctypes.c_int + cls.__lib.Toupcam_FpncOnce.errcheck = cls.__errcheck + cls.__lib.Toupcam_FpncOnce.argtypes = [ctypes.c_void_p] cls.__lib.Toupcam_FfcExport.restype = ctypes.c_int cls.__lib.Toupcam_FfcExport.errcheck = cls.__errcheck cls.__lib.Toupcam_FfcImport.restype = ctypes.c_int @@ -1989,16 +2389,24 @@ def __initlib(cls): cls.__lib.Toupcam_DfcExport.errcheck = cls.__errcheck cls.__lib.Toupcam_DfcImport.restype = ctypes.c_int cls.__lib.Toupcam_DfcImport.errcheck = cls.__errcheck + cls.__lib.Toupcam_FpncExport.restype 
= ctypes.c_int + cls.__lib.Toupcam_FpncExport.errcheck = cls.__errcheck + cls.__lib.Toupcam_FpncImport.restype = ctypes.c_int + cls.__lib.Toupcam_FpncImport.errcheck = cls.__errcheck if sys.platform == 'win32': cls.__lib.Toupcam_FfcExport.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p] cls.__lib.Toupcam_FfcImport.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p] cls.__lib.Toupcam_DfcExport.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p] cls.__lib.Toupcam_DfcImport.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p] + cls.__lib.Toupcam_FpncExport.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p] + cls.__lib.Toupcam_FpncImport.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p] else: cls.__lib.Toupcam_FfcExport.argtypes = [ctypes.c_void_p, ctypes.c_char_p] cls.__lib.Toupcam_FfcImport.argtypes = [ctypes.c_void_p, ctypes.c_char_p] cls.__lib.Toupcam_DfcExport.argtypes = [ctypes.c_void_p, ctypes.c_char_p] cls.__lib.Toupcam_DfcImport.argtypes = [ctypes.c_void_p, ctypes.c_char_p] + cls.__lib.Toupcam_FpncExport.argtypes = [ctypes.c_void_p, ctypes.c_char_p] + cls.__lib.Toupcam_FpncImport.argtypes = [ctypes.c_void_p, ctypes.c_char_p] cls.__lib.Toupcam_put_Hue.restype = ctypes.c_int cls.__lib.Toupcam_put_Hue.errcheck = cls.__errcheck cls.__lib.Toupcam_put_Hue.argtypes = [ctypes.c_void_p, ctypes.c_int] @@ -2185,15 +2593,50 @@ def __initlib(cls): cls.__lib.Toupcam_get_Option.restype = ctypes.c_int cls.__lib.Toupcam_get_Option.errcheck = cls.__errcheck cls.__lib.Toupcam_get_Option.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.POINTER(ctypes.c_int)] + cls.__lib.Toupcam_get_PixelFormatSupport.restype = ctypes.c_int + cls.__lib.Toupcam_get_PixelFormatSupport.errcheck = cls.__errcheck + cls.__lib.Toupcam_get_PixelFormatSupport.argtypes = [ctypes.c_void_p, ctypes.c_char, ctypes.POINTER(ctypes.c_int)] + cls.__lib.Toupcam_get_PixelFormatName.restype = ctypes.c_char_p + cls.__lib.Toupcam_get_PixelFormatName.argtypes = [ctypes.c_int] cls.__lib.Toupcam_put_Roi.restype = ctypes.c_int 
cls.__lib.Toupcam_put_Roi.errcheck = cls.__errcheck cls.__lib.Toupcam_put_Roi.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint] cls.__lib.Toupcam_get_Roi.restype = ctypes.c_int cls.__lib.Toupcam_get_Roi.errcheck = cls.__errcheck cls.__lib.Toupcam_get_Roi.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)] - cls.__lib.Toupcam_get_AfParam.restype = ctypes.c_int - cls.__lib.Toupcam_get_AfParam.errcheck = cls.__errcheck - cls.__lib.Toupcam_get_AfParam.argtypes = [ctypes.c_void_p, ctypes.POINTER(cls.__AfParam)] + cls.__lib.Toupcam_put_RoiN.restype = ctypes.c_int + cls.__lib.Toupcam_put_RoiN.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_RoiN.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.c_uint] + cls.__lib.Toupcam_put_XY.restype = ctypes.c_int + cls.__lib.Toupcam_put_XY.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_XY.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] + cls.__lib.Toupcam_put_SelfTrigger.restype = ctypes.c_int + cls.__lib.Toupcam_put_SelfTrigger.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_SelfTrigger.argtypes = [ctypes.c_void_p, ctypes.POINTER(cls.__SelfTrigger)] + cls.__lib.Toupcam_get_SelfTrigger.restype = ctypes.c_int + cls.__lib.Toupcam_get_SelfTrigger.errcheck = cls.__errcheck + cls.__lib.Toupcam_get_SelfTrigger.argtypes = [ctypes.c_void_p, ctypes.POINTER(cls.__SelfTrigger)] + cls.__lib.Toupcam_get_LensInfo.restype = ctypes.c_int + cls.__lib.Toupcam_get_LensInfo.errcheck = cls.__errcheck + cls.__lib.Toupcam_get_LensInfo.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint)] + cls.__lib.Toupcam_get_AFState.restype = ctypes.c_int + cls.__lib.Toupcam_get_AFState.errcheck = cls.__errcheck + cls.__lib.Toupcam_get_AFState.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint)] + 
cls.__lib.Toupcam_put_AFMode.restype = ctypes.c_int + cls.__lib.Toupcam_put_AFMode.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_AFMode.argtypes = [ctypes.c_void_p, ctypes.c_uint] + cls.__lib.Toupcam_put_AFRoi.restype = ctypes.c_int + cls.__lib.Toupcam_put_AFRoi.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_AFRoi.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint] + cls.__lib.Toupcam_put_AFAperture.restype = ctypes.c_int + cls.__lib.Toupcam_put_AFAperture.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_AFAperture.argtypes = [ctypes.c_void_p, ctypes.c_int] + cls.__lib.Toupcam_put_AFFMPos.restype = ctypes.c_int + cls.__lib.Toupcam_put_AFFMPos.errcheck = cls.__errcheck + cls.__lib.Toupcam_put_AFFMPos.argtypes = [ctypes.c_void_p, ctypes.c_int] + cls.__lib.Toupcam_get_FocusMotor.restype = ctypes.c_int + cls.__lib.Toupcam_get_FocusMotor.errcheck = cls.__errcheck + cls.__lib.Toupcam_get_FocusMotor.argtypes = [ctypes.c_void_p, ctypes.POINTER(cls.__FocusMotor)] cls.__lib.Toupcam_IoControl.restype = ctypes.c_int cls.__lib.Toupcam_IoControl.errcheck = cls.__errcheck cls.__lib.Toupcam_IoControl.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int)] @@ -2233,4 +2676,4 @@ def __initlib(cls): cls.__lib.Toupcam_TempTint2Gain.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int * 3]; if sys.platform != 'win32' and sys.platform != 'android': cls.__lib.Toupcam_HotPlug.restype = None - cls.__lib.Toupcam_HotPlug.argtypes = [cls.__HOTPLUG_CALLBACK, ctypes.c_void_p] + cls.__lib.Toupcam_HotPlug.argtypes = [cls.__HOTPLUG_CALLBACK, ctypes.c_void_p] \ No newline at end of file diff --git a/software/control/utils.py b/software/control/utils.py index 0f0c061f0..f15ff3a8a 100644 --- a/software/control/utils.py +++ b/software/control/utils.py @@ -2,6 +2,7 @@ from numpy import std, square, mean import numpy as np from scipy.ndimage import label +import os def crop_image(image,crop_width,crop_height): 
image_height = image.shape[0] @@ -138,5 +139,6 @@ def interpolate_plane(triple1, triple2, triple3, point): return z - - +def create_done_file(path): + with open(os.path.join(path,'.done'), 'w') as file: + pass # This creates an empty file diff --git a/software/control/utils_config.py b/software/control/utils_config.py index 9f23903dc..e2fbe15af 100644 --- a/software/control/utils_config.py +++ b/software/control/utils_config.py @@ -14,34 +14,11 @@ def generate_default_configuration(filename): mode_1.set('ZOffset','0.0') mode_1.set('PixelFormat','default') mode_1.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') - - mode_2 = ET.SubElement(top,'mode') - mode_2.set('ID','2') - mode_2.set('Name','BF LED matrix left half') - mode_2.set('ExposureTime','16') - mode_2.set('AnalogGain','0') - mode_2.set('IlluminationSource','1') - mode_2.set('IlluminationIntensity','5') - mode_2.set('CameraSN','') - mode_2.set('ZOffset','0.0') - mode_2.set('PixelFormat','default') - mode_2.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') - - mode_3 = ET.SubElement(top,'mode') - mode_3.set('ID','3') - mode_3.set('Name','BF LED matrix right half') - mode_3.set('ExposureTime','16') - mode_3.set('AnalogGain','0') - mode_3.set('IlluminationSource','2') - mode_3.set('IlluminationIntensity','5') - mode_3.set('CameraSN','') - mode_3.set('ZOffset','0.0') - mode_3.set('PixelFormat','default') - mode_3.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_1.set('EmissionFilterPosition','1') mode_4 = ET.SubElement(top,'mode') mode_4.set('ID','4') - mode_4.set('Name','BF LED matrix color PDAF') + mode_4.set('Name','DF LED matrix') mode_4.set('ExposureTime','22') mode_4.set('AnalogGain','0') mode_4.set('IlluminationSource','3') @@ -50,6 +27,7 @@ def generate_default_configuration(filename): mode_4.set('ZOffset','0.0') mode_4.set('PixelFormat','default') 
mode_4.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_4.set('EmissionFilterPosition','1') mode_5 = ET.SubElement(top,'mode') mode_5.set('ID','5') @@ -62,6 +40,7 @@ def generate_default_configuration(filename): mode_5.set('ZOffset','0.0') mode_5.set('PixelFormat','default') mode_5.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_5.set('EmissionFilterPosition','1') mode_6 = ET.SubElement(top,'mode') mode_6.set('ID','6') @@ -74,6 +53,7 @@ def generate_default_configuration(filename): mode_6.set('ZOffset','0.0') mode_6.set('PixelFormat','default') mode_6.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_6.set('EmissionFilterPosition','1') mode_7 = ET.SubElement(top,'mode') mode_7.set('ID','7') @@ -86,6 +66,7 @@ def generate_default_configuration(filename): mode_7.set('ZOffset','0.0') mode_7.set('PixelFormat','default') mode_7.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_7.set('EmissionFilterPosition','1') mode_8 = ET.SubElement(top,'mode') mode_8.set('ID','8') @@ -98,6 +79,7 @@ def generate_default_configuration(filename): mode_8.set('ZOffset','0.0') mode_8.set('PixelFormat','default') mode_8.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_8.set('EmissionFilterPosition','1') mode_12 = ET.SubElement(top,'mode') mode_12.set('ID','12') @@ -110,6 +92,7 @@ def generate_default_configuration(filename): mode_12.set('ZOffset','0.0') mode_12.set('PixelFormat','default') mode_12.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_12.set('EmissionFilterPosition','1') mode_9 = ET.SubElement(top,'mode') mode_9.set('ID','9') @@ -122,30 +105,57 @@ def generate_default_configuration(filename): mode_9.set('ZOffset','0.0') mode_9.set('PixelFormat','default') 
mode_9.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_9.set('EmissionFilterPosition','1') + + # mode_10 = ET.SubElement(top,'mode') + # mode_10.set('ID','10') + # mode_10.set('Name','BF LED matrix left dot') + # mode_10.set('ExposureTime','20') + # mode_10.set('AnalogGain','0') + # mode_10.set('IlluminationSource','5') + # mode_10.set('IlluminationIntensity','20') + # mode_10.set('CameraSN','') + # mode_10.set('ZOffset','0.0') + # mode_10.set('PixelFormat','default') + # mode_10.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + + # mode_11 = ET.SubElement(top,'mode') + # mode_11.set('ID','11') + # mode_11.set('Name','BF LED matrix right dot') + # mode_11.set('ExposureTime','20') + # mode_11.set('AnalogGain','0') + # mode_11.set('IlluminationSource','6') + # mode_11.set('IlluminationIntensity','20') + # mode_11.set('CameraSN','') + # mode_11.set('ZOffset','0.0') + # mode_11.set('PixelFormat','default') + # mode_11.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') - mode_10 = ET.SubElement(top,'mode') - mode_10.set('ID','10') - mode_10.set('Name','BF LED matrix left dot') - mode_10.set('ExposureTime','20') - mode_10.set('AnalogGain','0') - mode_10.set('IlluminationSource','5') - mode_10.set('IlluminationIntensity','20') - mode_10.set('CameraSN','') - mode_10.set('ZOffset','0.0') - mode_10.set('PixelFormat','default') - mode_10.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') - - mode_11 = ET.SubElement(top,'mode') - mode_11.set('ID','11') - mode_11.set('Name','BF LED matrix right dot') - mode_11.set('ExposureTime','20') - mode_11.set('AnalogGain','0') - mode_11.set('IlluminationSource','6') - mode_11.set('IlluminationIntensity','20') - mode_11.set('CameraSN','') - mode_11.set('ZOffset','0.0') - mode_11.set('PixelFormat','default') - 
mode_11.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_2 = ET.SubElement(top,'mode') + mode_2.set('ID','2') + mode_2.set('Name','BF LED matrix left half') + mode_2.set('ExposureTime','16') + mode_2.set('AnalogGain','0') + mode_2.set('IlluminationSource','1') + mode_2.set('IlluminationIntensity','5') + mode_2.set('CameraSN','') + mode_2.set('ZOffset','0.0') + mode_2.set('PixelFormat','default') + mode_2.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_2.set('EmissionFilterPosition','1') + + mode_3 = ET.SubElement(top,'mode') + mode_3.set('ID','3') + mode_3.set('Name','BF LED matrix right half') + mode_3.set('ExposureTime','16') + mode_3.set('AnalogGain','0') + mode_3.set('IlluminationSource','2') + mode_3.set('IlluminationIntensity','5') + mode_3.set('CameraSN','') + mode_3.set('ZOffset','0.0') + mode_3.set('PixelFormat','default') + mode_3.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_3.set('EmissionFilterPosition','1') mode_12 = ET.SubElement(top,'mode') mode_12.set('ID','12') @@ -158,6 +168,7 @@ def generate_default_configuration(filename): mode_12.set('ZOffset','0.0') mode_12.set('PixelFormat','default') mode_12.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_12.set('EmissionFilterPosition','1') mode_13 = ET.SubElement(top,'mode') mode_13.set('ID','13') @@ -170,6 +181,59 @@ def generate_default_configuration(filename): mode_13.set('ZOffset','0.0') mode_13.set('PixelFormat','default') mode_13.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_13.set('EmissionFilterPosition','1') + + mode_14 = ET.SubElement(top,'mode') + mode_14.set('ID','1') + mode_14.set('Name','BF LED matrix full_R') + mode_14.set('ExposureTime','12') + mode_14.set('AnalogGain','0') + mode_14.set('IlluminationSource','0') + mode_14.set('IlluminationIntensity','5') 
+ mode_14.set('CameraSN','') + mode_14.set('ZOffset','0.0') + mode_14.set('PixelFormat','default') + mode_14.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_14.set('EmissionFilterPosition','1') + + mode_15 = ET.SubElement(top,'mode') + mode_15.set('ID','1') + mode_15.set('Name','BF LED matrix full_G') + mode_15.set('ExposureTime','12') + mode_15.set('AnalogGain','0') + mode_15.set('IlluminationSource','0') + mode_15.set('IlluminationIntensity','5') + mode_15.set('CameraSN','') + mode_15.set('ZOffset','0.0') + mode_15.set('PixelFormat','default') + mode_15.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_15.set('EmissionFilterPosition','1') + + mode_16 = ET.SubElement(top,'mode') + mode_16.set('ID','1') + mode_16.set('Name','BF LED matrix full_B') + mode_16.set('ExposureTime','12') + mode_16.set('AnalogGain','0') + mode_16.set('IlluminationSource','0') + mode_16.set('IlluminationIntensity','5') + mode_16.set('CameraSN','') + mode_16.set('ZOffset','0.0') + mode_16.set('PixelFormat','default') + mode_16.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_16.set('EmissionFilterPosition','1') + + mode_21 = ET.SubElement(top,'mode') + mode_21.set('ID','21') + mode_21.set('Name','BF LED matrix full_RGB') + mode_21.set('ExposureTime','12') + mode_21.set('AnalogGain','0') + mode_21.set('IlluminationSource','0') + mode_21.set('IlluminationIntensity','5') + mode_21.set('CameraSN','') + mode_21.set('ZOffset','0.0') + mode_21.set('PixelFormat','default') + mode_21.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_21.set('EmissionFilterPosition','1') mode_20 = ET.SubElement(top,'mode') mode_20.set('ID','20') @@ -182,6 +246,7 @@ def generate_default_configuration(filename): mode_20.set('ZOffset','0.0') mode_20.set('PixelFormat','default') 
mode_20.set('_PixelFormat_options','[default,MONO8,MONO12,MONO14,MONO16,BAYER_RG8,BAYER_RG12]') + mode_20.set('EmissionFilterPosition','1') tree = ET.ElementTree(top) tree.write(filename,encoding="utf-8", xml_declaration=True, pretty_print=True) diff --git a/software/control/widgets.py b/software/control/widgets.py index 42dc86264..44d26f370 100644 --- a/software/control/widgets.py +++ b/software/control/widgets.py @@ -1,22 +1,32 @@ +import os +import sys + # set QT_API environment variable -import os os.environ["QT_API"] = "pyqt5" -import qtpy - -import locale # qt libraries +import qtpy from qtpy.QtCore import * from qtpy.QtWidgets import * from qtpy.QtGui import * import pyqtgraph as pg - import pandas as pd - +import napari +from napari.utils.colormaps import Colormap, AVAILABLE_COLORMAPS +import re +import cv2 +import math +import locale +import time from datetime import datetime - +import itertools +import numpy as np +from scipy.spatial import Delaunay +import shutil from control._def import * +from PIL import Image, ImageDraw, ImageFont + class WrapperWindow(QMainWindow): def __init__(self, content_widget, *args, **kwargs): @@ -31,6 +41,7 @@ def closeEvent(self, event): def closeForReal(self, event): super().closeEvent(event) + class CollapsibleGroupBox(QGroupBox): def __init__(self, title): super(CollapsibleGroupBox,self).__init__(title) @@ -48,12 +59,13 @@ def __init__(self, title): def toggle_content(self,state): self.content_widget.setVisible(state) + class ConfigEditorForAcquisitions(QDialog): def __init__(self, configManager, only_z_offset=True): super().__init__() self.config = configManager - + self.only_z_offset=only_z_offset self.scroll_area = QScrollArea() @@ -179,7 +191,6 @@ def load_config_from_file(self,only_z_offset=None): self.init_ui(only_z_offset) - class ConfigEditor(QDialog): def __init__(self, config): super().__init__() @@ -224,7 +235,7 @@ def init_ui(self): for option in self.config.options(section): if option.startswith('_') and 
option.endswith('_options'): - continue + continue option_value = self.config.get(section, option) option_name = QLabel(option) option_layout = QHBoxLayout() @@ -290,7 +301,7 @@ def __init__(self, config, original_filepath, main_window): super().__init__(config) self.original_filepath = original_filepath self.main_window = main_window - + self.apply_exit_button = QPushButton("Apply and Exit") self.apply_exit_button.clicked.connect(self.apply_and_exit) @@ -306,88 +317,142 @@ def apply_and_exit(self): pass self.close() + class SpinningDiskConfocalWidget(QWidget): def __init__(self, xlight, config_manager=None): super(SpinningDiskConfocalWidget,self).__init__() - + self.config_manager = config_manager self.xlight = xlight self.init_ui() - + self.dropdown_emission_filter.setCurrentText(str(self.xlight.get_emission_filter())) self.dropdown_dichroic.setCurrentText(str(self.xlight.get_dichroic())) self.dropdown_emission_filter.currentIndexChanged.connect(self.set_emission_filter) self.dropdown_dichroic.currentIndexChanged.connect(self.set_dichroic) - - self.disk_position_state = self.xlight.get_disk_position() + + self.disk_position_state = self.xlight.get_disk_position() if self.disk_position_state == 1: self.btn_toggle_widefield.setText("Switch to Widefield") if self.config_manager is not None: - if self.disk_position_state ==1: + if self.disk_position_state == 1: self.config_manager.config_filename = "confocal_configurations.xml" else: self.config_manager.config_filename = "widefield_configurations.xml" - self.config_manager.configurations = [] + self.config_manager.configurations = [] self.config_manager.read_configurations() - - self.btn_toggle_widefield.clicked.connect(self.toggle_disk_position) + self.btn_toggle_widefield.clicked.connect(self.toggle_disk_position) self.btn_toggle_motor.clicked.connect(self.toggle_motor) + self.slider_illumination_iris.valueChanged.connect(self.update_illumination_iris) + 
self.spinbox_illumination_iris.valueChanged.connect(self.update_illumination_iris) + self.slider_emission_iris.valueChanged.connect(self.update_emission_iris) + self.spinbox_emission_iris.valueChanged.connect(self.update_emission_iris) + self.dropdown_filter_slider.valueChanged.connect(self.set_filter_slider) + def init_ui(self): - - emissionFilterLayout = QHBoxLayout() - emissionFilterLayout.addWidget(QLabel("Emission Filter Position")) + emissionFilterLayout = QHBoxLayout() + emissionFilterLayout.addWidget(QLabel("Emission Position")) self.dropdown_emission_filter = QComboBox(self) self.dropdown_emission_filter.addItems([str(i+1) for i in range(8)]) - emissionFilterLayout.addWidget(self.dropdown_emission_filter) - dichroicLayout = QHBoxLayout() dichroicLayout.addWidget(QLabel("Dichroic Position")) - self.dropdown_dichroic = QComboBox(self) self.dropdown_dichroic.addItems([str(i+1) for i in range(5)]) - dichroicLayout.addWidget(self.dropdown_dichroic) - dropdownLayout = QVBoxLayout() - - dropdownLayout.addLayout(dichroicLayout) - dropdownLayout.addLayout(emissionFilterLayout) - dropdownLayout.addStretch() - + illuminationIrisLayout = QHBoxLayout() + illuminationIrisLayout.addWidget(QLabel("Illumination Iris")) + self.slider_illumination_iris = QSlider(Qt.Horizontal) + self.slider_illumination_iris.setRange(0, 100) + self.spinbox_illumination_iris = QSpinBox() + self.spinbox_illumination_iris.setRange(0, 100) + self.spinbox_illumination_iris.setKeyboardTracking(False) + illuminationIrisLayout.addWidget(self.slider_illumination_iris) + illuminationIrisLayout.addWidget(self.spinbox_illumination_iris) + + emissionIrisLayout = QHBoxLayout() + emissionIrisLayout.addWidget(QLabel("Emission Iris")) + self.slider_emission_iris = QSlider(Qt.Horizontal) + self.slider_emission_iris.setRange(0, 100) + self.spinbox_emission_iris = QSpinBox() + self.spinbox_emission_iris.setRange(0, 100) + self.spinbox_emission_iris.setKeyboardTracking(False) + 
emissionIrisLayout.addWidget(self.slider_emission_iris) + emissionIrisLayout.addWidget(self.spinbox_emission_iris) + + filterSliderLayout = QHBoxLayout() + filterSliderLayout.addWidget(QLabel("Filter Slider")) + #self.dropdown_filter_slider = QComboBox(self) + #self.dropdown_filter_slider.addItems(["0", "1", "2", "3"]) + self.dropdown_filter_slider = QSlider(Qt.Horizontal) + self.dropdown_filter_slider.setRange(0, 3) + self.dropdown_filter_slider.setTickPosition(QSlider.TicksBelow) + self.dropdown_filter_slider.setTickInterval(1) + filterSliderLayout.addWidget(self.dropdown_filter_slider) self.btn_toggle_widefield = QPushButton("Switch to Confocal") self.btn_toggle_motor = QPushButton("Disk Motor On") self.btn_toggle_motor.setCheckable(True) - layout = QVBoxLayout(self) - layout.addWidget(self.btn_toggle_motor) - - layout.addWidget(self.btn_toggle_widefield) - layout.addLayout(dropdownLayout) + layout = QGridLayout(self) + + # row 1 + if self.xlight.has_dichroic_filter_slider: + layout.addLayout(filterSliderLayout,0,0,1,2) + layout.addWidget(self.btn_toggle_motor,0,2) + layout.addWidget(self.btn_toggle_widefield,0,3) + + # row 2 + if self.xlight.has_dichroic_filters_wheel: + layout.addWidget(QLabel("Dichroic Filter Wheel"),1,0) + layout.addWidget(self.dropdown_dichroic,1,1) + if self.xlight.has_illumination_iris_diaphragm: + layout.addLayout(illuminationIrisLayout,1,2,1,2) + + # row 3 + if self.xlight.has_emission_filters_wheel: + layout.addWidget(QLabel("Emission Filter Wheel"),2,0) + layout.addWidget(self.dropdown_emission_filter,2,1) + if self.xlight.has_emission_iris_diaphragm: + layout.addLayout(emissionIrisLayout,2,2,1,2) + + layout.setColumnStretch(2,1) + layout.setColumnStretch(3,1) self.setLayout(layout) + def disable_all_buttons(self): self.dropdown_emission_filter.setEnabled(False) self.dropdown_dichroic.setEnabled(False) self.btn_toggle_widefield.setEnabled(False) self.btn_toggle_motor.setEnabled(False) + self.slider_illumination_iris.setEnabled(False) 
+ self.spinbox_illumination_iris.setEnabled(False) + self.slider_emission_iris.setEnabled(False) + self.spinbox_emission_iris.setEnabled(False) + self.dropdown_filter_slider.setEnabled(False) def enable_all_buttons(self): self.dropdown_emission_filter.setEnabled(True) self.dropdown_dichroic.setEnabled(True) self.btn_toggle_widefield.setEnabled(True) self.btn_toggle_motor.setEnabled(True) + self.slider_illumination_iris.setEnabled(True) + self.spinbox_illumination_iris.setEnabled(True) + self.slider_emission_iris.setEnabled(True) + self.spinbox_emission_iris.setEnabled(True) + self.dropdown_filter_slider.setEnabled(True) def toggle_disk_position(self): self.disable_all_buttons() @@ -402,7 +467,7 @@ def toggle_disk_position(self): self.config_manager.config_filename = "confocal_configurations.xml" else: self.config_manager.config_filename = "widefield_configurations.xml" - self.config_manager.configurations = [] + self.config_manager.configurations = [] self.config_manager.read_configurations() self.enable_all_buttons() @@ -419,48 +484,60 @@ def set_emission_filter(self, index): selected_pos = self.dropdown_emission_filter.currentText() self.xlight.set_emission_filter(selected_pos) self.enable_all_buttons() - + def set_dichroic(self, index): self.disable_all_buttons() selected_pos = self.dropdown_dichroic.currentText() self.xlight.set_dichroic(selected_pos) self.enable_all_buttons() - + + def update_illumination_iris(self, value): + self.disable_all_buttons() + # Update both slider and spinbox to ensure they're in sync + self.slider_illumination_iris.setValue(value) + self.spinbox_illumination_iris.setValue(value) + self.xlight.set_illumination_iris(value) + self.enable_all_buttons() + + def update_emission_iris(self, value): + self.disable_all_buttons() + # Update both slider and spinbox to ensure they're in sync + self.slider_emission_iris.setValue(value) + self.spinbox_emission_iris.setValue(value) + self.xlight.set_emission_iris(value) + self.enable_all_buttons() 
+ + def set_filter_slider(self, index): + self.disable_all_buttons() + position = str(self.dropdown_filter_slider.value()) + self.xlight.set_filter_slider(position) + self.enable_all_buttons() + + class ObjectivesWidget(QWidget): + signal_objective_changed = Signal() + def __init__(self, objective_store): super(ObjectivesWidget, self).__init__() - self.objectiveStore = objective_store - self.init_ui() - self.dropdown.setCurrentText(self.objectiveStore.current_objective) def init_ui(self): - # Dropdown for selecting keys self.dropdown = QComboBox(self) + self.dropdown.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.dropdown.addItems(self.objectiveStore.objectives_dict.keys()) - self.dropdown.currentIndexChanged.connect(self.display_objective) - - # TextBrowser to display key-value pairs - #self.text_browser = QTextBrowser(self) - # Layout - dropdownLayout = QHBoxLayout() - dropdownLabel = QLabel("Objectives:") - dropdownLayout.addWidget(dropdownLabel) - dropdownLayout.addWidget(self.dropdown) - #textLayout = QHBoxLayout() - #textLayout.addWidget(self.text_browser) - layout = QVBoxLayout(self) - layout.addLayout(dropdownLayout) - #layout.addLayout(textLayout) + self.dropdown.currentTextChanged.connect(self.on_objective_changed) + + layout = QHBoxLayout() + layout.addWidget(QLabel("Objective Lens")) + layout.addWidget(self.dropdown) + self.setLayout(layout) + + def on_objective_changed(self, objective_name): + self.objectiveStore.set_current_objective(objective_name) + self.signal_objective_changed.emit() - def display_objective(self, index): - selected_key = self.dropdown.currentText() - objective_data = self.objectiveStore.objectives_dict.get(selected_key, {}) - #text = "\n".join([f"{key}: {value}" for key, value in objective_data.items()]) - self.objectiveStore.current_objective = selected_key - #self.text_browser.setPlainText(text) class FocusMapWidget(QWidget): @@ -486,7 +563,7 @@ def init_ui(self): button_layout.addWidget(self.btn_clear_focusmap) 
layout.addLayout(button_layout) - + layout.addWidget(self.btn_enable_focusmap) self.setLayout(layout) @@ -532,8 +609,6 @@ def update_focusmap_display(self): except IndexError: pass - - def enable_focusmap(self): self.disable_all_buttons() if self.autofocusController.use_focus_map == False: @@ -555,29 +630,30 @@ def add_to_focusmap(self): self.update_focusmap_display() self.enable_all_buttons() + class CameraSettingsWidget(QFrame): - def __init__(self, camera, include_gain_exposure_time = True, include_camera_temperature_setting = False, main=None, *args, **kwargs): + def __init__(self, camera, include_gain_exposure_time = False, include_camera_temperature_setting = False, include_camera_auto_wb_setting = False, main=None, *args, **kwargs): super().__init__(*args, **kwargs) self.camera = camera - self.add_components(include_gain_exposure_time,include_camera_temperature_setting) + self.add_components(include_gain_exposure_time,include_camera_temperature_setting,include_camera_auto_wb_setting) # set frame style self.setFrameStyle(QFrame.Panel | QFrame.Raised) - def add_components(self,include_gain_exposure_time,include_camera_temperature_setting): + def add_components(self,include_gain_exposure_time,include_camera_temperature_setting,include_camera_auto_wb_setting): # add buttons and input fields self.entry_exposureTime = QDoubleSpinBox() - self.entry_exposureTime.setMinimum(self.camera.EXPOSURE_TIME_MS_MIN) - self.entry_exposureTime.setMaximum(self.camera.EXPOSURE_TIME_MS_MAX) + self.entry_exposureTime.setMinimum(self.camera.EXPOSURE_TIME_MS_MIN) + self.entry_exposureTime.setMaximum(self.camera.EXPOSURE_TIME_MS_MAX) self.entry_exposureTime.setSingleStep(1) self.entry_exposureTime.setValue(20) self.camera.set_exposure_time(20) self.entry_analogGain = QDoubleSpinBox() - self.entry_analogGain.setMinimum(self.camera.GAIN_MIN) - self.entry_analogGain.setMaximum(self.camera.GAIN_MAX) + self.entry_analogGain.setMinimum(self.camera.GAIN_MIN) + 
self.entry_analogGain.setMaximum(self.camera.GAIN_MAX) self.entry_analogGain.setSingleStep(self.camera.GAIN_STEP) self.entry_analogGain.setValue(0) self.camera.set_analog_gain(0) @@ -590,6 +666,7 @@ def add_components(self,include_gain_exposure_time,include_camera_temperature_se print("setting camera's default pixel format") self.camera.set_pixel_format(DEFAULT_PIXEL_FORMAT) self.dropdown_pixelFormat.setCurrentText(DEFAULT_PIXEL_FORMAT) + self.dropdown_pixelFormat.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)) # to do: load and save pixel format in configurations self.entry_ROI_offset_x = QSpinBox() @@ -638,55 +715,104 @@ def add_components(self,include_gain_exposure_time,include_camera_temperature_se self.entry_ROI_width.valueChanged.connect(self.set_Width) # layout - grid_ctrl = QGridLayout() + self.camera_layout = QVBoxLayout() if include_gain_exposure_time: - grid_ctrl.addWidget(QLabel('Exposure Time (ms)'), 0,0) - grid_ctrl.addWidget(self.entry_exposureTime, 0,1) - grid_ctrl.addWidget(QLabel('Analog Gain'), 1,0) - grid_ctrl.addWidget(self.entry_analogGain, 1,1) - grid_ctrl.addWidget(QLabel('Pixel Format'), 2,0) - grid_ctrl.addWidget(self.dropdown_pixelFormat, 2,1) + exposure_line = QHBoxLayout() + exposure_line.addWidget(QLabel('Exposure Time (ms)')) + exposure_line.addWidget(self.entry_exposureTime) + self.camera_layout.addLayout(exposure_line) + gain_line = QHBoxLayout() + gain_line.addWidget(QLabel('Analog Gain')) + gain_line.addWidget(self.entry_analogGain) + self.camera_layout.addLayout(gain_line) + + format_line = QHBoxLayout() + format_line.addWidget(QLabel('Pixel Format')) + format_line.addWidget(self.dropdown_pixelFormat) try: current_res = self.camera.resolution current_res_string = "x".join([str(current_res[0]),str(current_res[1])]) - res_options = [f"{res[0]}x{res[1]}" for res in self.camera.res_list] + res_options = [f"{res[0]} x {res[1]}" for res in self.camera.res_list] self.dropdown_res = QComboBox() 
self.dropdown_res.addItems(res_options) self.dropdown_res.setCurrentText(current_res_string) self.dropdown_res.currentTextChanged.connect(self.change_full_res) - grid_ctrl.addWidget(QLabel("Full Resolution"), 2,2) - grid_ctrl.addWidget(self.dropdown_res, 2,3) - except AttributeError: + except AttributeError as ae: + print(ae) + self.dropdown_res = QComboBox() + self.dropdown_res.setEnabled(False) pass + format_line.addWidget(QLabel(" FOV Resolution")) + format_line.addWidget(self.dropdown_res) + self.camera_layout.addLayout(format_line) + if include_camera_temperature_setting: - grid_ctrl.addWidget(QLabel('Set Temperature (C)'),3,0) - grid_ctrl.addWidget(self.entry_temperature,3,1) - grid_ctrl.addWidget(QLabel('Actual Temperature (C)'),3,2) - grid_ctrl.addWidget(self.label_temperature_measured,3,3) + temp_line = QHBoxLayout() + temp_line.addWidget(QLabel('Set Temperature (C)')) + temp_line.addWidget(self.entry_temperature) + temp_line.addWidget(QLabel('Actual Temperature (C)')) + temp_line.addWidget(self.label_temperature_measured) try: self.entry_temperature.valueChanged.connect(self.set_temperature) self.camera.set_temperature_reading_callback(self.update_measured_temperature) except AttributeError: pass + self.camera_layout.addLayout(temp_line) + + roi_line = QHBoxLayout() + roi_line.addWidget(QLabel('Height')) + roi_line.addWidget(self.entry_ROI_height) + roi_line.addStretch() + roi_line.addWidget(QLabel('Y-offset')) + roi_line.addWidget(self.entry_ROI_offset_y) + roi_line.addStretch() + roi_line.addWidget(QLabel('Width')) + roi_line.addWidget(self.entry_ROI_width) + roi_line.addStretch() + roi_line.addWidget(QLabel('X-offset')) + roi_line.addWidget(self.entry_ROI_offset_x) + self.camera_layout.addLayout(roi_line) + + if DISPLAY_TOUPCAMER_BLACKLEVEL_SETTINGS is True: + blacklevel_line = QHBoxLayout() + blacklevel_line.addWidget(QLabel('Black Level')) + + self.label_blackLevel = QSpinBox() + self.label_blackLevel.setMinimum(0) + 
self.label_blackLevel.setMaximum(31) + self.label_blackLevel.valueChanged.connect(self.update_blacklevel) + self.label_blackLevel.setSuffix(" ") + + blacklevel_line.addWidget(self.label_blackLevel) + + self.camera_layout.addLayout(blacklevel_line) + + if include_camera_auto_wb_setting: + is_color = False + try: + is_color = self.camera.get_is_color() + except AttributeError: + pass - hbox1 = QHBoxLayout() - hbox1.addWidget(QLabel('ROI')) - hbox1.addStretch() - hbox1.addWidget(QLabel('height')) - hbox1.addWidget(self.entry_ROI_height) - hbox1.addWidget(QLabel('width')) - hbox1.addWidget(self.entry_ROI_width) - - hbox1.addWidget(QLabel('offset y')) - hbox1.addWidget(self.entry_ROI_offset_y) - hbox1.addWidget(QLabel('offset x')) - hbox1.addWidget(self.entry_ROI_offset_x) + if is_color is True: + # auto white balance + self.btn_auto_wb = QPushButton('Auto White Balance') + self.btn_auto_wb.setCheckable(True) + self.btn_auto_wb.setChecked(False) + self.btn_auto_wb.clicked.connect(self.toggle_auto_wb) + print(self.camera.get_balance_white_auto()) - self.grid = QGridLayout() - self.grid.addLayout(grid_ctrl,0,0) - self.grid.addLayout(hbox1,1,0) - self.setLayout(self.grid) + self.camera_layout.addWidget(self.btn_auto_wb) + + self.setLayout(self.camera_layout) + + def toggle_auto_wb(self,pressed): + # 0: OFF 1:CONTINUOUS 2:ONCE + if pressed: + self.camera.set_balance_white_auto(1) + else: + self.camera.set_balance_white_auto(0) def set_exposure_time(self,exposure_time): self.entry_exposureTime.setValue(exposure_time) @@ -745,7 +871,7 @@ def change_full_res(self, index): self.entry_ROI_offset_x.setMaximum(self.camera.WidthMax) self.entry_ROI_offset_y.setMaximum(self.camera.HeightMax) - + self.entry_ROI_offset_x.setValue(int(8*self.camera.OffsetX//8)) self.entry_ROI_offset_y.setValue(int(8*self.camera.OffsetY//8)) self.entry_ROI_height.setValue(int(8*self.camera.Height//8)) @@ -756,12 +882,22 @@ def change_full_res(self, index): self.entry_ROI_height.blockSignals(False) 
self.entry_ROI_width.blockSignals(False) + def update_blacklevel(self, blacklevel): + try: + self.camera.set_blacklevel(blacklevel) + except AttributeError: + pass + class LiveControlWidget(QFrame): + signal_newExposureTime = Signal(float) signal_newAnalogGain = Signal(float) signal_autoLevelSetting = Signal(bool) - def __init__(self, streamHandler, liveController, configurationManager=None, show_trigger_options=True, show_display_options=True, show_autolevel = False, autolevel=False, main=None, *args, **kwargs): + signal_live_configuration = Signal(object) + signal_start_live = Signal() + + def __init__(self, streamHandler, liveController, configurationManager=None, show_trigger_options=True, show_display_options=False, show_autolevel = False, autolevel=False, stretch=True, main=None, *args, **kwargs): super().__init__(*args, **kwargs) self.liveController = liveController self.streamHandler = streamHandler @@ -770,73 +906,85 @@ def __init__(self, streamHandler, liveController, configurationManager=None, sho self.fps_display = 10 self.liveController.set_trigger_fps(self.fps_trigger) self.streamHandler.set_display_fps(self.fps_display) - + self.triggerMode = TriggerMode.SOFTWARE # note that this references the object in self.configurationManager.configurations self.currentConfiguration = self.configurationManager.configurations[0] - self.add_components(show_trigger_options,show_display_options,show_autolevel,autolevel) + self.add_components(show_trigger_options,show_display_options,show_autolevel,autolevel,stretch) self.setFrameStyle(QFrame.Panel | QFrame.Raised) self.update_microscope_mode_by_name(self.currentConfiguration.name) self.is_switching_mode = False # flag used to prevent from settings being set by twice - from both mode change slot and value change slot; another way is to use blockSignals(True) - def add_components(self,show_trigger_options,show_display_options,show_autolevel,autolevel): + def 
add_components(self,show_trigger_options,show_display_options,show_autolevel,autolevel,stretch): # line 0: trigger mode self.triggerMode = None self.dropdown_triggerManu = QComboBox() self.dropdown_triggerManu.addItems([TriggerMode.SOFTWARE,TriggerMode.HARDWARE,TriggerMode.CONTINUOUS]) + sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) + self.dropdown_triggerManu.setSizePolicy(sizePolicy) # line 1: fps self.entry_triggerFPS = QDoubleSpinBox() - self.entry_triggerFPS.setMinimum(0.02) - self.entry_triggerFPS.setMaximum(1000) + self.entry_triggerFPS.setMinimum(0.02) + self.entry_triggerFPS.setMaximum(1000) self.entry_triggerFPS.setSingleStep(1) self.entry_triggerFPS.setValue(self.fps_trigger) + self.entry_triggerFPS.setDecimals(0) - # line 2: choose microscope mode / toggle live mode + # line 2: choose microscope mode / toggle live mode self.dropdown_modeSelection = QComboBox() for microscope_configuration in self.configurationManager.configurations: self.dropdown_modeSelection.addItems([microscope_configuration.name]) self.dropdown_modeSelection.setCurrentText(self.currentConfiguration.name) + self.dropdown_modeSelection.setSizePolicy(sizePolicy) - self.btn_live = QPushButton("Live") + self.btn_live = QPushButton("Start Live") self.btn_live.setCheckable(True) self.btn_live.setChecked(False) self.btn_live.setDefault(False) + self.btn_live.setStyleSheet("background-color: #C2C2FF") + self.btn_live.setSizePolicy(sizePolicy) # line 3: exposure time and analog gain associated with the current mode self.entry_exposureTime = QDoubleSpinBox() - self.entry_exposureTime.setMinimum(self.liveController.camera.EXPOSURE_TIME_MS_MIN) - self.entry_exposureTime.setMaximum(self.liveController.camera.EXPOSURE_TIME_MS_MAX) + self.entry_exposureTime.setMinimum(self.liveController.camera.EXPOSURE_TIME_MS_MIN) + self.entry_exposureTime.setMaximum(self.liveController.camera.EXPOSURE_TIME_MS_MAX) self.entry_exposureTime.setSingleStep(1) + self.entry_exposureTime.setSuffix(' 
ms') self.entry_exposureTime.setValue(0) + self.entry_exposureTime.setSizePolicy(sizePolicy) self.entry_analogGain = QDoubleSpinBox() self.entry_analogGain = QDoubleSpinBox() - self.entry_analogGain.setMinimum(0) - self.entry_analogGain.setMaximum(24) + self.entry_analogGain.setMinimum(0) + self.entry_analogGain.setMaximum(24) + # self.entry_analogGain.setSuffix('x') self.entry_analogGain.setSingleStep(0.1) self.entry_analogGain.setValue(0) + self.entry_analogGain.setSizePolicy(sizePolicy) self.slider_illuminationIntensity = QSlider(Qt.Horizontal) self.slider_illuminationIntensity.setTickPosition(QSlider.TicksBelow) self.slider_illuminationIntensity.setMinimum(0) self.slider_illuminationIntensity.setMaximum(100) self.slider_illuminationIntensity.setValue(100) - self.slider_illuminationIntensity.setSingleStep(1) + self.slider_illuminationIntensity.setSingleStep(2) self.entry_illuminationIntensity = QDoubleSpinBox() - self.entry_illuminationIntensity.setMinimum(0) - self.entry_illuminationIntensity.setMaximum(100) + self.entry_illuminationIntensity.setMinimum(0) + self.entry_illuminationIntensity.setMaximum(100) self.entry_illuminationIntensity.setSingleStep(1) + self.entry_illuminationIntensity.setSuffix('%') self.entry_illuminationIntensity.setValue(100) # line 4: display fps and resolution scaling self.entry_displayFPS = QDoubleSpinBox() - self.entry_displayFPS.setMinimum(1) - self.entry_displayFPS.setMaximum(240) + self.entry_displayFPS.setMinimum(1) + self.entry_displayFPS.setMaximum(240) self.entry_displayFPS.setSingleStep(1) + self.entry_displayFPS.setDecimals(0) self.entry_displayFPS.setValue(self.fps_display) self.slider_resolutionScaling = QSlider(Qt.Horizontal) @@ -846,11 +994,34 @@ def add_components(self,show_trigger_options,show_display_options,show_autolevel self.slider_resolutionScaling.setValue(DEFAULT_DISPLAY_CROP) self.slider_resolutionScaling.setSingleStep(10) + self.label_resolutionScaling = QSpinBox() + 
self.label_resolutionScaling.setMinimum(10) + self.label_resolutionScaling.setMaximum(100) + self.label_resolutionScaling.setValue(self.slider_resolutionScaling.value()) + self.label_resolutionScaling.setSuffix(" %") + self.slider_resolutionScaling.setSingleStep(5) + + self.slider_resolutionScaling.valueChanged.connect(lambda v: self.label_resolutionScaling.setValue(round(v))) + self.label_resolutionScaling.valueChanged.connect(lambda v: self.slider_resolutionScaling.setValue(round(v))) + # autolevel self.btn_autolevel = QPushButton('Autolevel') self.btn_autolevel.setCheckable(True) self.btn_autolevel.setChecked(autolevel) - + + # Determine the maximum width needed + self.entry_illuminationIntensity.setMinimumWidth(self.btn_live.sizeHint().width()) + self.btn_autolevel.setMinimumWidth(self.btn_autolevel.sizeHint().width()) + + max_width = max( + self.btn_autolevel.minimumWidth(), + self.entry_illuminationIntensity.minimumWidth() + ) + + # Set the fixed width for all three widgets + self.entry_illuminationIntensity.setFixedWidth(max_width) + self.btn_autolevel.setFixedWidth(max_width) + # connections self.entry_triggerFPS.valueChanged.connect(self.liveController.set_trigger_fps) self.entry_displayFPS.valueChanged.connect(self.streamHandler.set_display_fps) @@ -864,38 +1035,48 @@ def add_components(self,show_trigger_options,show_display_options,show_autolevel self.entry_illuminationIntensity.valueChanged.connect(self.update_config_illumination_intensity) self.entry_illuminationIntensity.valueChanged.connect(lambda x: self.slider_illuminationIntensity.setValue(int(x))) self.slider_illuminationIntensity.valueChanged.connect(self.entry_illuminationIntensity.setValue) - self.btn_autolevel.clicked.connect(self.signal_autoLevelSetting.emit) + self.btn_autolevel.toggled.connect(self.signal_autoLevelSetting.emit) # layout - grid_line0 = QGridLayout() - grid_line0.addWidget(QLabel('Trigger Mode'), 0,0) - grid_line0.addWidget(self.dropdown_triggerManu, 0,1) - 
grid_line0.addWidget(QLabel('Trigger FPS'), 0,2) - grid_line0.addWidget(self.entry_triggerFPS, 0,3) - - grid_line1 = QGridLayout() - grid_line1.addWidget(QLabel('Microscope Configuration'), 0,0) - grid_line1.addWidget(self.dropdown_modeSelection, 0,1) - grid_line1.addWidget(self.btn_live, 0,2) + grid_line1 = QHBoxLayout() + grid_line1.addWidget(QLabel('Live Configuration')) + grid_line1.addWidget(self.dropdown_modeSelection, 2) + grid_line1.addWidget(self.btn_live, 1) + + grid_line2 = QHBoxLayout() + grid_line2.addWidget(QLabel('Exposure Time')) + grid_line2.addWidget(self.entry_exposureTime) + gain_label = QLabel(' Analog Gain') + gain_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter) + grid_line2.addWidget(gain_label) + grid_line2.addWidget(self.entry_analogGain) + if show_autolevel: + grid_line2.addWidget(self.btn_autolevel) - grid_line2 = QGridLayout() - grid_line2.addWidget(QLabel('Exposure Time (ms)'), 0,0) - grid_line2.addWidget(self.entry_exposureTime, 0,1) - grid_line2.addWidget(QLabel('Analog Gain'), 0,2) - grid_line2.addWidget(self.entry_analogGain, 0,3) + grid_line4 = QHBoxLayout() + grid_line4.addWidget(QLabel('Illumination')) + grid_line4.addWidget(self.slider_illuminationIntensity) + grid_line4.addWidget(self.entry_illuminationIntensity) - grid_line4 = QGridLayout() - grid_line4.addWidget(QLabel('Illumination'), 0,0) - grid_line4.addWidget(self.slider_illuminationIntensity, 0,1) - grid_line4.addWidget(self.entry_illuminationIntensity, 0,2) + grid_line0 = QHBoxLayout() + if show_trigger_options: + grid_line0.addWidget(QLabel('Trigger Mode')) + grid_line0.addWidget(self.dropdown_triggerManu) + grid_line0.addWidget(QLabel('Trigger FPS')) + grid_line0.addWidget(self.entry_triggerFPS) - grid_line3 = QGridLayout() - grid_line3.addWidget(QLabel('Display FPS'), 0,0) - grid_line3.addWidget(self.entry_displayFPS, 0,1) - grid_line3.addWidget(QLabel('Display Resolution'), 0,2) - grid_line3.addWidget(self.slider_resolutionScaling,0,3) - if show_autolevel: - 
grid_line3.addWidget(self.btn_autolevel,0,4) + grid_line05 = QHBoxLayout() + show_display_fps = False + if show_display_options: + resolution_label = QLabel('Display Resolution') + resolution_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter) + grid_line05.addWidget(resolution_label) + grid_line05.addWidget(self.slider_resolutionScaling) + if show_display_fps: + grid_line05.addWidget(QLabel('Display FPS')) + grid_line05.addWidget(self.entry_displayFPS) + else: + grid_line05.addWidget(self.label_resolutionScaling) self.grid = QVBoxLayout() if show_trigger_options: @@ -904,15 +1085,23 @@ def add_components(self,show_trigger_options,show_display_options,show_autolevel self.grid.addLayout(grid_line2) self.grid.addLayout(grid_line4) if show_display_options: - self.grid.addLayout(grid_line3) - self.grid.addStretch() + self.grid.addLayout(grid_line05) + if not stretch: + self.grid.addStretch() self.setLayout(self.grid) + def toggle_live(self,pressed): if pressed: self.liveController.start_live() + self.btn_live.setText('Stop Live') + self.signal_start_live.emit() else: self.liveController.stop_live() + self.btn_live.setText('Start Live') + + def toggle_autolevel(self,autolevel_on): + self.btn_autolevel.setChecked(autolevel_on) def update_camera_settings(self): self.signal_newAnalogGain.emit(self.entry_analogGain.value()) @@ -922,6 +1111,7 @@ def update_microscope_mode_by_name(self,current_microscope_mode_name): self.is_switching_mode = True # identify the mode selected (note that this references the object in self.configurationManager.configurations) self.currentConfiguration = next((config for config in self.configurationManager.configurations if config.name == current_microscope_mode_name), None) + self.signal_live_configuration.emit(self.currentConfiguration) # update the microscope to the current configuration self.liveController.set_microscope_mode(self.currentConfiguration) # update the exposure time and analog gain settings according to the selected configuration 
@@ -959,6 +1149,109 @@ def set_trigger_mode(self,trigger_mode): self.dropdown_triggerManu.setCurrentText(trigger_mode) self.liveController.set_trigger_mode(self.dropdown_triggerManu.currentText()) + +class PiezoWidget(QFrame): + def __init__(self, navigationController, *args, **kwargs): + super().__init__(*args, **kwargs) + self.navigationController = navigationController + self.slider_value = 0.00 + self.add_components() + + def add_components(self): + # Row 1: Slider and Double Spin Box for direct control + self.slider = QSlider(Qt.Horizontal, self) + self.slider.setMinimum(0) + self.slider.setMaximum(int(OBJECTIVE_PIEZO_RANGE_UM * 100)) # Multiplied by 100 for 0.01 precision + + self.spinBox = QDoubleSpinBox(self) + self.spinBox.setRange(0.0, OBJECTIVE_PIEZO_RANGE_UM) + self.spinBox.setDecimals(2) + self.spinBox.setSingleStep(0.01) + self.spinBox.setSuffix(' μm') + + # Row 3: Home Button + self.home_btn = QPushButton(f" Set to {OBJECTIVE_PIEZO_HOME_UM} μm ", self) + + hbox1 = QHBoxLayout() + hbox1.addWidget(self.home_btn) + hbox1.addWidget(self.slider) + hbox1.addWidget(self.spinBox) + + # Row 2: Increment Double Spin Box, Move Up and Move Down Buttons + self.increment_spinBox = QDoubleSpinBox(self) + self.increment_spinBox.setRange(0.0, 100.0) + self.increment_spinBox.setDecimals(2) + self.increment_spinBox.setSingleStep(1) + self.increment_spinBox.setValue(1.00) + self.increment_spinBox.setSuffix(' μm') + self.move_up_btn = QPushButton("Move Up", self) + self.move_down_btn = QPushButton("Move Down", self) + + hbox2 = QHBoxLayout() + hbox2.addWidget(self.increment_spinBox) + hbox2.addWidget(self.move_up_btn) + hbox2.addWidget(self.move_down_btn) + + # Vertical Layout to include all HBoxes + vbox = QVBoxLayout() + vbox.addLayout(hbox1) + vbox.addLayout(hbox2) + + self.setLayout(vbox) + + # Connect signals and slots + self.slider.valueChanged.connect(self.update_from_slider) + self.spinBox.valueChanged.connect(self.update_from_spinBox) + 
self.move_up_btn.clicked.connect(lambda: self.adjust_position(True)) + self.move_down_btn.clicked.connect(lambda: self.adjust_position(False)) + self.home_btn.clicked.connect(self.home) + + def update_from_slider(self, value): + self.slider_value = value / 100 # Convert back to float with two decimal places + self.update_spinBox() + self.update_piezo_position() + + def update_from_spinBox(self, value): + self.slider_value = value + self.update_slider() + self.update_piezo_position() + + def update_spinBox(self): + self.spinBox.blockSignals(True) + self.spinBox.setValue(self.slider_value) + self.spinBox.blockSignals(False) + + def update_slider(self): + self.slider.blockSignals(True) + self.slider.setValue(int(self.slider_value * 100)) + self.slider.blockSignals(False) + + def update_piezo_position(self): + displacement_um = self.slider_value + self.navigationController.set_piezo_um(displacement_um) + + def adjust_position(self, up): + increment = self.increment_spinBox.value() + if up: + self.slider_value = min(OBJECTIVE_PIEZO_RANGE_UM, self.slider_value + increment) + else: + self.slider_value = max(0, self.slider_value - increment) + self.update_spinBox() + self.update_slider() + self.update_piezo_position() + + def home(self): + self.slider_value = OBJECTIVE_PIEZO_HOME_UM + self.update_spinBox() + self.update_slider() + self.update_piezo_position() + + def update_displacement_um_display(self, displacement): + self.slider_value = round(displacement, 2) + self.update_spinBox() + self.update_slider() + + class RecordingWidget(QFrame): def __init__(self, streamHandler, imageSaver, main=None, *args, **kwargs): super().__init__(*args, **kwargs) @@ -972,7 +1265,7 @@ def add_components(self): self.btn_setSavingDir = QPushButton('Browse') self.btn_setSavingDir.setDefault(False) self.btn_setSavingDir.setIcon(QIcon('icon/folder.png')) - + self.lineEdit_savingDir = QLineEdit() self.lineEdit_savingDir.setReadOnly(True) self.lineEdit_savingDir.setText('Choose a base saving 
directory') @@ -983,15 +1276,15 @@ def add_components(self): self.lineEdit_experimentID = QLineEdit() self.entry_saveFPS = QDoubleSpinBox() - self.entry_saveFPS.setMinimum(0.02) - self.entry_saveFPS.setMaximum(1000) + self.entry_saveFPS.setMinimum(0.02) + self.entry_saveFPS.setMaximum(1000) self.entry_saveFPS.setSingleStep(1) self.entry_saveFPS.setValue(1) self.streamHandler.set_save_fps(1) self.entry_timeLimit = QSpinBox() - self.entry_timeLimit.setMinimum(-1) - self.entry_timeLimit.setMaximum(60*60*24*30) + self.entry_timeLimit.setMinimum(-1) + self.entry_timeLimit.setMaximum(60*60*24*30) self.entry_timeLimit.setSingleStep(1) self.entry_timeLimit.setValue(-1) @@ -1014,12 +1307,12 @@ def add_components(self): grid_line3.addWidget(self.entry_saveFPS, 0,1) grid_line3.addWidget(QLabel('Time Limit (s)'), 0,2) grid_line3.addWidget(self.entry_timeLimit, 0,3) - grid_line3.addWidget(self.btn_record, 0,4) - self.grid = QGridLayout() - self.grid.addLayout(grid_line1,0,0) - self.grid.addLayout(grid_line2,1,0) - self.grid.addLayout(grid_line3,2,0) + self.grid = QVBoxLayout() + self.grid.addLayout(grid_line1) + self.grid.addLayout(grid_line2) + self.grid.addLayout(grid_line3) + self.grid.addWidget(self.btn_record) self.setLayout(self.grid) # add and display a timer - to be implemented @@ -1063,6 +1356,7 @@ def stop_recording(self): self.streamHandler.stop_recording() self.btn_setSavingDir.setEnabled(True) + class NavigationWidget(QFrame): def __init__(self, navigationController, slidePositionController=None, main=None, widget_configuration = 'full', *args, **kwargs): super().__init__(*args, **kwargs) @@ -1070,19 +1364,23 @@ def __init__(self, navigationController, slidePositionController=None, main=None self.slidePositionController = slidePositionController self.widget_configuration = widget_configuration self.slide_position = None + self.flag_click_to_move = navigationController.click_to_move self.add_components() self.setFrameStyle(QFrame.Panel | QFrame.Raised) def 
add_components(self): + x_label = QLabel('X :') + x_label.setFixedWidth(20) self.label_Xpos = QLabel() self.label_Xpos.setNum(0) self.label_Xpos.setFrameStyle(QFrame.Panel | QFrame.Sunken) self.entry_dX = QDoubleSpinBox() - self.entry_dX.setMinimum(0) - self.entry_dX.setMaximum(25) + self.entry_dX.setMinimum(0) + self.entry_dX.setMaximum(25) self.entry_dX.setSingleStep(0.2) self.entry_dX.setValue(0) self.entry_dX.setDecimals(3) + self.entry_dX.setSuffix(' mm') self.entry_dX.setKeyboardTracking(False) self.btn_moveX_forward = QPushButton('Forward') self.btn_moveX_forward.setDefault(False) @@ -1094,10 +1392,13 @@ def add_components(self): self.btn_home_X.setEnabled(HOMING_ENABLED_X) self.btn_zero_X = QPushButton('Zero X') self.btn_zero_X.setDefault(False) - - self.checkbox_clickToMove = QCheckBox('Click to move') + + self.checkbox_clickToMove = QCheckBox('Click to Move') self.checkbox_clickToMove.setChecked(False) + self.checkbox_clickToMove.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)) + y_label = QLabel('Y :') + y_label.setFixedWidth(20) self.label_Ypos = QLabel() self.label_Ypos.setNum(0) self.label_Ypos.setFrameStyle(QFrame.Panel | QFrame.Sunken) @@ -1107,6 +1408,8 @@ def add_components(self): self.entry_dY.setSingleStep(0.2) self.entry_dY.setValue(0) self.entry_dY.setDecimals(3) + self.entry_dY.setSuffix(' mm') + self.entry_dY.setKeyboardTracking(False) self.btn_moveY_forward = QPushButton('Forward') self.btn_moveY_forward.setDefault(False) @@ -1119,15 +1422,18 @@ def add_components(self): self.btn_zero_Y = QPushButton('Zero Y') self.btn_zero_Y.setDefault(False) + z_label = QLabel('Z :') + z_label.setFixedWidth(20) self.label_Zpos = QLabel() self.label_Zpos.setNum(0) self.label_Zpos.setFrameStyle(QFrame.Panel | QFrame.Sunken) self.entry_dZ = QDoubleSpinBox() - self.entry_dZ.setMinimum(0) - self.entry_dZ.setMaximum(1000) + self.entry_dZ.setMinimum(0) + self.entry_dZ.setMaximum(1000) self.entry_dZ.setSingleStep(0.2) self.entry_dZ.setValue(0) 
self.entry_dZ.setDecimals(3) + self.entry_dZ.setSuffix(' μm') self.entry_dZ.setKeyboardTracking(False) self.btn_moveZ_forward = QPushButton('Forward') self.btn_moveZ_forward.setDefault(False) @@ -1140,62 +1446,48 @@ def add_components(self): self.btn_zero_Z = QPushButton('Zero Z') self.btn_zero_Z.setDefault(False) - self.btn_load_slide = QPushButton('To Slide Loading Position') - + self.btn_load_slide = QPushButton('Move To Loading Position') + self.btn_load_slide.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) + grid_line0 = QGridLayout() - grid_line0.addWidget(QLabel('X (mm)'), 0,0) + grid_line0.addWidget(x_label, 0,0) grid_line0.addWidget(self.label_Xpos, 0,1) grid_line0.addWidget(self.entry_dX, 0,2) grid_line0.addWidget(self.btn_moveX_forward, 0,3) grid_line0.addWidget(self.btn_moveX_backward, 0,4) - - grid_line1 = QGridLayout() - grid_line1.addWidget(QLabel('Y (mm)'), 0,0) - grid_line1.addWidget(self.label_Ypos, 0,1) - grid_line1.addWidget(self.entry_dY, 0,2) - grid_line1.addWidget(self.btn_moveY_forward, 0,3) - grid_line1.addWidget(self.btn_moveY_backward, 0,4) - grid_line2 = QGridLayout() - grid_line2.addWidget(QLabel('Z (um)'), 0,0) - grid_line2.addWidget(self.label_Zpos, 0,1) - grid_line2.addWidget(self.entry_dZ, 0,2) - grid_line2.addWidget(self.btn_moveZ_forward, 0,3) - grid_line2.addWidget(self.btn_moveZ_backward, 0,4) - + grid_line0.addWidget(y_label, 1,0) + grid_line0.addWidget(self.label_Ypos, 1,1) + grid_line0.addWidget(self.entry_dY, 1,2) + grid_line0.addWidget(self.btn_moveY_forward, 1,3) + grid_line0.addWidget(self.btn_moveY_backward, 1,4) + + grid_line0.addWidget(z_label, 2,0) + grid_line0.addWidget(self.label_Zpos, 2,1) + grid_line0.addWidget(self.entry_dZ, 2,2) + grid_line0.addWidget(self.btn_moveZ_forward, 2,3) + grid_line0.addWidget(self.btn_moveZ_backward, 2,4) + grid_line3 = QHBoxLayout() - grid_line3_buttons = QGridLayout() if self.widget_configuration == 'full': - grid_line3_buttons.addWidget(self.btn_zero_X, 0,3) - 
grid_line3_buttons.addWidget(self.btn_zero_Y, 0,4) - grid_line3_buttons.addWidget(self.btn_zero_Z, 0,5) - grid_line3_buttons.addWidget(self.btn_home_X, 0,0) - grid_line3_buttons.addWidget(self.btn_home_Y, 0,1) - grid_line3_buttons.addWidget(self.btn_home_Z, 0,2) - elif self.widget_configuration == 'malaria': - grid_line3_buttons.addWidget(self.btn_load_slide, 0,0,1,2) - grid_line3_buttons.addWidget(self.btn_home_Z, 0,2,1,1) - grid_line3_buttons.addWidget(self.btn_zero_Z, 0,3,1,1) - elif self.widget_configuration == '384 well plate': - grid_line3_buttons.addWidget(self.btn_load_slide, 0,0,1,2) - grid_line3_buttons.addWidget(self.btn_home_Z, 0,2,1,1) - grid_line3_buttons.addWidget(self.btn_zero_Z, 0,3,1,1) - elif self.widget_configuration == '96 well plate': - grid_line3_buttons.addWidget(self.btn_load_slide, 0,0,1,2) - grid_line3_buttons.addWidget(self.btn_home_Z, 0,2,1,1) - grid_line3_buttons.addWidget(self.btn_zero_Z, 0,3,1,1) - - grid_line3.addLayout(grid_line3_buttons) - - grid_line3.addWidget(self.checkbox_clickToMove) - + grid_line3.addWidget(self.btn_home_X) + grid_line3.addWidget(self.btn_home_Y) + grid_line3.addWidget(self.btn_home_Z) + grid_line3.addWidget(self.btn_zero_X) + grid_line3.addWidget(self.btn_zero_Y) + grid_line3.addWidget(self.btn_zero_Z) + else: + grid_line3.addWidget(self.btn_load_slide, 1) + grid_line3.addWidget(self.btn_home_Z, 1) + grid_line3.addWidget(self.btn_zero_Z, 1) - self.grid = QGridLayout() - self.grid.addLayout(grid_line0,0,0) - self.grid.addLayout(grid_line1,1,0) - self.grid.addLayout(grid_line2,2,0) - self.grid.addLayout(grid_line3,3,0) + if not ENABLE_CLICK_TO_MOVE_BY_DEFAULT: + grid_line3.addWidget(self.checkbox_clickToMove, 1) + + self.grid = QVBoxLayout() + self.grid.addLayout(grid_line0) + self.grid.addLayout(grid_line3) self.setLayout(self.grid) self.entry_dX.valueChanged.connect(self.set_deltaX) @@ -1219,8 +1511,37 @@ def add_components(self): 
self.checkbox_clickToMove.stateChanged.connect(self.navigationController.set_flag_click_to_move) self.btn_load_slide.clicked.connect(self.switch_position) - self.btn_load_slide.setStyleSheet("background-color: #C2C2FF"); - + self.btn_load_slide.setStyleSheet("background-color: #C2C2FF") + + def toggle_click_to_move(self, started): + if started: + self.flag_click_to_move = self.navigationController.get_flag_click_to_move() + self.setEnabled_all(False) + self.checkbox_clickToMove.setChecked(False) # should set navigationController.click_to_move to False + self.navigationController.click_to_move = False + print("set click to move off") + else: + self.setEnabled_all(True) + self.checkbox_clickToMove.setChecked(self.flag_click_to_move) + self.navigationController.click_to_move = self.flag_click_to_move + print("restored click to move to", "on" if self.flag_click_to_move else "off") + + def setEnabled_all(self, enabled): + self.checkbox_clickToMove.setEnabled(enabled) + self.btn_home_X.setEnabled(enabled) + self.btn_zero_X.setEnabled(enabled) + self.btn_moveX_forward.setEnabled(enabled) + self.btn_moveX_backward.setEnabled(enabled) + self.btn_home_Y.setEnabled(enabled) + self.btn_zero_Y.setEnabled(enabled) + self.btn_moveY_forward.setEnabled(enabled) + self.btn_moveY_backward.setEnabled(enabled) + self.btn_home_Z.setEnabled(enabled) + self.btn_zero_Z.setEnabled(enabled) + self.btn_moveZ_forward.setEnabled(enabled) + self.btn_moveZ_backward.setEnabled(enabled) + self.btn_load_slide.setEnabled(enabled) + def move_x_forward(self): self.navigationController.move_x(self.entry_dX.value()) def move_x_backward(self): @@ -1232,18 +1553,18 @@ def move_y_backward(self): def move_z_forward(self): self.navigationController.move_z(self.entry_dZ.value()/1000) def move_z_backward(self): - self.navigationController.move_z(-self.entry_dZ.value()/1000) + self.navigationController.move_z(-self.entry_dZ.value()/1000) def set_deltaX(self,value): - mm_per_ustep = 
SCREW_PITCH_X_MM/(self.navigationController.x_microstepping*FULLSTEPS_PER_REV_X) # to implement a get_x_microstepping() in multipointController + mm_per_ustep = self.navigationController.get_mm_per_ustep_X() deltaX = round(value/mm_per_ustep)*mm_per_ustep self.entry_dX.setValue(deltaX) def set_deltaY(self,value): - mm_per_ustep = SCREW_PITCH_Y_MM/(self.navigationController.y_microstepping*FULLSTEPS_PER_REV_Y) + mm_per_ustep = self.navigationController.get_mm_per_ustep_Y() deltaY = round(value/mm_per_ustep)*mm_per_ustep self.entry_dY.setValue(deltaY) def set_deltaZ(self,value): - mm_per_ustep = SCREW_PITCH_Z_MM/(self.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z) + mm_per_ustep = self.navigationController.get_mm_per_ustep_Z() deltaZ = round(value/1000/mm_per_ustep)*mm_per_ustep*1000 self.entry_dZ.setValue(deltaZ) @@ -1294,8 +1615,8 @@ def zero_z(self): def slot_slide_loading_position_reached(self): self.slide_position = 'loading' - self.btn_load_slide.setStyleSheet("background-color: #C2FFC2"); - self.btn_load_slide.setText('To Scanning Position') + self.btn_load_slide.setStyleSheet("background-color: #C2FFC2") + self.btn_load_slide.setText('Move to Scanning Position') self.btn_moveX_forward.setEnabled(False) self.btn_moveX_backward.setEnabled(False) self.btn_moveY_forward.setEnabled(False) @@ -1306,8 +1627,8 @@ def slot_slide_loading_position_reached(self): def slot_slide_scanning_position_reached(self): self.slide_position = 'scanning' - self.btn_load_slide.setStyleSheet("background-color: #C2C2FF"); - self.btn_load_slide.setText('To Loading Position') + self.btn_load_slide.setStyleSheet("background-color: #C2C2FF") + self.btn_load_slide.setText('Move to Loading Position') self.btn_moveX_forward.setEnabled(True) self.btn_moveX_backward.setEnabled(True) self.btn_moveY_forward.setEnabled(True) @@ -1323,6 +1644,152 @@ def switch_position(self): self.slidePositionController.move_to_slide_scanning_position() self.btn_load_slide.setEnabled(False) + def 
replace_slide_controller(self, slidePositionController): + self.slidePositionController = slidePositionController + self.slidePositionController.signal_slide_loading_position_reached.connect(self.slot_slide_loading_position_reached) + self.slidePositionController.signal_slide_scanning_position_reached.connect(self.slot_slide_scanning_position_reached) + + +class NavigationBarWidget(QWidget): + def __init__(self, navigationController=None, slidePositionController=None, add_z_buttons=True,*args, **kwargs): + super().__init__(*args, **kwargs) + self.navigationController = navigationController + self.slidePositionController = slidePositionController + self.add_z_buttons = add_z_buttons + self.initUI() + + def initUI(self): + layout = QHBoxLayout() + layout.setContentsMargins(5, 2, 5, 4) # Reduce vertical margins to make it thinner + + # Move to Loading Position button + self.btn_load_slide = QPushButton('Move To Loading Position') + self.btn_load_slide.setStyleSheet("background-color: #C2C2FF") + self.btn_load_slide.clicked.connect(self.switch_position) + + # Click to Move checkbox + self.checkbox_clickToMove = QCheckBox('Click to Move') + self.checkbox_clickToMove.setChecked(False) + + # Home Z and Zero Z + if self.add_z_buttons: + if self.slidePositionController is not None: + layout.addWidget(self.btn_load_slide) + layout.addSpacing(10) + + self.btn_home_Z = QPushButton('Home Z') + self.btn_home_Z.clicked.connect(self.home_z) + layout.addWidget(self.btn_home_Z) + layout.addSpacing(20) + + self.btn_zero_Z = QPushButton('Zero Z') + self.btn_zero_Z.clicked.connect(self.zero_z) + layout.addWidget(self.btn_zero_Z) + layout.addSpacing(20) + + if self.navigationController is not None: + layout.addWidget(self.checkbox_clickToMove) + layout.addSpacing(10) + + # X position + x_label = QLabel('X:') + self.label_Xpos = QLabel('00.000 mm') + self.label_Xpos.setFixedWidth(self.label_Xpos.sizeHint().width()) + #self.label_Xpos.setFrameStyle(QFrame.Panel | QFrame.Sunken) + + # Y 
position + y_label = QLabel('Y:') + self.label_Ypos = QLabel('00.000 mm') + self.label_Ypos.setFixedWidth(self.label_Ypos.sizeHint().width()) + #self.label_Ypos.setFrameStyle(QFrame.Panel | QFrame.Sunken) + + # Z position + z_label = QLabel('Z:') + self.label_Zpos = QLabel('0000.000 μm') + self.label_Zpos.setFixedWidth(self.label_Zpos.sizeHint().width()) + #self.label_Zpos.setFrameStyle(QFrame.Panel | QFrame.Sunken) + + # Add widgets to layout + layout.addStretch(1) + layout.addSpacing(10) + layout.addWidget(x_label) + layout.addWidget(self.label_Xpos) + layout.addSpacing(10) + layout.addWidget(y_label) + layout.addWidget(self.label_Ypos) + layout.addSpacing(10) + layout.addWidget(z_label) + layout.addWidget(self.label_Zpos) + layout.addSpacing(10) + layout.addStretch(1) + + self.setLayout(layout) + self.setFixedHeight(self.sizeHint().height()) # Set fixed height to make it as thin as possible + self.connect_signals() + + def update_x_position(self, x): + self.label_Xpos.setText(f"{x:.3f} mm") + + def update_y_position(self, y): + self.label_Ypos.setText(f"{y:.3f} mm") + + def update_z_position(self, z): + self.label_Zpos.setText(f"{z:.3f} μm") + + def home_z(self): + msg = QMessageBox() + msg.setIcon(QMessageBox.Information) + msg.setText("Confirm your action") + msg.setInformativeText("Click OK to run homing\n(Sets current Z to 0 μm)") + msg.setWindowTitle("Confirmation") + msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel) + msg.setDefaultButton(QMessageBox.Cancel) + retval = msg.exec_() + if QMessageBox.Ok == retval: + self.navigationController.home_z() + + def zero_z(self): + msg = QMessageBox() + msg.setIcon(QMessageBox.Information) + msg.setText("Confirm your action") + msg.setInformativeText("Click OK to zero\n(Moves Z to 0 μm)") + msg.setWindowTitle("Confirmation") + msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel) + msg.setDefaultButton(QMessageBox.Cancel) + retval = msg.exec_() + if QMessageBox.Ok == retval: + 
self.navigationController.zero_z() + + def switch_position(self): + if self.btn_load_slide.text() == 'Move To Loading Position': + self.slidePositionController.move_to_slide_loading_position() + else: + self.slidePositionController.move_to_slide_scanning_position() + self.btn_load_slide.setEnabled(False) + + def slot_slide_loading_position_reached(self): + self.btn_load_slide.setText('Move to Scanning Position') + self.btn_load_slide.setStyleSheet("background-color: #C2FFC2") + self.btn_load_slide.setEnabled(True) + + def slot_slide_scanning_position_reached(self): + self.btn_load_slide.setText('Move To Loading Position') + self.btn_load_slide.setStyleSheet("background-color: #C2C2FF") + self.btn_load_slide.setEnabled(True) + + def replace_slide_controller(self, slidePositionController): + self.slidePositionController = slidePositionController + self.slidePositionController.signal_slide_loading_position_reached.connect(self.slot_slide_loading_position_reached) + self.slidePositionController.signal_slide_scanning_position_reached.connect(self.slot_slide_scanning_position_reached) + + def connect_signals(self): + if self.navigationController is not None: + self.checkbox_clickToMove.stateChanged.connect(self.navigationController.set_flag_click_to_move) + if self.slidePositionController is not None: + self.slidePositionController.signal_slide_loading_position_reached.connect(self.slot_slide_loading_position_reached) + self.slidePositionController.signal_slide_scanning_position_reached.connect(self.slot_slide_scanning_position_reached) + + class DACControWidget(QFrame): def __init__(self, microcontroller ,*args, **kwargs): super().__init__(*args, **kwargs) @@ -1339,8 +1806,8 @@ def add_components(self): self.slider_DAC0.setValue(0) self.entry_DAC0 = QDoubleSpinBox() - self.entry_DAC0.setMinimum(0) - self.entry_DAC0.setMaximum(100) + self.entry_DAC0.setMinimum(0) + self.entry_DAC0.setMaximum(100) self.entry_DAC0.setSingleStep(0.1) self.entry_DAC0.setValue(0) 
self.entry_DAC0.setKeyboardTracking(False) @@ -1353,8 +1820,8 @@ def add_components(self): self.slider_DAC1.setSingleStep(1) self.entry_DAC1 = QDoubleSpinBox() - self.entry_DAC1.setMinimum(0) - self.entry_DAC1.setMaximum(100) + self.entry_DAC1.setMinimum(0) + self.entry_DAC1.setMaximum(100) self.entry_DAC1.setSingleStep(0.1) self.entry_DAC1.setValue(0) self.entry_DAC1.setKeyboardTracking(False) @@ -1368,25 +1835,28 @@ def add_components(self): self.slider_DAC1.valueChanged.connect(self.entry_DAC1.setValue) # layout - grid_line1 = QGridLayout() - grid_line1.addWidget(QLabel('DAC0'), 0,0) - grid_line1.addWidget(self.slider_DAC0, 0,1) - grid_line1.addWidget(self.entry_DAC0, 0,2) - grid_line1.addWidget(QLabel('DAC1'), 1,0) - grid_line1.addWidget(self.slider_DAC1, 1,1) - grid_line1.addWidget(self.entry_DAC1, 1,2) + grid_line1 = QHBoxLayout() + grid_line1.addWidget(QLabel('DAC0')) + grid_line1.addWidget(self.slider_DAC0) + grid_line1.addWidget(self.entry_DAC0) + grid_line1.addWidget(QLabel('DAC1')) + grid_line1.addWidget(self.slider_DAC1) + grid_line1.addWidget(self.entry_DAC1) self.grid = QGridLayout() self.grid.addLayout(grid_line1,1,0) self.setLayout(self.grid) def set_DAC0(self,value): - self.microcontroller.analog_write_onboard_DAC(0,int(value*65535/100)) + self.microcontroller.analog_write_onboard_DAC(0,round(value*65535/100)) def set_DAC1(self,value): - self.microcontroller.analog_write_onboard_DAC(1,int(value*65535/100)) + self.microcontroller.analog_write_onboard_DAC(1,round(value*65535/100)) + class AutoFocusWidget(QFrame): + signal_autoLevelSetting = Signal(bool) + def __init__(self, autofocusController, main=None, *args, **kwargs): super().__init__(*args, **kwargs) self.autofocusController = autofocusController @@ -1395,20 +1865,25 @@ def __init__(self, autofocusController, main=None, *args, **kwargs): def add_components(self): self.entry_delta = QDoubleSpinBox() - self.entry_delta.setMinimum(0) - self.entry_delta.setMaximum(20) + 
self.entry_delta.setMinimum(0) + self.entry_delta.setMaximum(20) self.entry_delta.setSingleStep(0.2) self.entry_delta.setDecimals(3) + self.entry_delta.setSuffix(' μm') self.entry_delta.setValue(1.524) self.entry_delta.setKeyboardTracking(False) + self.entry_delta.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.autofocusController.set_deltaZ(1.524) self.entry_N = QSpinBox() - self.entry_N.setMinimum(3) - self.entry_N.setMaximum(20) + self.entry_N.setMinimum(3) + self.entry_N.setMaximum(10000) + self.entry_N.setFixedWidth(self.entry_N.sizeHint().width()) + self.entry_N.setMaximum(20) self.entry_N.setSingleStep(1) self.entry_N.setValue(10) self.entry_N.setKeyboardTracking(False) + self.entry_N.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.autofocusController.set_N(10) self.btn_autofocus = QPushButton('Autofocus') @@ -1416,26 +1891,35 @@ def add_components(self): self.btn_autofocus.setCheckable(True) self.btn_autofocus.setChecked(False) + self.btn_autolevel = QPushButton('Autolevel') + self.btn_autolevel.setCheckable(True) + self.btn_autolevel.setChecked(False) + self.btn_autolevel.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) + # layout - grid_line0 = QGridLayout() - grid_line0.addWidget(QLabel('delta Z (um)'), 0,0) - grid_line0.addWidget(self.entry_delta, 0,1) - grid_line0.addWidget(QLabel('N Z planes'), 0,2) - grid_line0.addWidget(self.entry_N, 0,3) - grid_line0.addWidget(self.btn_autofocus, 0,4) + self.grid = QVBoxLayout() + grid_line0 = QHBoxLayout() + grid_line0.addWidget(QLabel('\u0394 Z')) + grid_line0.addWidget(self.entry_delta) + grid_line0.addSpacing(20) + grid_line0.addWidget(QLabel('# of Z-Planes')) + grid_line0.addWidget(self.entry_N) + grid_line0.addSpacing(20) + grid_line0.addWidget(self.btn_autolevel) - self.grid = QGridLayout() - self.grid.addLayout(grid_line0,0,0) + self.grid.addLayout(grid_line0) + self.grid.addWidget(self.btn_autofocus) self.setLayout(self.grid) - + # connections - 
self.btn_autofocus.clicked.connect(lambda : self.autofocusController.autofocus(False)) + self.btn_autofocus.toggled.connect(lambda : self.autofocusController.autofocus(False)) + self.btn_autolevel.toggled.connect(self.signal_autoLevelSetting.emit) self.entry_delta.valueChanged.connect(self.set_deltaZ) self.entry_N.valueChanged.connect(self.autofocusController.set_N) self.autofocusController.autofocusFinished.connect(self.autofocus_is_finished) def set_deltaZ(self,value): - mm_per_ustep = SCREW_PITCH_Z_MM/(self.autofocusController.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z) + mm_per_ustep = self.autofocusController.navigationController.get_mm_per_ustep_Z() deltaZ = round(value/1000/mm_per_ustep)*mm_per_ustep*1000 self.entry_delta.setValue(deltaZ) self.autofocusController.set_deltaZ(deltaZ) @@ -1443,14 +1927,51 @@ def set_deltaZ(self,value): def autofocus_is_finished(self): self.btn_autofocus.setChecked(False) -class StatsDisplayWidget(QFrame): - def __init__(self, *args, **kwargs): + +class FilterControllerWidget(QFrame): + def __init__(self, filterController, liveController, main=None, *args, **kwargs): super().__init__(*args, **kwargs) - self.initUI() + self.filterController = filterController + self.liveController = liveController + self.add_components() self.setFrameStyle(QFrame.Panel | QFrame.Raised) - def initUI(self): - self.layout = QVBoxLayout() + def add_components(self): + self.comboBox = QComboBox() + for i in range(1, 8): # Assuming 7 filter positions + self.comboBox.addItem(f"Position {i}") + self.checkBox = QCheckBox("Disable filter wheel movement on changing Microscope Configuration", self) + + layout = QGridLayout() + layout.addWidget(QLabel('Filter wheel position:'), 0,0) + layout.addWidget(self.comboBox, 0,1) + layout.addWidget(self.checkBox, 2,0) + + self.setLayout(layout) + + self.comboBox.currentIndexChanged.connect(self.on_selection_change) # Connecting to selection change + 
self.checkBox.stateChanged.connect(self.disable_movement_by_switching_channels) + + def on_selection_change(self, index): + # The 'index' parameter is the new index of the combo box + if index >= 0 and index <= 7: # Making sure the index is valid + self.filterController.set_emission_filter(index+1) + + def disable_movement_by_switching_channels(self, state): + if state: + self.liveController.enable_channel_auto_filter_switching = False + else: + self.liveController.enable_channel_auto_filter_switching = True + + +class StatsDisplayWidget(QFrame): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.initUI() + self.setFrameStyle(QFrame.Panel | QFrame.Raised) + + def initUI(self): + self.layout = QVBoxLayout() self.table_widget = QTableWidget() self.table_widget.setColumnCount(2) self.table_widget.verticalHeader().hide() @@ -1460,6 +1981,7 @@ def initUI(self): self.setLayout(self.layout) def display_stats(self, stats): + print("displaying parasite stats") locale.setlocale(locale.LC_ALL, '') self.table_widget.setRowCount(len(stats)) row = 0 @@ -1476,10 +1998,18 @@ def display_stats(self, stats): class MultiPointWidget(QFrame): + + signal_acquisition_started = Signal(bool) + signal_acquisition_channels = Signal(list) + signal_acquisition_z_levels = Signal(int) + signal_acquisition_shape = Signal(int, int, int, float, float, float) + signal_stitcher_widget = Signal(bool) + def __init__(self, multipointController, configurationManager = None, main=None, *args, **kwargs): super().__init__(*args, **kwargs) self.multipointController = multipointController self.configurationManager = configurationManager + self.well_selected = False self.base_path_is_set = False self.add_components() self.setFrameStyle(QFrame.Panel | QFrame.Raised) @@ -1489,7 +2019,7 @@ def add_components(self): self.btn_setSavingDir = QPushButton('Browse') self.btn_setSavingDir.setDefault(False) self.btn_setSavingDir.setIcon(QIcon('icon/folder.png')) - + self.lineEdit_savingDir 
= QLineEdit() self.lineEdit_savingDir.setReadOnly(True) self.lineEdit_savingDir.setText('Choose a base saving directory') @@ -1501,60 +2031,64 @@ def add_components(self): self.lineEdit_experimentID = QLineEdit() self.entry_deltaX = QDoubleSpinBox() - self.entry_deltaX.setMinimum(0) - self.entry_deltaX.setMaximum(5) + self.entry_deltaX.setMinimum(0) + self.entry_deltaX.setMaximum(5) self.entry_deltaX.setSingleStep(0.1) self.entry_deltaX.setValue(Acquisition.DX) self.entry_deltaX.setDecimals(3) + self.entry_deltaX.setSuffix(' mm') self.entry_deltaX.setKeyboardTracking(False) self.entry_NX = QSpinBox() - self.entry_NX.setMinimum(1) - self.entry_NX.setMaximum(50) + self.entry_NX.setMinimum(1) + self.entry_NX.setMaximum(50) self.entry_NX.setSingleStep(1) self.entry_NX.setValue(Acquisition.NX) self.entry_NX.setKeyboardTracking(False) self.entry_deltaY = QDoubleSpinBox() - self.entry_deltaY.setMinimum(0) - self.entry_deltaY.setMaximum(5) + self.entry_deltaY.setMinimum(0) + self.entry_deltaY.setMaximum(5) self.entry_deltaY.setSingleStep(0.1) self.entry_deltaY.setValue(Acquisition.DX) self.entry_deltaY.setDecimals(3) + self.entry_deltaY.setSuffix(' mm') self.entry_deltaY.setKeyboardTracking(False) - + self.entry_NY = QSpinBox() - self.entry_NY.setMinimum(1) - self.entry_NY.setMaximum(50) + self.entry_NY.setMinimum(1) + self.entry_NY.setMaximum(50) self.entry_NY.setSingleStep(1) self.entry_NY.setValue(Acquisition.NY) self.entry_NY.setKeyboardTracking(False) self.entry_deltaZ = QDoubleSpinBox() - self.entry_deltaZ.setMinimum(0) - self.entry_deltaZ.setMaximum(1000) + self.entry_deltaZ.setMinimum(0) + self.entry_deltaZ.setMaximum(1000) self.entry_deltaZ.setSingleStep(0.2) self.entry_deltaZ.setValue(Acquisition.DZ) self.entry_deltaZ.setDecimals(3) + self.entry_deltaZ.setSuffix(' μm') self.entry_deltaZ.setKeyboardTracking(False) - + self.entry_NZ = QSpinBox() - self.entry_NZ.setMinimum(1) - self.entry_NZ.setMaximum(100) + self.entry_NZ.setMinimum(1) + 
self.entry_NZ.setMaximum(2000) self.entry_NZ.setSingleStep(1) self.entry_NZ.setValue(1) self.entry_NZ.setKeyboardTracking(False) - + self.entry_dt = QDoubleSpinBox() - self.entry_dt.setMinimum(0) - self.entry_dt.setMaximum(12*3600) + self.entry_dt.setMinimum(0) + self.entry_dt.setMaximum(12*3600) self.entry_dt.setSingleStep(1) self.entry_dt.setValue(0) + self.entry_dt.setSuffix(' s') self.entry_dt.setKeyboardTracking(False) self.entry_Nt = QSpinBox() - self.entry_Nt.setMinimum(1) - self.entry_Nt.setMaximum(50000) # @@@ to be changed + self.entry_Nt.setMinimum(1) + self.entry_Nt.setMaximum(5000) # @@@ to be changed self.entry_Nt.setSingleStep(1) self.entry_Nt.setValue(1) self.entry_Nt.setKeyboardTracking(False) @@ -1565,17 +2099,22 @@ def add_components(self): self.list_configurations.setSelectionMode(QAbstractItemView.MultiSelection) # ref: https://doc.qt.io/qt-5/qabstractitemview.html#SelectionMode-enum self.checkbox_withAutofocus = QCheckBox('Contrast AF') - self.checkbox_withAutofocus.setChecked(MULTIPOINT_AUTOFOCUS_ENABLE_BY_DEFAULT) - self.multipointController.set_af_flag(MULTIPOINT_AUTOFOCUS_ENABLE_BY_DEFAULT) + self.checkbox_withAutofocus.setChecked(MULTIPOINT_CONTRAST_AUTOFOCUS_ENABLE_BY_DEFAULT) + self.multipointController.set_af_flag(MULTIPOINT_CONTRAST_AUTOFOCUS_ENABLE_BY_DEFAULT) - self.checkbox_genFocusMap = QCheckBox('Generate focus map') + self.checkbox_genFocusMap = QCheckBox('Focus Map') self.checkbox_genFocusMap.setChecked(False) self.checkbox_withReflectionAutofocus = QCheckBox('Reflection AF') self.checkbox_withReflectionAutofocus.setChecked(MULTIPOINT_REFLECTION_AUTOFOCUS_ENABLE_BY_DEFAULT) + self.checkbox_stitchOutput = QCheckBox('Stitch Scans') + self.checkbox_stitchOutput.setChecked(False) + self.multipointController.set_reflection_af_flag(MULTIPOINT_REFLECTION_AUTOFOCUS_ENABLE_BY_DEFAULT) - self.btn_startAcquisition = QPushButton('Start Acquisition') + + self.btn_startAcquisition = QPushButton('Start\n Acquisition ') + 
self.btn_startAcquisition.setStyleSheet("background-color: #C2C2FF") self.btn_startAcquisition.setCheckable(True) self.btn_startAcquisition.setChecked(False) @@ -1590,35 +2129,43 @@ def add_components(self): grid_line1.addWidget(self.lineEdit_experimentID,0,1) grid_line2 = QGridLayout() - grid_line2.addWidget(QLabel('dx (mm)'), 0,0) + grid_line2.addWidget(QLabel('dx'), 0,0) grid_line2.addWidget(self.entry_deltaX, 0,1) - grid_line2.addWidget(QLabel('Nx'), 0,2) - grid_line2.addWidget(self.entry_NX, 0,3) - grid_line2.addWidget(QLabel('dy (mm)'), 0,4) - grid_line2.addWidget(self.entry_deltaY, 0,5) - grid_line2.addWidget(QLabel('Ny'), 0,6) - grid_line2.addWidget(self.entry_NY, 0,7) - - grid_line2.addWidget(QLabel('dz (um)'), 1,0) + grid_line2.addWidget(QLabel('Nx'), 0,3) + grid_line2.addWidget(self.entry_NX, 0,4) + grid_line2.addWidget(QLabel('dy'), 0,6) + grid_line2.addWidget(self.entry_deltaY, 0,7) + grid_line2.addWidget(QLabel('Ny'), 0,9) + grid_line2.addWidget(self.entry_NY, 0,10) + + grid_line2.addWidget(QLabel('dz'), 1,0) grid_line2.addWidget(self.entry_deltaZ, 1,1) - grid_line2.addWidget(QLabel('Nz'), 1,2) - grid_line2.addWidget(self.entry_NZ, 1,3) - grid_line2.addWidget(QLabel('dt (s)'), 1,4) - grid_line2.addWidget(self.entry_dt, 1,5) - grid_line2.addWidget(QLabel('Nt'), 1,6) - grid_line2.addWidget(self.entry_Nt, 1,7) - - grid_af = QVBoxLayout() - grid_af.addWidget(self.checkbox_withAutofocus) - grid_af.addWidget(self.checkbox_genFocusMap) + grid_line2.addWidget(QLabel('Nz'), 1,3) + grid_line2.addWidget(self.entry_NZ, 1,4) + grid_line2.addWidget(QLabel('dt'), 1,6) + grid_line2.addWidget(self.entry_dt, 1,7) + grid_line2.addWidget(QLabel('Nt'), 1,9) + grid_line2.addWidget(self.entry_Nt, 1,10) + + grid_line2.setColumnStretch(2, 1) + grid_line2.setColumnStretch(5, 1) + grid_line2.setColumnStretch(8, 1) + + grid_af = QGridLayout() + grid_af.addItem(QSpacerItem(7, 1, QSizePolicy.Fixed, QSizePolicy.Minimum), 0, 0) + grid_af.addWidget(self.checkbox_withAutofocus,0,1) if 
SUPPORT_LASER_AUTOFOCUS: - grid_af.addWidget(self.checkbox_withReflectionAutofocus) + grid_af.addWidget(self.checkbox_withReflectionAutofocus,1,1) + grid_af.addWidget(self.checkbox_genFocusMap,2,1) + if ENABLE_STITCHER: + grid_af.addWidget(self.checkbox_stitchOutput,3,1) + grid_af.addItem(QSpacerItem(6, 1, QSizePolicy.Fixed, QSizePolicy.Minimum), 0, 2) grid_line3 = QHBoxLayout() - grid_line3.addWidget(self.list_configurations) + grid_line3.addWidget(self.list_configurations, 2) # grid_line3.addWidget(self.checkbox_withAutofocus) - grid_line3.addLayout(grid_af) - grid_line3.addWidget(self.btn_startAcquisition) + grid_line3.addLayout(grid_af, 1) + grid_line3.addWidget(self.btn_startAcquisition, 1) self.grid = QGridLayout() self.grid.addLayout(grid_line0,0,0) @@ -1638,28 +2185,31 @@ def add_components(self): self.entry_NX.valueChanged.connect(self.multipointController.set_NX) self.entry_NY.valueChanged.connect(self.multipointController.set_NY) self.entry_NZ.valueChanged.connect(self.multipointController.set_NZ) + self.entry_NZ.valueChanged.connect(self.signal_acquisition_z_levels.emit) self.entry_Nt.valueChanged.connect(self.multipointController.set_Nt) self.checkbox_withAutofocus.stateChanged.connect(self.multipointController.set_af_flag) self.checkbox_withReflectionAutofocus.stateChanged.connect(self.multipointController.set_reflection_af_flag) self.checkbox_genFocusMap.stateChanged.connect(self.multipointController.set_gen_focus_map_flag) + self.checkbox_stitchOutput.toggled.connect(self.display_stitcher_widget) self.btn_setSavingDir.clicked.connect(self.set_saving_dir) self.btn_startAcquisition.clicked.connect(self.toggle_acquisition) self.multipointController.acquisitionFinished.connect(self.acquisition_is_finished) + self.list_configurations.itemSelectionChanged.connect(self.emit_selected_channels) def set_deltaX(self,value): - mm_per_ustep = SCREW_PITCH_X_MM/(self.multipointController.navigationController.x_microstepping*FULLSTEPS_PER_REV_X) # to implement a 
get_x_microstepping() in multipointController + mm_per_ustep = self.multipointController.navigationController.get_mm_per_ustep_X() deltaX = round(value/mm_per_ustep)*mm_per_ustep self.entry_deltaX.setValue(deltaX) self.multipointController.set_deltaX(deltaX) def set_deltaY(self,value): - mm_per_ustep = SCREW_PITCH_Y_MM/(self.multipointController.navigationController.y_microstepping*FULLSTEPS_PER_REV_Y) + mm_per_ustep = self.multipointController.navigationController.get_mm_per_ustep_Y() deltaY = round(value/mm_per_ustep)*mm_per_ustep self.entry_deltaY.setValue(deltaY) self.multipointController.set_deltaY(deltaY) def set_deltaZ(self,value): - mm_per_ustep = SCREW_PITCH_Z_MM/(self.multipointController.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z) + mm_per_ustep = self.multipointController.navigationController.get_mm_per_ustep_Z() deltaZ = round(value/1000/mm_per_ustep)*mm_per_ustep*1000 self.entry_deltaZ.setValue(deltaZ) self.multipointController.set_deltaZ(deltaZ) @@ -1671,6 +2221,14 @@ def set_saving_dir(self): self.lineEdit_savingDir.setText(save_dir_base) self.base_path_is_set = True + def set_well_selected(self, selected): + self.well_selected = selected + + def emit_selected_channels(self): + selected_channels = [item.text() for item in self.list_configurations.selectedItems()] + print(selected_channels) + self.signal_acquisition_channels.emit(selected_channels) + def toggle_acquisition(self,pressed): if self.base_path_is_set == False: self.btn_startAcquisition.setChecked(False) @@ -1678,13 +2236,26 @@ def toggle_acquisition(self,pressed): msg.setText("Please choose base saving directory first") msg.exec_() return + if self.well_selected == False and self.multipointController.scanCoordinates.format != 0: + self.btn_startAcquisition.setChecked(False) + msg = QMessageBox() + msg.setText("Please select a well to scan first") + msg.exec_() + return + if not self.list_configurations.selectedItems(): # no channel selected + 
self.btn_startAcquisition.setChecked(False) + msg = QMessageBox() + msg.setText("Please select at least one imaging channel first") + msg.exec_() + return if pressed: - # @@@ to do: add a widgetManger to enable and disable widget + # @@@ to do: add a widgetManager to enable and disable widget # @@@ to do: emit signal to widgetManager to disable other widgets self.setEnabled_all(False) - self.multipointController.set_selected_configurations((item.text() for item in self.list_configurations.selectedItems())) - self.multipointController.start_new_experiment(self.lineEdit_experimentID.text()) + # set parameters + if self.multipointController.scanCoordinates is not None: + self.multipointController.scanCoordinates.grid_skip_positions = [] self.multipointController.set_deltaX(self.entry_deltaX.value()) self.multipointController.set_deltaY(self.entry_deltaY.value()) self.multipointController.set_deltaZ(self.entry_deltaZ.value()) @@ -1696,12 +2267,25 @@ def toggle_acquisition(self,pressed): self.multipointController.set_af_flag(self.checkbox_withAutofocus.isChecked()) self.multipointController.set_reflection_af_flag(self.checkbox_withReflectionAutofocus.isChecked()) self.multipointController.set_base_path(self.lineEdit_savingDir.text()) + self.multipointController.set_selected_configurations((item.text() for item in self.list_configurations.selectedItems())) + self.multipointController.start_new_experiment(self.lineEdit_experimentID.text()) + + # emit acquisition data + self.signal_acquisition_started.emit(True) + self.signal_acquisition_shape.emit(self.entry_NX.value(), + self.entry_NY.value(), + self.entry_NZ.value(), + self.entry_deltaX.value(), + self.entry_deltaY.value(), + self.entry_deltaZ.value()) + self.multipointController.run_acquisition() else: self.multipointController.request_abort_aquisition() self.setEnabled_all(True) def acquisition_is_finished(self): + self.signal_acquisition_started.emit(False) self.btn_startAcquisition.setChecked(False) 
self.setEnabled_all(True) @@ -1721,28 +2305,47 @@ def setEnabled_all(self,enabled,exclude_btn_startAcquisition=True): self.checkbox_withAutofocus.setEnabled(enabled) self.checkbox_withReflectionAutofocus.setEnabled(enabled) self.checkbox_genFocusMap.setEnabled(enabled) + self.checkbox_stitchOutput.setEnabled(enabled) if exclude_btn_startAcquisition is not True: self.btn_startAcquisition.setEnabled(enabled) + def display_stitcher_widget(self, checked): + self.signal_stitcher_widget.emit(checked) + def disable_the_start_aquisition_button(self): self.btn_startAcquisition.setEnabled(False) def enable_the_start_aquisition_button(self): self.btn_startAcquisition.setEnabled(True) -class MultiPointWidget2(QFrame): - def __init__(self, navigationController, navigationViewer, multipointController, configurationManager = None, main=None, *args, **kwargs): + +class FlexibleMultiPointWidget(QFrame): + + signal_acquisition_started = Signal(bool) # true = started, false = finished + signal_acquisition_channels = Signal(list) # list channels + signal_acquisition_shape = Signal(int, float) # Nz, dz + signal_stitcher_z_levels = Signal(int) # live Nz + signal_stitcher_widget = Signal(bool) # signal start stitcher + + def __init__(self, navigationController, navigationViewer, multipointController, objectiveStore, configurationManager = None, main=None, scanCoordinates=None, *args, **kwargs): super().__init__(*args, **kwargs) self.last_used_locations = None + self.last_used_location_ids = None self.multipointController = multipointController + self.objectiveStore = objectiveStore self.configurationManager = configurationManager self.navigationController = navigationController self.navigationViewer = navigationViewer + self.scanCoordinates = scanCoordinates self.base_path_is_set = False self.location_list = np.empty((0, 3), dtype=float) + self.location_ids = np.empty((0,), dtype=' self.entry_maxZ.value(): + self.entry_maxZ.setValue(z_pos_um) + + def update_Nz(self): + z_min = 
self.entry_minZ.value() + z_max = self.entry_maxZ.value() + dz = self.entry_deltaZ.value() + nz = math.ceil((z_max - z_min) / dz) + 1 + self.entry_NZ.setValue(nz) + + def update_region_progress(self, current_fov, num_fovs): + self.progress_bar.setMaximum(num_fovs) + self.progress_bar.setValue(current_fov) + + if self.acquisition_start_time is not None and current_fov > 0: + elapsed_time = time.time() - self.acquisition_start_time + Nt = self.entry_Nt.value() + dt = self.entry_dt.value() + + # Calculate total processed FOVs and total FOVs + processed_fovs = (self.current_region - 1) * num_fovs + current_fov + self.current_time_point * self.num_regions * num_fovs + total_fovs = self.num_regions * num_fovs * Nt + remaining_fovs = total_fovs - processed_fovs + + # Calculate ETA + fov_per_second = processed_fovs / elapsed_time + self.eta_seconds = remaining_fovs / fov_per_second + (Nt - 1 - self.current_time_point) * dt if fov_per_second > 0 else 0 + self.update_eta_display() + + # Start or restart the timer + self.eta_timer.start(1000) # Update every 1000 ms (1 second) + + def update_acquisition_progress(self, current_region, num_regions, current_time_point): + self.current_region = current_region + self.current_time_point = current_time_point + + if self.current_region == 1 and self.current_time_point == 0: # First region + self.acquisition_start_time = time.time() + self.num_regions = num_regions + + progress_parts = [] + # Update timepoint progress if there are multiple timepoints and the timepoint has changed + if self.entry_Nt.value() > 1: + progress_parts.append(f"Time {current_time_point + 1}/{self.entry_Nt.value()}") + + # Update region progress if there are multiple regions + if num_regions > 1: + progress_parts.append(f"Region {current_region}/{num_regions}") + + # Set the progress label text, ensuring it's not empty + progress_text = " ".join(progress_parts) + self.progress_label.setText(progress_text if progress_text else "Progress") + + 
self.progress_bar.setValue(0) + + def update_eta_display(self): + if self.eta_seconds > 0: + self.eta_seconds -= 1 # Decrease by 1 second + hours, remainder = divmod(int(self.eta_seconds), 3600) + minutes, seconds = divmod(remainder, 60) + if hours > 0: + eta_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}" + else: + eta_str = f"{minutes:02d}:{seconds:02d}" + self.eta_label.setText(f"{eta_str}") + else: + self.eta_timer.stop() + self.eta_label.setText("00:00") + + def display_progress_bar(self, show): + self.progress_label.setVisible(show) + self.progress_bar.setVisible(show) + self.eta_label.setVisible(show) + if show: + self.progress_bar.setValue(0) + self.progress_label.setText("Region 0/0") + self.eta_label.setText("--:--") + self.acquisition_start_time = None + else: + self.eta_timer.stop() + + def create_region_coordinates(self, x_center, y_center, overlap_percent=10): + """Convert grid parameters (NX, NY) to FOV coordinates based on overlap""" + fov_size_mm = (self.objectiveStore.get_pixel_size() / 1000) * Acquisition.CROP_WIDTH + step_size_mm = fov_size_mm * (1 - overlap_percent/100) + + # Calculate total grid size + grid_width_mm = (self.entry_NX.value() - 1) * step_size_mm + grid_height_mm = (self.entry_NY.value() - 1) * step_size_mm + + scan_coordinates = [] + for i in range(self.entry_NY.value()): + row = [] + y = y_center - grid_height_mm/2 + i * step_size_mm + for j in range(self.entry_NX.value()): + x = x_center - grid_width_mm/2 + j * step_size_mm + row.append((x, y)) + self.navigationViewer.register_fov_to_image(x, y) + + if i % 2 == 1: # reverse even rows + row.reverse() + scan_coordinates.extend(row) + + # Region coordinates are already centered since x_center, y_center is grid center + region_id = f'R{len(self.location_list)-1}' + if region_id in self.region_coordinates: + self.region_coordinates[region_id] = [x_center, y_center] + + return scan_coordinates + + def create_region_coordinates_with_step_size(self, x_center, y_center): + 
grid_width_mm = (self.entry_NX.value() - 1) * self.entry_deltaX.value() + grid_height_mm = (self.entry_NY.value() - 1) * self.entry_deltaY.value() + + # Pre-calculate step sizes and ranges + x_steps = [x_center - grid_width_mm/2 + j * self.entry_deltaX.value() + for j in range(self.entry_NX.value())] + y_steps = [y_center - grid_height_mm/2 + i * self.entry_deltaY.value() + for i in range(self.entry_NY.value())] + + scan_coordinates = [] + for i, y in enumerate(y_steps): + row = [(x, y) for x in (x_steps if i % 2 == 0 else reversed(x_steps))] + scan_coordinates.extend(row) + for x, y in row: + self.navigationViewer.register_fov_to_image(x, y) + + return scan_coordinates + + def update_fov_positions(self): + self.navigationViewer.clear_overlay() + self.region_coordinates.clear() + self.region_fov_coordinates_dict.clear() + + for i, (x, y, z) in enumerate(self.location_list): + region_id = self.location_ids[i] + self.region_coordinates[region_id] = [x, y, z] + if self.use_overlap: + scan_coordinates = self.create_region_coordinates(x, y, overlap_percent=self.entry_overlap.value()) + else: + scan_coordinates = self.create_region_coordinates_with_step_size(x, y) + self.region_fov_coordinates_dict[region_id] = scan_coordinates def set_deltaZ(self,value): - mm_per_ustep = SCREW_PITCH_Z_MM/(self.multipointController.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z) + mm_per_ustep = self.multipointController.navigationController.get_mm_per_ustep_Z() deltaZ = round(value/1000/mm_per_ustep)*mm_per_ustep*1000 self.entry_deltaZ.setValue(deltaZ) self.multipointController.set_deltaZ(deltaZ) @@ -1968,6 +2985,13 @@ def set_saving_dir(self): self.lineEdit_savingDir.setText(save_dir_base) self.base_path_is_set = True + def emit_selected_channels(self): + selected_channels = [item.text() for item in self.list_configurations.selectedItems()] + self.signal_acquisition_channels.emit(selected_channels) + + def display_stitcher_widget(self, checked): + 
self.signal_stitcher_widget.emit(checked) + def toggle_acquisition(self,pressed): if self.base_path_is_set == False: self.btn_startAcquisition.setChecked(False) @@ -1975,30 +2999,48 @@ def toggle_acquisition(self,pressed): msg.setText("Please choose base saving directory first") msg.exec_() return + if not self.list_configurations.selectedItems(): # no channel selected + self.btn_startAcquisition.setChecked(False) + msg = QMessageBox() + msg.setText("Please select at least one imaging channel first") + msg.exec_() + return if pressed: - # @@@ to do: add a widgetManger to enable and disable widget + # @@@ to do: add a widgetManager to enable and disable widget # @@@ to do: emit signal to widgetManager to disable other widgets # add the current location to the location list if the list is empty if len(self.location_list) == 0: self.add_location() - self.acquisition_in_place =True + self.acquisition_in_place = True + + self.update_fov_positions() + + if self.checkbox_set_z_range.isChecked(): + # Set Z-range (convert from μm to mm) + minZ = self.entry_minZ.value() / 1000 + maxZ = self.entry_maxZ.value() / 1000 + self.multipointController.set_z_range(minZ, maxZ) + self.setEnabled_all(False) - self.multipointController.set_selected_configurations((item.text() for item in self.list_configurations.selectedItems())) - self.multipointController.start_new_experiment(self.lineEdit_experimentID.text()) - # set parameters - self.multipointController.set_deltaX(self.entry_deltaX.value()) - self.multipointController.set_deltaY(self.entry_deltaY.value()) + # Set acquisition parameters self.multipointController.set_deltaZ(self.entry_deltaZ.value()) - self.multipointController.set_deltat(self.entry_dt.value()) - self.multipointController.set_NX(self.entry_NX.value()) - self.multipointController.set_NY(self.entry_NY.value()) self.multipointController.set_NZ(self.entry_NZ.value()) + self.multipointController.set_deltat(self.entry_dt.value()) 
self.multipointController.set_Nt(self.entry_Nt.value()) + self.multipointController.set_use_piezo(self.checkbox_usePiezo.isChecked()) self.multipointController.set_af_flag(self.checkbox_withAutofocus.isChecked()) self.multipointController.set_reflection_af_flag(self.checkbox_withReflectionAutofocus.isChecked()) self.multipointController.set_base_path(self.lineEdit_savingDir.text()) - self.multipointController.run_acquisition(self.location_list) + self.multipointController.set_selected_configurations((item.text() for item in self.list_configurations.selectedItems())) + self.multipointController.start_new_experiment(self.lineEdit_experimentID.text()) + + # emit signals + self.signal_acquisition_started.emit(True) + self.signal_acquisition_shape.emit(self.entry_NZ.value(), self.entry_deltaZ.value()) + + # Start coordinate-based acquisition + self.multipointController.run_acquisition(location_list=self.region_coordinates, coordinate_dict=self.region_fov_coordinates_dict) else: self.multipointController.request_abort_aquisition() self.setEnabled_all(True) @@ -2008,30 +3050,37 @@ def load_last_used_locations(self): return self.clear_only_location_list() - for row in self.last_used_locations: + for row, row_ind in zip(self.last_used_locations, self.last_used_location_ids): x = row[0] y = row[1] z = row[2] + name = row_ind[0] if not np.any(np.all(self.location_list[:, :2] == [x, y], axis=1)): - location_str = 'x: ' + str(round(x,3)) + ' mm, y: ' + str(round(y,3)) + ' mm, z: ' + str(round(1000*z,1)) + ' um' + location_str = 'x:' + str(round(x,3)) + 'mm y:' + str(round(y,3)) + 'mm z:' + str(round(1000*z,1)) + 'μm' self.dropdown_location_list.addItem(location_str) + self.location_list = np.vstack((self.location_list, [[x,y,z]])) + self.location_ids = np.append(self.location_ids, name) + self.table_location_list.insertRow(self.table_location_list.rowCount()) + self.table_location_list.setItem(self.table_location_list.rowCount()-1,0, QTableWidgetItem(str(round(x,3)))) + 
self.table_location_list.setItem(self.table_location_list.rowCount()-1,1, QTableWidgetItem(str(round(y,3)))) + self.table_location_list.setItem(self.table_location_list.rowCount()-1,2, QTableWidgetItem(str(round(z*1000,1)))) + self.table_location_list.setItem(self.table_location_list.rowCount()-1,3, QTableWidgetItem(name)) index = self.dropdown_location_list.count() - 1 self.dropdown_location_list.setCurrentIndex(index) - self.location_list = np.vstack((self.location_list, [[x,y,z]])) print(self.location_list) - self.navigationViewer.register_fov_to_image(x,y) + # self.navigationViewer.register_fov_to_image(x,y) else: print("Duplicate values not added based on x and y.") #to-do: update z coordinate - - def acquisition_is_finished(self): if not self.acquisition_in_place: self.last_used_locations = self.location_list.copy() + self.last_used_location_ids = self.location_ids.copy() else: - self.clear() + self.clear_only_location_list() self.acquisition_in_place = False + self.signal_acquisition_started.emit(False) self.btn_startAcquisition.setChecked(False) self.setEnabled_all(True) @@ -2039,20 +3088,26 @@ def setEnabled_all(self,enabled,exclude_btn_startAcquisition=True): self.btn_setSavingDir.setEnabled(enabled) self.lineEdit_savingDir.setEnabled(enabled) self.lineEdit_experimentID.setEnabled(enabled) - self.entry_deltaX.setEnabled(enabled) self.entry_NX.setEnabled(enabled) - self.entry_deltaY.setEnabled(enabled) self.entry_NY.setEnabled(enabled) self.entry_deltaZ.setEnabled(enabled) self.entry_NZ.setEnabled(enabled) self.entry_dt.setEnabled(enabled) self.entry_Nt.setEnabled(enabled) + if not self.use_overlap: + self.entry_deltaX.setEnabled(enabled) + self.entry_deltaY.setEnabled(enabled) + else: + self.entry_overlap.setEnabled(enabled) self.list_configurations.setEnabled(enabled) + self.checkbox_genFocusMap.setEnabled(enabled) self.checkbox_withAutofocus.setEnabled(enabled) self.checkbox_withReflectionAutofocus.setEnabled(enabled) + 
self.checkbox_stitchOutput.setEnabled(enabled) if exclude_btn_startAcquisition is not True: self.btn_startAcquisition.setEnabled(enabled) + def disable_the_start_aquisition_button(self): self.btn_startAcquisition.setEnabled(False) @@ -2060,38 +3115,111 @@ def enable_the_start_aquisition_button(self): self.btn_startAcquisition.setEnabled(True) def add_location(self): + # Get raw positions without rounding x = self.navigationController.x_pos_mm y = self.navigationController.y_pos_mm z = self.navigationController.z_pos_mm - if not np.any(np.all(self.location_list[:, :2] == [x, y], axis=1)): - location_str = 'x: ' + str(round(x,3)) + ' mm, y: ' + str(round(y,3)) + ' mm, z: ' + str(round(1000*z,1)) + ' um' + name = f'R{len(self.location_ids)}' + + # Check for duplicates using rounded values for comparison + if not np.any(np.all(self.location_list[:, :2] == [round(x,3), round(y,3)], axis=1)): + # Store actual values in location_list + self.location_list = np.vstack((self.location_list, [[x, y, z]])) + self.location_ids = np.append(self.location_ids, name) + + # Display rounded values in UI + location_str = f"x:{round(x,3)} mm y:{round(y,3)} mm z:{round(z*1000,1)} μm" self.dropdown_location_list.addItem(location_str) - index = self.dropdown_location_list.count() - 1 - self.dropdown_location_list.setCurrentIndex(index) - self.location_list = np.vstack((self.location_list, [[self.navigationController.x_pos_mm,self.navigationController.y_pos_mm,self.navigationController.z_pos_mm]])) - print(self.location_list) - self.navigationViewer.register_fov_to_image(x,y) + + # Update table with rounded display values + row = self.table_location_list.rowCount() + self.table_location_list.insertRow(row) + self.table_location_list.setItem(row, 0, QTableWidgetItem(str(round(x,3)))) + self.table_location_list.setItem(row, 1, QTableWidgetItem(str(round(y,3)))) + self.table_location_list.setItem(row, 2, QTableWidgetItem(str(round(z*1000,1)))) + self.table_location_list.setItem(row, 3, 
QTableWidgetItem(name)) + + # Store actual values in region coordinates + self.region_coordinates[name] = [x, y, z] + if self.use_overlap: + scan_coordinates = self.create_region_coordinates(x, y, overlap_percent=self.entry_overlap.value()) + else: + scan_coordinates = self.create_region_coordinates_with_step_size(x, y) + self.region_fov_coordinates_dict[name] = scan_coordinates + + print(f"Added Region: {name} - x={x}, y={y}, z={z}") else: - print("Duplicate values not added based on x and y.") - #to-do: update z coordinate + print("Duplicate location not added.") def remove_location(self): index = self.dropdown_location_list.currentIndex() - if index >=0: - self.dropdown_location_list.removeItem(index) - x = self.location_list[index,0] - y = self.location_list[index,1] - z = self.location_list[index,2] - self.navigationViewer.deregister_fov_to_image(x,y) + if index >= 0: + # Get the region ID + region_id = self.location_ids[index] + print("Before Removal:") + print(f"Location IDs: {self.location_ids}") + print(f"Region FOV Coordinates Dict Keys: {list(self.region_fov_coordinates_dict.keys())}") + + # Remove overlays using actual stored coordinates + if region_id in self.region_fov_coordinates_dict: + for coord in self.region_fov_coordinates_dict[region_id]: + self.navigationViewer.deregister_fov_to_image(coord[0], coord[1]) + del self.region_fov_coordinates_dict[region_id] + + # Remove from data structures self.location_list = np.delete(self.location_list, index, axis=0) + self.location_ids = np.delete(self.location_ids, index) + if region_id in self.region_coordinates: + del self.region_coordinates[region_id] + + # Update remaining IDs and UI + for i in range(index, len(self.location_ids)): + old_id = self.location_ids[i] + new_id = f'R{i}' + self.location_ids[i] = new_id + + # Update dictionaries + self.region_coordinates[new_id] = self.region_coordinates.pop(old_id) + self.region_fov_coordinates_dict[new_id] = self.region_fov_coordinates_dict.pop(old_id) + + # 
Update UI with rounded display values + self.table_location_list.setItem(i, 3, QTableWidgetItem(new_id)) + x, y, z = self.location_list[i] + location_str = f"x:{round(x,3)} mm y:{round(y,3)} mm z:{round(z*1000,1)} μm" + self.dropdown_location_list.setItemText(i, location_str) + + # Update UI + self.dropdown_location_list.removeItem(index) + self.table_location_list.removeRow(index) + + print("After Removal:") + print(f"Location IDs: {self.location_ids}") + print(f"Region FOV Coordinates Dict Keys: {list(self.region_fov_coordinates_dict.keys())}") + + # Clear overlay if no locations remain if len(self.location_list) == 0: - self.navigationViewer.clear_slide() - print(self.location_list) + self.navigationViewer.clear_overlay() + + def create_point_id(self): + self.scanCoordinates.get_selected_wells() + if len(self.scanCoordinates.name) == 0: + print('Select a well first.') + return None + + name = self.scanCoordinates.name[0] + location_split_names = [int(x.split('-')[1]) for x in self.location_ids if x.split('-')[0] == name] + if len(location_split_names) > 0: + new_id = f'{name}-{np.max(location_split_names)+1}' + else: + new_id = f'{name}-0' + return new_id def next(self): index = self.dropdown_location_list.currentIndex() - max_index = self.dropdown_location_list.count() - 1 - index = min(index + 1, max_index) + # max_index = self.dropdown_location_list.count() - 1 + # index = min(index + 1, max_index) + num_regions = self.dropdown_location_list.count() + index = (index + 1) % (num_regions) self.dropdown_location_list.setCurrentIndex(index) x = self.location_list[index,0] y = self.location_list[index,1] @@ -2113,12 +3241,21 @@ def previous(self): def clear(self): self.location_list = np.empty((0, 3), dtype=float) + self.location_ids = np.empty((0,), dtype=' 0: + elapsed_time = time.time() - self.acquisition_start_time + Nt = self.entry_Nt.value() + dt = self.entry_dt.value() + + # Calculate total processed FOVs and total FOVs + processed_fovs = 
(self.current_region - 1) * num_fovs + current_fov + self.current_time_point * self.num_regions * num_fovs + total_fovs = self.num_regions * num_fovs * Nt + remaining_fovs = total_fovs - processed_fovs + + # Calculate ETA + fov_per_second = processed_fovs / elapsed_time + self.eta_seconds = remaining_fovs / fov_per_second + (Nt - 1 - self.current_time_point) * dt if fov_per_second > 0 else 0 + self.update_eta_display() + + # Start or restart the timer + self.eta_timer.start(1000) # Update every 1000 ms (1 second) + + def update_acquisition_progress(self, current_region, num_regions, current_time_point): + self.current_region = current_region + self.current_time_point = current_time_point + + if self.current_region == 1 and self.current_time_point == 0: # First region + self.acquisition_start_time = time.time() + self.num_regions = num_regions + + progress_parts = [] + # Update timepoint progress if there are multiple timepoints and the timepoint has changed + if self.entry_Nt.value() > 1: + progress_parts.append(f"Time {current_time_point + 1}/{self.entry_Nt.value()}") + + # Update region progress if there are multiple regions + if num_regions > 1: + progress_parts.append(f"Region {current_region}/{num_regions}") + + # Set the progress label text, ensuring it's not empty + progress_text = " ".join(progress_parts) + self.progress_label.setText(progress_text if progress_text else "Progress") + + self.progress_bar.setValue(0) + + def update_eta_display(self): + if self.eta_seconds > 0: + self.eta_seconds -= 1 # Decrease by 1 second + hours, remainder = divmod(int(self.eta_seconds), 3600) + minutes, seconds = divmod(remainder, 60) + if hours > 0: + eta_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}" + else: + eta_str = f"{minutes:02d}:{seconds:02d}" + self.eta_label.setText(f"{eta_str}") + else: + self.eta_timer.stop() + self.eta_label.setText("00:00") + + def display_progress_bar(self, show): + self.progress_label.setVisible(show) + self.progress_bar.setVisible(show) 
+ self.eta_label.setVisible(show) + if show: + self.progress_bar.setValue(0) + self.progress_label.setText("Region 0/0") + self.eta_label.setText("--:--") + self.acquisition_start_time = None + else: + self.eta_timer.stop() + + def toggle_z_range_controls(self, is_visible): + # Efficiently set visibility for all widgets in both layouts + for layout in (self.z_min_layout, self.z_max_layout): + for i in range(layout.count()): + widget = layout.itemAt(i).widget() + if widget: + widget.setVisible(is_visible) + + # Enable/disable NZ entry based on the inverse of is_visible + self.entry_NZ.setEnabled(not is_visible) + current_z = self.navigationController.z_pos_mm * 1000 + self.entry_minZ.setValue(current_z) + self.entry_maxZ.setValue(current_z) + + # Safely connect or disconnect signals + try: + if is_visible: + self.entry_minZ.valueChanged.connect(self.update_z_max) + self.entry_maxZ.valueChanged.connect(self.update_z_min) + self.entry_minZ.valueChanged.connect(self.update_Nz) + self.entry_maxZ.valueChanged.connect(self.update_Nz) + self.entry_deltaZ.valueChanged.connect(self.update_Nz) + else: + self.entry_minZ.valueChanged.disconnect(self.update_z_max) + self.entry_maxZ.valueChanged.disconnect(self.update_z_min) + self.entry_minZ.valueChanged.disconnect(self.update_Nz) + self.entry_maxZ.valueChanged.disconnect(self.update_Nz) + self.entry_deltaZ.valueChanged.disconnect(self.update_Nz) + except TypeError: + # Handle case where signals might not be connected/disconnected + pass + + # Update the layout + self.updateGeometry() + self.update() + + def set_default_scan_size(self): + self.set_default_shape() + print(self.navigationViewer.sample) + if 'glass slide' in self.navigationViewer.sample: + self.entry_scan_size.setEnabled(True) + self.entry_well_coverage.setEnabled(False) + else: + self.entry_well_coverage.setEnabled(True) + self.entry_well_coverage.setValue(100) + self.update_scan_size_from_coverage() + + def set_default_shape(self): + if 
self.scanCoordinates.format in ['384 well plate', '1536 well plate']: + self.combobox_shape.setCurrentText('Square') + elif self.scanCoordinates.format != 0: + self.combobox_shape.setCurrentText('Circle') + + def get_effective_well_size(self): + well_size = self.scanCoordinates.well_size_mm + if self.combobox_shape.currentText() == 'Circle': + fov_size_mm = (self.objectiveStore.get_pixel_size() / 1000) * Acquisition.CROP_WIDTH + return well_size + fov_size_mm * (1 + math.sqrt(2)) + return well_size + + def on_set_shape(self): + shape = self.combobox_shape.currentText() + if shape == 'Manual': + self.signal_draw_shape.emit(True) + else: + self.signal_draw_shape.emit(False) + self.update_coverage_from_scan_size() + self.update_coordinates() + + def update_manual_shape(self, shapes_data_mm): + self.clear_regions() + if shapes_data_mm and len(shapes_data_mm) > 0: + self.manual_shapes = shapes_data_mm + print(f"Manual ROIs updated with {len(self.manual_shapes)} shapes") + else: + self.manual_shapes = None + print("No valid shapes found, cleared manual ROIs") + self.update_coordinates() + + def convert_pixel_to_mm(self, pixel_coords): + # Convert pixel coordinates to millimeter coordinates + mm_coords = pixel_coords * self.napariMosaicWidget.viewer_pixel_size_mm + mm_coords += np.array([self.napariMosaicWidget.top_left_coordinate[1], self.napariMosaicWidget.top_left_coordinate[0]]) + return mm_coords + + def update_coverage_from_scan_size(self): + if 'glass slide' not in self.navigationViewer.sample and hasattr(self.navigationViewer, 'well_size_mm'): + effective_well_size = self.get_effective_well_size() + scan_size = self.entry_scan_size.value() + coverage = round((scan_size / effective_well_size) * 100, 2) + print('COVERAGE', coverage) + self.entry_well_coverage.setValue(coverage) + + def update_scan_size_from_coverage(self): + if hasattr(self.navigationViewer, 'well_size_mm'): + effective_well_size = self.get_effective_well_size() + coverage = 
self.entry_well_coverage.value() + scan_size = round((coverage / 100) * effective_well_size, 3) + print('SIZE', scan_size) + self.entry_scan_size.setValue(scan_size) + + def update_dz(self): + z_min = self.entry_minZ.value() + z_max = self.entry_maxZ.value() + nz = self.entry_NZ.value() + dz = (z_max - z_min) / (nz - 1) if nz > 1 else 0 + self.entry_deltaZ.setValue(dz) + + def update_Nz(self): + z_min = self.entry_minZ.value() + z_max = self.entry_maxZ.value() + dz = self.entry_deltaZ.value() + nz = math.ceil((z_max - z_min) / dz) + 1 + self.entry_NZ.setValue(nz) + + def set_z_min(self): + z_value = self.navigationController.z_pos_mm * 1000 # Convert to μm + self.entry_minZ.setValue(z_value) + + def set_z_max(self): + z_value = self.navigationController.z_pos_mm * 1000 # Convert to μm + self.entry_maxZ.setValue(z_value) + + def update_z_min(self, z_pos_um): + if z_pos_um < self.entry_minZ.value(): + self.entry_minZ.setValue(z_pos_um) + + def update_z_max(self, z_pos_um): + if z_pos_um > self.entry_maxZ.value(): + self.entry_maxZ.setValue(z_pos_um) + + def init_z(self, z_pos_mm=None): + # sets initial z range form the current z position used after startup of the GUI + if z_pos_mm is None: + z_pos_mm = self.navigationController.z_pos_mm + + # block entry update signals + self.entry_minZ.blockSignals(True) + self.entry_maxZ.blockSignals(True) + + # set entry range values bith to current z pos + self.entry_minZ.setValue(z_pos_mm*1000) + self.entry_maxZ.setValue(z_pos_mm*1000) + print("init z-level wellplate:", self.entry_minZ.value()) + + # reallow updates from entry sinals (signal enforces min <= max when we update either entry) + self.entry_minZ.blockSignals(False) + self.entry_maxZ.blockSignals(False) + + def set_live_scan_coordinates(self, x_mm, y_mm): + parent = self.multipointController.parent + is_current_widget = (parent is not None and hasattr(parent, 'recordTabWidget') and + parent.recordTabWidget.currentWidget() == self) + + if 
self.combobox_shape.currentText() != 'Manual' and self.scanCoordinates.format == 'glass slide' and (parent is None or is_current_widget): + + if self.region_coordinates: + self.clear_regions() + + self.add_region('current', x_mm, y_mm) + + def set_well_coordinates(self, selected): + self.well_selected = selected and bool(self.scanCoordinates.get_selected_wells()) + if hasattr(self.multipointController.parent, 'recordTabWidget') and self.multipointController.parent.recordTabWidget.currentWidget() == self: + if self.scanCoordinates.format == 'glass slide': + x = self.navigationController.x_pos_mm + y = self.navigationController.y_pos_mm + self.set_live_scan_coordinates(x, y) + + elif self.well_selected: + # Get the set of currently selected well IDs + selected_well_ids = set(self.scanCoordinates.name) + + # Remove regions that are no longer selected + for well_id in list(self.region_coordinates.keys()): + if well_id not in selected_well_ids: + self.remove_region(well_id) + + # Add regions for selected wells + for well_id, (x, y) in zip(self.scanCoordinates.name, self.scanCoordinates.coordinates_mm): + if well_id not in self.region_coordinates: + self.add_region(well_id, x, y) + + self.signal_update_navigation_viewer.emit() + print(f"Updated region coordinates: {len(self.region_coordinates)} wells") + + else: + print("Clear well coordinates") + self.clear_regions() + + def update_coordinates(self): + shape = self.combobox_shape.currentText() + if shape == 'Manual': + self.region_fov_coordinates_dict.clear() + self.region_coordinates.clear() + if self.manual_shapes is not None: + # Handle manual ROIs + for i, manual_shape in enumerate(self.manual_shapes): + scan_coordinates = self.create_manual_region_coordinates( + self.objectiveStore, + manual_shape, + overlap_percent=self.entry_overlap.value() + ) + if scan_coordinates: + if len(self.manual_shapes) <= 1: + region_name = f'manual' + else: + region_name = f'manual_{i}' + self.region_fov_coordinates_dict[region_name] = 
scan_coordinates + # Set the region coordinates to the center of the manual ROI + center = np.mean(manual_shape, axis=0) + self.region_coordinates[region_name] = [center[0], center[1]] + else: + print("No Manual ROI found") + + elif 'glass slide' in self.navigationViewer.sample: + x = self.navigationController.x_pos_mm + y = self.navigationController.y_pos_mm + self.set_live_scan_coordinates(x, y) + else: + if len(self.region_coordinates) > 0: + self.clear_regions() + self.set_well_coordinates(True) + + def update_region_z_level(self, well_id, new_z): + if len(self.region_coordinates[well_id]) == 3: + # [x, y, z] -> [x, y, new_z] + self.region_coordinates[well_id][2] = new_z + else: + # [x, y] -> [x, y, new_z] + self.region_coordinates[well_id].append[new_z] + print(f"Updated z-level to {new_z} for region {well_id}") + + def add_region(self, well_id, x, y): + z = self.navigationController.z_pos_mm + action = "Updated" if well_id in self.region_coordinates else "Added" + + self.region_coordinates[well_id] = [float(x), float(y)] #, float(z)] + + scan_coordinates = self.create_region_coordinates( + self.objectiveStore, + x, y, + scan_size_mm=self.entry_scan_size.value(), + overlap_percent=self.entry_overlap.value(), + shape=self.combobox_shape.currentText() + ) + self.region_fov_coordinates_dict[well_id] = scan_coordinates + + print(f"{action} Region: {well_id} - x={x:.3f}, y={y:.3f}") #, z={z:.3f}") + # print("Size:", self.entry_scan_size.value()) + # print("Shape:", self.combobox_shape.currentText()) + # print("# fovs in region:", len(scan_coordinates)) + + def remove_region(self, well_id): + if well_id in self.region_coordinates: + del self.region_coordinates[well_id] + + if well_id in self.region_fov_coordinates_dict: + region_scan_coordinates = self.region_fov_coordinates_dict.pop(well_id) + for coord in region_scan_coordinates: + self.navigationViewer.deregister_fov_to_image(coord[0], coord[1]) + + print(f"Removed Region: {well_id}") + + def clear_regions(self): 
+ self.navigationViewer.clear_overlay() + self.region_coordinates.clear() + self.region_fov_coordinates_dict.clear() + print("Cleared All Regions") + + def create_region_coordinates(self, objectiveStore, center_x, center_y, scan_size_mm=None, overlap_percent=10, shape='Square'): + if shape == 'Manual': + return self.create_manual_region_coordinates(objectiveStore, self.manual_shapes, overlap_percent) + + if scan_size_mm is None: + scan_size_mm = self.scanCoordinates.well_size_mm + pixel_size_um = objectiveStore.get_pixel_size() + fov_size_mm = (pixel_size_um / 1000) * Acquisition.CROP_WIDTH + step_size_mm = fov_size_mm * (1 - overlap_percent / 100) + + steps = math.floor(scan_size_mm / step_size_mm) + if shape == 'Circle': + tile_diagonal = math.sqrt(2) * fov_size_mm + if steps % 2 == 1: # for odd steps + actual_scan_size_mm = (steps - 1) * step_size_mm + tile_diagonal + else: # for even steps + actual_scan_size_mm = math.sqrt(((steps - 1) * step_size_mm + fov_size_mm)**2 + (step_size_mm + fov_size_mm)**2) + + if actual_scan_size_mm > scan_size_mm: + actual_scan_size_mm -= step_size_mm + steps -= 1 + else: + actual_scan_size_mm = (steps - 1) * step_size_mm + fov_size_mm + + steps = max(1, steps) # Ensure at least one step + # print(f"steps: {steps}, step_size_mm: {step_size_mm}") + # print(f"scan size mm: {scan_size_mm}") + # print(f"actual scan size mm: {actual_scan_size_mm}") + + scan_coordinates = [] + half_steps = (steps - 1) / 2 + radius_squared = (scan_size_mm / 2) ** 2 + fov_size_mm_half = fov_size_mm / 2 + + for i in range(steps): + row = [] + y = center_y + (i - half_steps) * step_size_mm + for j in range(steps): + x = center_x + (j - half_steps) * step_size_mm + if shape == 'Square' or (shape == 'Circle' and self._is_in_circle(x, y, center_x, center_y, radius_squared, fov_size_mm_half)): + row.append((x, y)) + self.navigationViewer.register_fov_to_image(x, y) + + if self.fov_pattern == 'S-Pattern' and i % 2 == 1: + row.reverse() + 
scan_coordinates.extend(row) + + if not scan_coordinates and shape == 'Circle': + scan_coordinates.append((center_x, center_y)) + self.navigationViewer.register_fov_to_image(center_x, center_y) + + self.signal_update_navigation_viewer.emit() + return scan_coordinates + + def _is_in_circle(self, x, y, center_x, center_y, radius_squared, fov_size_mm_half): + corners = [ + (x - fov_size_mm_half, y - fov_size_mm_half), + (x + fov_size_mm_half, y - fov_size_mm_half), + (x - fov_size_mm_half, y + fov_size_mm_half), + (x + fov_size_mm_half, y + fov_size_mm_half) + ] + return all((cx - center_x)**2 + (cy - center_y)**2 <= radius_squared for cx, cy in corners) + + def create_scan_grid(self, objectiveStore, scan_size_mm=None, overlap_percent=10, shape='Square'): + if scan_size_mm is None: + scan_size_mm = self.scanCoordinates.well_size_mm + + pixel_size_um = objectiveStore.get_pixel_size() + fov_size_mm = (pixel_size_um / 1000) * Acquisition.CROP_WIDTH + step_size_mm = fov_size_mm * (1 - overlap_percent / 100) + + steps = math.floor(scan_size_mm / step_size_mm) + if shape == 'Circle': + # check if corners of middle row/col all fit + if steps % 2 == 1: # for odd steps + tile_diagonal = math.sqrt(2) * fov_size_mm + actual_scan_size_mm = (steps - 1) * step_size_mm + tile_diagonal + else: # for even steps + actual_scan_size_mm = math.sqrt(((steps - 1) * step_size_mm + fov_size_mm)**2 + (step_size_mm + fov_size_mm)**2) + + if actual_scan_size_mm > scan_size_mm: + actual_scan_size_mm -= step_size_mm + steps -= 1 + else: + actual_scan_size_mm = (steps - 1) * step_size_mm + fov_size_mm + + steps = max(1, steps) # Ensure at least one step + # print("steps:", steps) + # print("scan size mm:", scan_size_mm) + # print("actual scan size mm:", actual_scan_size_mm) + + region_skip_positions = [] + + if shape == 'Circle': + radius = scan_size_mm / 2 + for i in range(steps): + for j in range(steps): + x_rel = (j - (steps - 1) / 2) * step_size_mm + y_rel = (i - (steps - 1) / 2) * step_size_mm 
+ corners = [ + (x_rel - fov_size_mm / 2, y_rel - fov_size_mm / 2), # Top-left + (x_rel + fov_size_mm / 2, y_rel - fov_size_mm / 2), # Top-right + (x_rel - fov_size_mm / 2, y_rel + fov_size_mm / 2), # Bottom-left + (x_rel + fov_size_mm / 2, y_rel + fov_size_mm / 2) # Bottom-right + ] + if any(math.sqrt(cx**2 + cy**2) > radius for cx, cy in corners): + region_skip_positions.append((i, j)) + + # If all positions were skipped, clear the list and set steps to 1 + if len(region_skip_positions) == steps * steps: + region_skip_positions.clear() + steps = 1 + + # self.scanCoordinates.grid_skip_positions = region_skip_positions + return steps, step_size_mm + + def create_manual_region_coordinates(self, objectiveStore, shape_coords, overlap_percent): + if shape_coords is None or len(shape_coords) < 3: + print("Invalid manual ROI data") + return [] + + pixel_size_um = objectiveStore.get_pixel_size() + fov_size_mm = (pixel_size_um / 1000) * Acquisition.CROP_WIDTH + step_size_mm = fov_size_mm * (1 - overlap_percent / 100) + + # Ensure shape_coords is a numpy array + shape_coords = np.array(shape_coords) + if shape_coords.ndim == 1: + shape_coords = shape_coords.reshape(-1, 2) + elif shape_coords.ndim > 2: + print(f"Unexpected shape of manual_shape: {shape_coords.shape}") + return [] + + # Calculate bounding box + x_min, y_min = np.min(shape_coords, axis=0) + x_max, y_max = np.max(shape_coords, axis=0) + + # Create a grid of points within the bounding box + x_range = np.arange(x_min, x_max + step_size_mm, step_size_mm) + y_range = np.arange(y_min, y_max + step_size_mm, step_size_mm) + xx, yy = np.meshgrid(x_range, y_range) + grid_points = np.column_stack((xx.ravel(), yy.ravel())) + + # # Use Delaunay triangulation for efficient point-in-polygon test + # hull = Delaunay(shape_coords) + # mask = hull.find_simplex(grid_points) >= 0 + + # Use Ray Casting for point-in-polygon test + mask = np.array([self.point_inside_polygon(x, y, shape_coords) for x, y in grid_points]) + + # Filter 
points inside the polygon + valid_points = grid_points[mask] + + # Sort points + sorted_indices = np.lexsort((valid_points[:, 0], valid_points[:, 1])) + sorted_points = valid_points[sorted_indices] + + # Apply S-Pattern if needed + if self.fov_pattern == 'S-Pattern': + unique_y = np.unique(sorted_points[:, 1]) + for i in range(1, len(unique_y), 2): + mask = sorted_points[:, 1] == unique_y[i] + sorted_points[mask] = sorted_points[mask][::-1] + + # Register FOVs + for x, y in sorted_points: + self.navigationViewer.register_fov_to_image(x, y) + + self.signal_update_navigation_viewer.emit() + return sorted_points.tolist() + + def point_inside_polygon(self, x, y, poly): + n = len(poly) + inside = False + p1x, p1y = poly[0] + for i in range(n + 1): + p2x, p2y = poly[i % n] + if y > min(p1y, p2y): + if y <= max(p1y, p2y): + if x <= max(p1x, p2x): + if p1y != p2y: + xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x + if p1x == p2x or x <= xinters: + inside = not inside + p1x, p1y = p2x, p2y + return inside + + def sort_coordinates(self): + print(f"Acquisition pattern: {self.acquisition_pattern}") + + if len(self.region_coordinates) <= 1: + return + + def sort_key(item): + key, coord = item + if 'manual' in key: + return (0, coord[1], coord[0]) # Manual coords: sort by y, then x + else: + row, col = key[0], int(key[1:]) + return (1, ord(row), col) # Well coords: sort by row, then column + + sorted_items = sorted(self.region_coordinates.items(), key=sort_key) + + if self.acquisition_pattern == 'S-Pattern': + # Group by row and reverse alternate rows + rows = itertools.groupby(sorted_items, key=lambda x: x[1][1] if 'manual' in x[0] else x[0][0]) + sorted_items = [] + for i, (_, group) in enumerate(rows): + row = list(group) + if i % 2 == 1: + row.reverse() + sorted_items.extend(row) + + # Update dictionaries efficiently + self.region_coordinates = {k: v for k, v in sorted_items} + self.region_fov_coordinates_dict = {k: self.region_fov_coordinates_dict[k] + for k, _ in 
sorted_items + if k in self.region_fov_coordinates_dict} + + def toggle_acquisition(self, pressed): + if not self.base_path_is_set: + self.btn_startAcquisition.setChecked(False) + QMessageBox.warning(self, "Warning", "Please choose base saving directory first") + return + + # if 'glass slide' in self.navigationViewer.sample and not self.well_selected: # will use current location + # self.btn_startAcquisition.setChecked(False) + # msg = QMessageBox() + # msg.setText("Please select a well to scan first") + # msg.exec_() + # return + + if not self.list_configurations.selectedItems(): + self.btn_startAcquisition.setChecked(False) + QMessageBox.warning(self, "Warning", "Please select at least one imaging channel") + return + + if pressed: + self.setEnabled_all(False) + + scan_size_mm = self.entry_scan_size.value() + overlap_percent = self.entry_overlap.value() + shape = self.combobox_shape.currentText() + + self.sort_coordinates() + + if len(self.region_coordinates) == 0: + # Use current location if no regions added + x = self.navigationController.x_pos_mm + y = self.navigationController.y_pos_mm + z = self.navigationController.z_pos_mm + self.region_coordinates['current'] = [x, y, z] + scan_coordinates = self.create_region_coordinates( + self.objectiveStore, + x, y, + scan_size_mm=scan_size_mm, + overlap_percent=overlap_percent, + shape=shape + ) + self.region_fov_coordinates_dict['current'] = scan_coordinates + + # Calculate total number of positions for signal emission # not needed ever + total_positions = sum(len(coords) for coords in self.region_fov_coordinates_dict.values()) + Nx = Ny = int(math.sqrt(total_positions)) + dx_mm = dy_mm = scan_size_mm / (Nx - 1) if Nx > 1 else scan_size_mm + + if self.checkbox_set_z_range.isChecked(): + # Set Z-range (convert from μm to mm) + minZ = self.entry_minZ.value() / 1000 # Convert from μm to mm + maxZ = self.entry_maxZ.value() / 1000 # Convert from μm to mm + self.multipointController.set_z_range(minZ, maxZ) + print("set 
z-range", (minZ, maxZ))
            else:
                # No explicit z-range selected: acquire a single plane at the current z.
                z = self.navigationController.z_pos_mm
                self.multipointController.set_z_range(z, z)

            # Push all acquisition parameters from the UI into the controller.
            self.multipointController.set_deltaZ(self.entry_deltaZ.value())
            self.multipointController.set_NZ(self.entry_NZ.value())
            self.multipointController.set_deltat(self.entry_dt.value())
            self.multipointController.set_Nt(self.entry_Nt.value())
            self.multipointController.set_use_piezo(self.checkbox_usePiezo.isChecked())
            self.multipointController.set_af_flag(self.checkbox_withAutofocus.isChecked())
            self.multipointController.set_reflection_af_flag(self.checkbox_withReflectionAutofocus.isChecked())
            self.multipointController.set_selected_configurations([item.text() for item in self.list_configurations.selectedItems()])
            self.multipointController.start_new_experiment(self.lineEdit_experimentID.text())

            # Emit signals
            self.signal_acquisition_started.emit(True)
            self.signal_acquisition_shape.emit(self.entry_NZ.value(), self.entry_deltaZ.value())

            # Start acquisition
            self.multipointController.run_acquisition(location_list=self.region_coordinates, coordinate_dict=self.region_fov_coordinates_dict)

        else:
            # Button un-toggled while running: request an abort and re-enable the UI.
            # NOTE(review): "aquisition" spelling matches the controller's method name — do not "fix" here alone.
            self.multipointController.request_abort_aquisition()
            self.setEnabled_all(True)

    def acquisition_is_finished(self):
        """Restore UI state once the multipoint acquisition completes."""
        self.signal_acquisition_started.emit(False)
        self.btn_startAcquisition.setChecked(False)
        self.set_well_coordinates(self.well_selected)
        if self.combobox_shape.currentText() == 'Manual':
            self.signal_draw_shape.emit(True)
        self.setEnabled_all(True)

    def setEnabled_all(self, enabled):
        """Enable/disable every child widget except the start button and the progress indicators."""
        for widget in self.findChildren(QWidget):
            if (widget != self.btn_startAcquisition and
                widget != self.progress_bar and
                widget != self.progress_label and
                widget != self.eta_label):
                widget.setEnabled(enabled)

        # Well coverage is never user-editable in glass-slide mode.
        if self.scanCoordinates.format == 'glass slide':
            self.entry_well_coverage.setEnabled(False)

    def set_saving_dir(self):
        """Ask the user for a base save directory and propagate it to the controller."""
        dialog = QFileDialog()
        save_dir_base = dialog.getExistingDirectory(None, "Select Folder")
        self.multipointController.set_base_path(save_dir_base)
        self.lineEdit_savingDir.setText(save_dir_base)
        self.base_path_is_set = True

    def set_deltaZ(self, value):
        """Round the requested z step (um) to the nearest achievable motor microstep and apply it."""
        mm_per_ustep = SCREW_PITCH_Z_MM/(self.multipointController.navigationController.z_microstepping*FULLSTEPS_PER_REV_Z)
        deltaZ = round(value/1000/mm_per_ustep)*mm_per_ustep*1000
        self.entry_deltaZ.setValue(deltaZ)
        self.multipointController.set_deltaZ(deltaZ)

    def emit_selected_channels(self):
        """Broadcast the names of the currently selected imaging configurations."""
        selected_channels = [item.text() for item in self.list_configurations.selectedItems()]
        self.signal_acquisition_channels.emit(selected_channels)

    def display_stitcher_widget(self, checked):
        # Forward the checkbox state so the parent can show/hide the stitcher panel.
        self.signal_stitcher_widget.emit(checked)


class StitcherWidget(QFrame):
    """Panel of stitching options (registration, flatfield, output format) plus progress display."""

    def __init__(self, configurationManager, contrastManager, *args, **kwargs):
        super(StitcherWidget, self).__init__(*args, **kwargs)
        self.configurationManager = configurationManager
        self.contrastManager = contrastManager
        self.stitcherThread = None   # set later via setStitcherThread()
        self.output_path = ""        # path of the last stitched output
        self.initUI()

    def initUI(self):
        """Build the two option rows, the Napari-view button and the progress bar."""
        self.setFrameStyle(QFrame.Panel | QFrame.Raised)  # Set frame style
        self.layout = QVBoxLayout(self)
        self.rowLayout1 = QHBoxLayout()
        self.rowLayout2 = QHBoxLayout()

        # Use registration checkbox
        self.useRegistrationCheck = QCheckBox("Registration")
        self.useRegistrationCheck.toggled.connect(self.onRegistrationCheck)
        self.rowLayout1.addWidget(self.useRegistrationCheck)
        self.rowLayout1.addStretch()

        # Apply flatfield correction checkbox
        self.applyFlatfieldCheck = QCheckBox("Flatfield Correction")
        self.rowLayout1.addWidget(self.applyFlatfieldCheck)
        self.rowLayout1.addStretch()

        # Output format dropdown
        self.outputFormatLabel = QLabel('Output Format', self)
        self.outputFormatCombo = QComboBox(self)
        self.outputFormatCombo.addItem("OME-ZARR")
        self.outputFormatCombo.addItem("OME-TIFF")
        self.rowLayout1.addWidget(self.outputFormatLabel)
        self.rowLayout1.addWidget(self.outputFormatCombo)

        # Select registration channel (hidden until the registration checkbox is ticked)
        self.registrationChannelLabel = QLabel("Registration Configuration", self)
        self.registrationChannelLabel.setVisible(False)
        self.rowLayout2.addWidget(self.registrationChannelLabel)
        self.registrationChannelCombo = QComboBox(self)
        self.registrationChannelLabel.setVisible(False)
        self.registrationChannelCombo.setVisible(False)
        self.registrationChannelCombo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        self.rowLayout2.addWidget(self.registrationChannelCombo)

        # Select registration cz-level
        self.registrationZLabel = QLabel(" Z-Level", self)
        self.registrationZLabel.setVisible(False)
        self.rowLayout2.addWidget(self.registrationZLabel)
        self.registrationZCombo = QSpinBox(self)  # NOTE: despite the name this is a QSpinBox, not a combo box
        self.registrationZCombo.setSingleStep(1)
        self.registrationZCombo.setMinimum(0)
        self.registrationZCombo.setMaximum(0)
        self.registrationZCombo.setValue(0)
        self.registrationZLabel.setVisible(False)
        self.registrationZCombo.setVisible(False)
        self.rowLayout2.addWidget(self.registrationZCombo)

        self.layout.addLayout(self.rowLayout1)
        self.layout.addLayout(self.rowLayout2)
        self.setLayout(self.layout)

        # Button to view output in Napari
        self.viewOutputButton = QPushButton("View Output in Napari")
        self.viewOutputButton.setEnabled(False)  # Initially disabled
        self.viewOutputButton.setVisible(False)
        self.viewOutputButton.clicked.connect(self.viewOutputNapari)
        self.layout.addWidget(self.viewOutputButton)

        # Progress bar
        progress_row = QHBoxLayout()

        # Status label
        self.statusLabel = QLabel("Status: Image Acquisition")
        progress_row.addWidget(self.statusLabel)
        self.statusLabel.setVisible(False)

        self.progressBar = QProgressBar()
        progress_row.addWidget(self.progressBar)
        self.progressBar.setVisible(False)  # Initially hidden
        self.layout.addLayout(progress_row)

    def setStitcherThread(self, thread):
        """Remember the worker QThread so we can shut it down on finish/close."""
        self.stitcherThread = thread

    def onRegistrationCheck(self, checked):
        """Show/hide the registration channel and z-level pickers with the checkbox."""
        self.registrationChannelLabel.setVisible(checked)
        self.registrationChannelCombo.setVisible(checked)
        self.registrationZLabel.setVisible(checked)
        self.registrationZCombo.setVisible(checked)

    def updateRegistrationChannels(self, selected_channels):
        """Repopulate the registration channel picker from the acquisition's channel list."""
        self.registrationChannelCombo.clear()  # Clear existing items
        self.registrationChannelCombo.addItems(selected_channels)

    def updateRegistrationZLevels(self, Nz):
        # Valid z-levels are 0 .. Nz-1.
        self.registrationZCombo.setMinimum(0)
        self.registrationZCombo.setMaximum(Nz - 1)

    def gettingFlatfields(self):
        """Switch the progress UI into the 'calculating flatfields' state."""
        self.statusLabel.setText('Status: Calculating Flatfields')
        self.viewOutputButton.setVisible(False)
        self.viewOutputButton.setStyleSheet("")
        self.progressBar.setValue(0)
        self.statusLabel.setVisible(True)
        self.progressBar.setVisible(True)

    def startingStitching(self):
        """Switch the progress UI into the 'stitching' state."""
        self.statusLabel.setText('Status: Stitching Scans')
        self.viewOutputButton.setVisible(False)
        self.progressBar.setValue(0)
        self.statusLabel.setVisible(True)
        self.progressBar.setVisible(True)

    def updateProgressBar(self, value, total):
        self.progressBar.setMaximum(total)
        self.progressBar.setValue(value)
        self.progressBar.setVisible(True)

    def startingSaving(self, stitch_complete=False):
        """Show an indeterminate progress bar while the stitched data is written to disk."""
        if stitch_complete:
            self.statusLabel.setText('Status: Saving Stitched Acquisition')
        else:
            self.statusLabel.setText('Status: Saving Stitched Region')
        self.statusLabel.setVisible(True)
        self.progressBar.setRange(0, 0)  # indeterminate mode.
        self.progressBar.setVisible(True)

    def finishedSaving(self, output_path, dtype):
        """Tear down the worker thread and expose the 'view in Napari' button."""
        if self.stitcherThread is not None:
            self.stitcherThread.quit()
            self.stitcherThread.deleteLater()
        self.statusLabel.setVisible(False)
        self.progressBar.setVisible(False)
        self.viewOutputButton.setVisible(True)
        self.viewOutputButton.setStyleSheet("background-color: #C2C2FF")
        self.viewOutputButton.setEnabled(True)
        try:
            # disconnect() raises TypeError when no connection exists; reconnect to
            # guarantee exactly one active connection.
            self.viewOutputButton.clicked.disconnect()
        except TypeError:
            pass
        self.viewOutputButton.clicked.connect(self.viewOutputNapari)

        self.output_path = output_path

    def extractWavelength(self, name):
        """Best-effort channel key from a layer/config name: wavelength after 'Fluorescence', or R/G/B."""
        # Split the string and find the wavelength number immediately after "Fluorescence"
        parts = name.split()
        if 'Fluorescence' in parts:
            index = parts.index('Fluorescence') + 1
            if index < len(parts):
                return parts[index].split()[0]  # Assuming '488 nm Ex' and taking '488'
        for color in ['R', 'G', 'B']:
            if color in parts or "full_" + color in parts:
                return color
        return None

    def generateColormap(self, channel_info):
        """Convert a HEX value to a normalized RGB tuple."""
        c0 = (0, 0, 0)
        c1 = (((channel_info['hex'] >> 16) & 0xFF) / 255,  # Normalize the Red component
              ((channel_info['hex'] >> 8) & 0xFF) / 255,   # Normalize the Green component
              (channel_info['hex'] & 0xFF) / 255)          # Normalize the Blue component
        return Colormap(colors=[c0, c1], controls=[0, 1], name=channel_info['name'])

    def updateContrastLimits(self, channel, min_val, max_val):
        # Persist per-channel contrast limits in the shared manager.
        self.contrastManager.update_limits(channel, min_val, max_val)

    def viewOutputNapari(self):
        """Open the stitched output in a new Napari viewer and apply per-channel colormaps/contrast."""
        try:
            napari_viewer = napari.Viewer()
            if ".ome.zarr" in self.output_path:
                napari_viewer.open(self.output_path, plugin='napari-ome-zarr')
            else:
                napari_viewer.open(self.output_path)

            for layer in napari_viewer.layers:
                layer_name = layer.name.replace("_", " ").replace("full ", "full_")
                channel_info = CHANNEL_COLORS_MAP.get(self.extractWavelength(layer_name), {'hex': 0xFFFFFF, 'name': 'gray'})

                if channel_info['name'] in AVAILABLE_COLORMAPS:
                    layer.colormap = AVAILABLE_COLORMAPS[channel_info['name']]
                else:
                    layer.colormap = self.generateColormap(channel_info)

                min_val, max_val = self.contrastManager.get_limits(layer_name)
                layer.contrast_limits = (min_val, max_val)

        except Exception as e:
            QMessageBox.critical(self, "Error Opening in Napari", str(e))
            print(f"An error occurred while opening output in Napari: {e}")

    def resetUI(self):
        """Return every control to its default state (e.g. between acquisitions)."""
        self.output_path = ""

        # Reset UI components to their default states
        self.applyFlatfieldCheck.setChecked(False)
        self.outputFormatCombo.setCurrentIndex(0)  # Assuming the first index is the default
        self.useRegistrationCheck.setChecked(False)
        self.registrationChannelCombo.clear()  # Clear existing items
        self.registrationChannelLabel.setVisible(False)
        self.registrationChannelCombo.setVisible(False)

        # Reset the visibility and state of buttons and labels
        self.viewOutputButton.setEnabled(False)
        self.viewOutputButton.setVisible(False)
        self.progressBar.setValue(0)
        self.progressBar.setVisible(False)
        self.statusLabel.setText("Status: Image Acquisition")
        self.statusLabel.setVisible(False)

    def closeEvent(self, event):
        """Make sure the stitcher thread is fully stopped before the widget closes."""
        if self.stitcherThread is not None:
            self.stitcherThread.quit()
            self.stitcherThread.wait()
            self.stitcherThread.deleteLater()
            self.stitcherThread = None
        super().closeEvent(event)


class NapariLiveWidget(QWidget):
    """Live-view widget embedding a napari viewer plus camera/illumination controls."""

    # (x_centered, y_centered, width, height) of a double-click inside the live layer
    signal_coordinates_clicked = Signal(int, int, int, int)
    signal_newExposureTime = Signal(float)
    signal_newAnalogGain = Signal(float)
    signal_autoLevelSetting = Signal(bool)

    def __init__(self, streamHandler, liveController, navigationController, configurationManager, contrastManager, wellSelectionWidget=None, show_trigger_options=True, show_display_options=True, show_autolevel=False, autolevel=False, parent=None):
        super().__init__(parent)
        self.streamHandler = streamHandler
        self.liveController = liveController
        self.navigationController = navigationController
        self.configurationManager = configurationManager
        self.wellSelectionWidget = wellSelectionWidget
        self.live_configuration = self.liveController.currentConfiguration
        # Canvas geometry / dtype of the current live layer
        self.image_width = 0
        self.image_height = 0
        self.dtype = np.uint8
        self.channels = set()
        # init flags for the mono and RGB live layers, and for the initial zoom
        self.init_live = False
        self.init_live_rgb = False
        self.init_scale = False
        self.previous_scale = None
        self.previous_center = None
        self.last_was_autofocus = False
        self.fps_trigger = 10
        self.fps_display = 10
        self.contrastManager = contrastManager

        self.initNapariViewer()
        self.addNapariGrayclipColormap()
        self.initControlWidgets(show_trigger_options, show_display_options, show_autolevel, autolevel)
        self.update_microscope_mode_by_name(self.live_configuration.name)

    def initNapariViewer(self):
        """Create a headless napari viewer and embed its Qt window in this widget."""
        self.viewer = napari.Viewer(show=False)
        self.viewerWidget = self.viewer.window._qt_window  # NOTE: private napari API
        self.viewer.dims.axis_labels = ['Y-axis', 'X-axis']
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.viewerWidget)
        self.setLayout(self.layout)
        self.customizeViewer()

    def customizeViewer(self):
        # Hide the status bar (which includes the activity button)
        if hasattr(self.viewer.window, '_status_bar'):
            self.viewer.window._status_bar.hide()

        # Hide the layer buttons
        if hasattr(self.viewer.window._qt_viewer, 'layerButtons'):
            self.viewer.window._qt_viewer.layerButtons.hide()

    def updateHistogram(self, layer):
        """Mirror the given layer's data and contrast limits into the pyqtgraph histogram."""
        if self.histogram_widget is not None and layer.data is not None:
            self.pg_image_item.setImage(layer.data, autoLevels=False)
            self.histogram_widget.setLevels(*layer.contrast_limits)
            self.histogram_widget.setHistogramRange(layer.data.min(), layer.data.max())

            # Set the histogram widget's region to match the layer's contrast limits
            self.histogram_widget.region.setRegion(layer.contrast_limits)

            # Update colormap only if it has changed
            if hasattr(self, 'last_colormap') and self.last_colormap != layer.colormap.name:
                self.histogram_widget.gradient.setColorMap(self.createColorMap(layer.colormap))
            self.last_colormap = layer.colormap.name

    def createColorMap(self, colormap):
        """Convert a napari colormap into a pyqtgraph ColorMap with evenly spaced stops."""
        colors = colormap.colors
        positions = np.linspace(0, 1, len(colors))
        return pg.ColorMap(positions, colors)

    def initControlWidgets(self, show_trigger_options, show_display_options, show_autolevel, autolevel):
        """Build the histogram dock plus all live-control widgets, then dock them in napari."""
        # Initialize histogram widget
        self.pg_image_item = pg.ImageItem()
        self.histogram_widget = pg.HistogramLUTWidget(image=self.pg_image_item)
        self.histogram_widget.setFixedWidth(100)
        self.histogram_dock = self.viewer.window.add_dock_widget(
            self.histogram_widget, area='right', name="hist"
        )
        self.histogram_dock.setFeatures(QDockWidget.NoDockWidgetFeatures)
        self.histogram_dock.setTitleBarWidget(QWidget())  # remove the title bar
        self.histogram_widget.region.sigRegionChanged.connect(self.on_histogram_region_changed)
        self.histogram_widget.region.sigRegionChangeFinished.connect(self.on_histogram_region_changed)

        # Microscope Configuration
        self.dropdown_modeSelection = QComboBox()
        for config in self.configurationManager.configurations:
            self.dropdown_modeSelection.addItem(config.name)
        self.dropdown_modeSelection.setCurrentText(self.live_configuration.name)
        self.dropdown_modeSelection.currentTextChanged.connect(self.update_microscope_mode_by_name)

        # Live button
        self.btn_live = QPushButton("Start Live")
        self.btn_live.setCheckable(True)
        gradient_style = """
            QPushButton {
                background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1,
                                                  stop:0 #D6D6FF, stop:1 #C2C2FF);
                border-radius: 5px;
                color: black;
                border: 1px solid #A0A0A0;
            }
            QPushButton:checked {
                background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1,
                                                  stop:0 #FFD6D6, stop:1 #FFC2C2);
                border: 1px solid #A0A0A0;
            }
            QPushButton:hover {
                background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1,
                                                  stop:0 #E0E0FF, stop:1 #D0D0FF);
            }
            QPushButton:pressed {
                background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1,
                                                  stop:0 #9090C0, stop:1 #8080B0);
            }
        """
        self.btn_live.setStyleSheet(gradient_style)
        #self.btn_live.setStyleSheet("font-weight: bold; background-color: #7676F7") #6666D3
        current_height = self.btn_live.sizeHint().height()
        self.btn_live.setFixedHeight(int(current_height * 1.5))
        self.btn_live.clicked.connect(self.toggle_live)

        # Exposure Time
        self.entry_exposureTime = QDoubleSpinBox()
        self.entry_exposureTime.setRange(self.liveController.camera.EXPOSURE_TIME_MS_MIN, self.liveController.camera.EXPOSURE_TIME_MS_MAX)
        self.entry_exposureTime.setValue(self.live_configuration.exposure_time)
        self.entry_exposureTime.setSuffix(" ms")
        self.entry_exposureTime.valueChanged.connect(self.update_config_exposure_time)

        # Analog Gain
        self.entry_analogGain = QDoubleSpinBox()
        self.entry_analogGain.setRange(0, 24)
        self.entry_analogGain.setSingleStep(0.1)
        self.entry_analogGain.setValue(self.live_configuration.analog_gain)
        # self.entry_analogGain.setSuffix('x')
        self.entry_analogGain.valueChanged.connect(self.update_config_analog_gain)

        # Illumination Intensity
        self.slider_illuminationIntensity = QSlider(Qt.Horizontal)
        self.slider_illuminationIntensity.setRange(0, 100)
        self.slider_illuminationIntensity.setValue(int(self.live_configuration.illumination_intensity))
        self.slider_illuminationIntensity.setTickPosition(QSlider.TicksBelow)
        self.slider_illuminationIntensity.setTickInterval(10)
        self.slider_illuminationIntensity.valueChanged.connect(self.update_config_illumination_intensity)
        self.label_illuminationIntensity = QLabel(str(self.slider_illuminationIntensity.value()) + "%")
        self.slider_illuminationIntensity.valueChanged.connect(lambda v: self.label_illuminationIntensity.setText(str(v) + "%"))

        # Trigger mode
        self.dropdown_triggerMode = QComboBox()
        trigger_modes = [
            ('Software', TriggerMode.SOFTWARE),
            ('Hardware', TriggerMode.HARDWARE),
            ('Continuous', TriggerMode.CONTINUOUS)
        ]
        # Store the enum value as item userData so on_trigger_mode_changed can recover it.
        for display_name, mode in trigger_modes:
            self.dropdown_triggerMode.addItem(display_name, mode)
        self.dropdown_triggerMode.currentIndexChanged.connect(self.on_trigger_mode_changed)
        # self.dropdown_triggerMode = QComboBox()
        # self.dropdown_triggerMode.addItems([TriggerMode.SOFTWARE, TriggerMode.HARDWARE, TriggerMode.CONTINUOUS])
        # self.dropdown_triggerMode.currentTextChanged.connect(self.liveController.set_trigger_mode)

        # Trigger FPS
        self.entry_triggerFPS = QDoubleSpinBox()
        self.entry_triggerFPS.setRange(0.02, 1000)
        self.entry_triggerFPS.setValue(self.fps_trigger)
        #self.entry_triggerFPS.setSuffix(" fps")
        self.entry_triggerFPS.valueChanged.connect(self.liveController.set_trigger_fps)

        # Display FPS
        self.entry_displayFPS = QDoubleSpinBox()
        self.entry_displayFPS.setRange(1, 240)
        self.entry_displayFPS.setValue(self.fps_display)
        #self.entry_displayFPS.setSuffix(" fps")
        self.entry_displayFPS.valueChanged.connect(self.streamHandler.set_display_fps)

        # Resolution Scaling
        self.slider_resolutionScaling = QSlider(Qt.Horizontal)
        self.slider_resolutionScaling.setRange(10, 100)
        self.slider_resolutionScaling.setValue(int(DEFAULT_DISPLAY_CROP))
        self.slider_resolutionScaling.setTickPosition(QSlider.TicksBelow)
        self.slider_resolutionScaling.setTickInterval(10)
        self.slider_resolutionScaling.valueChanged.connect(self.update_resolution_scaling)
        self.label_resolutionScaling = QLabel(str(self.slider_resolutionScaling.value()) + "%")
        self.slider_resolutionScaling.valueChanged.connect(lambda v: self.label_resolutionScaling.setText(str(v) + "%"))

        # Autolevel
        self.btn_autolevel = QPushButton('Autolevel')
        self.btn_autolevel.setCheckable(True)
        self.btn_autolevel.setChecked(autolevel)
        self.btn_autolevel.clicked.connect(self.signal_autoLevelSetting.emit)

        def make_row(label_widget, entry_widget, value_label=None):
            # Small helper: label + entry (+ optional trailing value label) in one row.
            row = QHBoxLayout()
            row.addWidget(label_widget)
            row.addWidget(entry_widget)
            if value_label:
                row.addWidget(value_label)
            return row

        control_layout = QVBoxLayout()

        # Add widgets to layout
        control_layout.addWidget(self.dropdown_modeSelection)
        control_layout.addWidget(self.btn_live)
        control_layout.addSpacerItem(QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))

        row1 = make_row(QLabel('Exposure Time'), self.entry_exposureTime)
        control_layout.addLayout(row1)

        row2 = make_row(QLabel('Illumination'), self.slider_illuminationIntensity, self.label_illuminationIntensity)
        control_layout.addLayout(row2)

        row3 = make_row((QLabel('Analog Gain')), self.entry_analogGain)
        control_layout.addLayout(row3)
        control_layout.addSpacerItem(QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))

        if show_trigger_options:
            row0 = make_row(QLabel('Trigger Mode'), self.dropdown_triggerMode)
            control_layout.addLayout(row0)
            row00 = make_row(QLabel('Trigger FPS'), self.entry_triggerFPS)
            control_layout.addLayout(row00)
            control_layout.addSpacerItem(QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))

        if show_display_options:
            row4 = make_row((QLabel('Display FPS')), self.entry_displayFPS)
            control_layout.addLayout(row4)
            row5 = make_row(QLabel('Display Resolution'), self.slider_resolutionScaling, self.label_resolutionScaling)
            control_layout.addLayout(row5)
            control_layout.addSpacerItem(QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))

        if show_autolevel:
            control_layout.addWidget(self.btn_autolevel)
            control_layout.addSpacerItem(QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))

        control_layout.addStretch(1)

        add_live_controls = False
        if USE_NAPARI_FOR_LIVE_CONTROL or add_live_controls:
            live_controls_widget = QWidget()
            live_controls_widget.setLayout(control_layout)
            # layer_list_widget.setFixedWidth(270)

            # NOTE(review): uses private napari attributes (_qt_viewer, dock widgets) — version-sensitive.
            layer_controls_widget = self.viewer.window._qt_viewer.dockLayerControls.widget()
            layer_list_widget = self.viewer.window._qt_viewer.dockLayerList.widget()

            self.viewer.window._qt_viewer.layerButtons.hide()
            self.viewer.window.remove_dock_widget(self.viewer.window._qt_viewer.dockLayerControls)
            self.viewer.window.remove_dock_widget(self.viewer.window._qt_viewer.dockLayerList)

            # Add the actual dock widgets
            self.dock_layer_controls = self.viewer.window.add_dock_widget(layer_controls_widget, area='left', name='layer controls', tabify=True)
            self.dock_layer_list = self.viewer.window.add_dock_widget(layer_list_widget, area='left', name='layer list', tabify=True)
            self.dock_live_controls = self.viewer.window.add_dock_widget(live_controls_widget, area='left', name='live controls', tabify=True)

            self.viewer.window.window_menu.addAction(self.dock_live_controls.toggleViewAction())

        if USE_NAPARI_WELL_SELECTION:
            well_selector_layout = QVBoxLayout()
            #title_label = QLabel("Well Selector")
            #title_label.setAlignment(Qt.AlignCenter) # Center the title
            #title_label.setStyleSheet("font-weight: bold;") # Optional: style the title
            #well_selector_layout.addWidget(title_label)

            well_selector_row = QHBoxLayout()
            well_selector_row.addStretch(1)
            well_selector_row.addWidget(self.wellSelectionWidget)
            well_selector_row.addStretch(1)
            well_selector_layout.addLayout(well_selector_row)
            well_selector_layout.addStretch()

            well_selector_dock_widget = QWidget()
            well_selector_dock_widget.setLayout(well_selector_layout)
            self.dock_well_selector = self.viewer.window.add_dock_widget(well_selector_dock_widget, area='bottom', name='well selector')
            self.dock_well_selector.setFixedHeight(self.dock_well_selector.minimumSizeHint().height())

        layer_controls_widget = self.viewer.window._qt_viewer.dockLayerControls.widget()
        layer_list_widget = self.viewer.window._qt_viewer.dockLayerList.widget()

        self.viewer.window._qt_viewer.layerButtons.hide()
        self.viewer.window.remove_dock_widget(self.viewer.window._qt_viewer.dockLayerControls)
        self.viewer.window.remove_dock_widget(self.viewer.window._qt_viewer.dockLayerList)
        self.print_window_menu_items()

    def print_window_menu_items(self):
        # Debug helper: dump the napari window menu entries to stdout.
        print("Items in window_menu:")
        for action in self.viewer.window.window_menu.actions():
            print(action.text())

    def on_histogram_region_changed(self):
        """Propagate a histogram-region drag into the shared contrast manager."""
        if self.live_configuration.name:
            min_val, max_val = self.histogram_widget.region.getRegion()
            self.updateContrastLimits(self.live_configuration.name, min_val, max_val)

    def toggle_live(self, pressed):
        """Start or stop the live stream and update the button label accordingly."""
        if pressed:
            self.liveController.start_live()
            self.btn_live.setText("Stop Live")
        else:
            self.liveController.stop_live()
            self.btn_live.setText("Start Live")

    def toggle_live_controls(self, show):
        if show:
            self.dock_live_controls.show()
        else:
            self.dock_live_controls.hide()

    def toggle_well_selector(self, show):
        if show:
            self.dock_well_selector.show()
        else:
            self.dock_well_selector.hide()

    def replace_well_selector(self, wellSelector):
        """Swap in a new well-selection widget (e.g. after a plate format change)."""
        self.viewer.window.remove_dock_widget(self.dock_well_selector)
        self.wellSelectionWidget = wellSelector
        well_selector_layout = QHBoxLayout()
        well_selector_layout.addStretch(1)  # Add stretch on the left
        well_selector_layout.addWidget(self.wellSelectionWidget)
        well_selector_layout.addStretch(1)  # Add stretch on the right
        well_selector_dock_widget = QWidget()
        well_selector_dock_widget.setLayout(well_selector_layout)
        self.dock_well_selector = self.viewer.window.add_dock_widget(well_selector_dock_widget, area='bottom', name='well selector', tabify=True)

    def set_microscope_mode(self,config):
        # Setting the combo text fires currentTextChanged -> update_microscope_mode_by_name.
        self.dropdown_modeSelection.setCurrentText(config.name)

    def update_microscope_mode_by_name(self, current_microscope_mode_name):
        """Look up the configuration by name and apply it to the live controller and UI."""
        self.live_configuration = next((config for config in self.configurationManager.configurations if config.name == current_microscope_mode_name), None)
        if self.live_configuration:
            self.liveController.set_microscope_mode(self.live_configuration)
self.entry_exposureTime.setValue(self.live_configuration.exposure_time) + self.entry_analogGain.setValue(self.live_configuration.analog_gain) + self.slider_illuminationIntensity.setValue(int(self.live_configuration.illumination_intensity)) + + def update_config_exposure_time(self, new_value): + self.live_configuration.exposure_time = new_value + self.configurationManager.update_configuration(self.live_configuration.id, 'ExposureTime', new_value) + self.signal_newExposureTime.emit(new_value) + + def update_config_analog_gain(self, new_value): + self.live_configuration.analog_gain = new_value + self.configurationManager.update_configuration(self.live_configuration.id, 'AnalogGain', new_value) + self.signal_newAnalogGain.emit(new_value) + + def update_config_illumination_intensity(self, new_value): + self.live_configuration.illumination_intensity = new_value + self.configurationManager.update_configuration(self.live_configuration.id, 'IlluminationIntensity', new_value) + self.liveController.set_illumination(self.live_configuration.illumination_source, new_value) + + def update_resolution_scaling(self, value): + self.streamHandler.set_display_resolution_scaling(value) + self.liveController.set_display_resolution_scaling(value) + + def on_trigger_mode_changed(self, index): + # Get the actual value using user data + actual_value = self.dropdown_triggerMode.itemData(index) + print(f"Selected: {self.dropdown_triggerMode.currentText()} (actual value: {actual_value})") + + def addNapariGrayclipColormap(self): + if hasattr(napari.utils.colormaps.AVAILABLE_COLORMAPS, 'grayclip'): + return + grayclip = [] + for i in range(255): + grayclip.append([i / 255, i / 255, i / 255]) + grayclip.append([1, 0, 0]) + napari.utils.colormaps.AVAILABLE_COLORMAPS['grayclip'] = napari.utils.Colormap(name='grayclip', colors=grayclip) + + def initLiveLayer(self, channel, image_height, image_width, image_dtype, rgb=False): + """Initializes the full canvas for each channel based on the acquisition 
parameters.""" + self.viewer.layers.clear() + self.image_width = image_width + self.image_height = image_height + if self.dtype != np.dtype(image_dtype): + + self.contrastManager.scale_contrast_limits(np.dtype(image_dtype)) # Fix This to scale existing contrast limits to new dtype range + self.dtype = image_dtype + + self.channels.add(channel) + self.live_configuration.name = channel + + if rgb: + canvas = np.zeros((image_height, image_width, 3), dtype=self.dtype) + else: + canvas = np.zeros((image_height, image_width), dtype=self.dtype) + limits = self.getContrastLimits(self.dtype) + layer = self.viewer.add_image(canvas, name="Live View", visible=True, rgb=rgb, colormap='grayclip', + contrast_limits=limits, blending='additive') + layer.contrast_limits = self.contrastManager.get_limits(self.live_configuration.name, self.dtype) + layer.mouse_double_click_callbacks.append(self.onDoubleClick) + layer.events.contrast_limits.connect(self.signalContrastLimits) + self.updateHistogram(layer) + + if not self.init_scale: + self.resetView() + self.previous_scale = self.viewer.camera.zoom + self.previous_center = self.viewer.camera.center + else: + self.viewer.camera.zoom = self.previous_scale + self.viewer.camera.center = self.previous_center + + def updateLiveLayer(self, image, from_autofocus=False): + """Updates the canvas with the new image data.""" + if self.dtype != np.dtype(image.dtype): + self.contrastManager.scale_contrast_limits(np.dtype(image.dtype)) + self.dtype = np.dtype(image.dtype) + self.init_live = False + self.init_live_rgb = False + + if not self.live_configuration.name: + self.live_configuration.name = self.liveController.currentConfiguration.name + rgb = len(image.shape) >= 3 + + if not rgb and not self.init_live or 'Live View' not in self.viewer.layers: + self.initLiveLayer(self.live_configuration.name, image.shape[0], image.shape[1], image.dtype, rgb) + self.init_live = True + self.init_live_rgb = False + print("init live") + elif rgb and not 
self.init_live_rgb: + self.initLiveLayer(self.live_configuration.name, image.shape[0], image.shape[1], image.dtype, rgb) + self.init_live_rgb = True + self.init_live = False + print("init live rgb") + + layer = self.viewer.layers["Live View"] + layer.data = image + layer.contrast_limits = self.contrastManager.get_limits(self.live_configuration.name) + self.updateHistogram(layer) + + if from_autofocus: + # save viewer scale + if not self.last_was_autofocus: + self.previous_scale = self.viewer.camera.zoom + self.previous_center = self.viewer.camera.center + # resize to cropped view + self.resetView() + self.last_was_autofocus = True + else: + if not self.init_scale: + # init viewer scale + self.resetView() + self.previous_scale = self.viewer.camera.zoom + self.previous_center = self.viewer.camera.center + self.init_scale = True + elif self.last_was_autofocus: + # return to to original view + self.viewer.camera.zoom = self.previous_scale + self.viewer.camera.center = self.previous_center + # save viewer scale + self.previous_scale = self.viewer.camera.zoom + self.previous_center = self.viewer.camera.center + self.last_was_autofocus = False + layer.refresh() + + def onDoubleClick(self, layer, event): + """Handle double-click events and emit centered coordinates if within the data range.""" + if self.navigationController.get_flag_click_to_move(): + coords = layer.world_to_data(event.position) + layer_shape = layer.data.shape[0:2] if len(layer.data.shape) >= 3 else layer.data.shape + + if coords is not None and (0 <= int(coords[-1]) < layer_shape[-1] and (0 <= int(coords[-2]) < layer_shape[-2])): + x_centered = int(coords[-1] - layer_shape[-1] / 2) + y_centered = int(coords[-2] - layer_shape[-2] / 2) + # Emit the centered coordinates and dimensions of the layer's data array + self.signal_coordinates_clicked.emit(x_centered, y_centered, layer_shape[-1], layer_shape[-2]) + else: + self.resetView() + + def set_live_configuration(self, live_configuration): + 
self.live_configuration = live_configuration + + def updateContrastLimits(self, channel, min_val, max_val): + self.contrastManager.update_limits(channel, min_val, max_val) + if "Live View" in self.viewer.layers: + self.viewer.layers["Live View"].contrast_limits = (min_val, max_val) + + def signalContrastLimits(self, event): + layer = event.source + min_val, max_val = map(float, layer.contrast_limits) + self.contrastManager.update_limits(self.live_configuration.name, min_val, max_val) + + def getContrastLimits(self, dtype): + return self.contrastManager.get_default_limits() + + def resetView(self): + self.viewer.reset_view() + + def activate(self): + print("ACTIVATING NAPARI LIVE WIDGET") + self.viewer.window.activate() + + +class NapariMultiChannelWidget(QWidget): + + def __init__(self, objectiveStore, contrastManager, grid_enabled=False, parent=None): + super().__init__(parent) + # Initialize placeholders for the acquisition parameters + self.objectiveStore = objectiveStore + self.contrastManager = contrastManager + self.image_width = 0 + self.image_height = 0 + self.dtype = np.uint8 + self.channels = set() + self.pixel_size_um = 1 + self.dz_um = 1 + self.Nz = 1 + self.layers_initialized = False + self.acquisition_initialized = False + self.viewer_scale_initialized = False + self.update_layer_count = 0 + self.grid_enabled = grid_enabled + + # Initialize a napari Viewer without showing its standalone window. 
        self.initNapariViewer()

    def initNapariViewer(self):
        """Create a headless napari viewer (optionally grid-tiled) and embed its Qt window."""
        self.viewer = napari.Viewer(show=False)
        if self.grid_enabled:
            self.viewer.grid.enabled = True
        self.viewer.dims.axis_labels = ['Z-axis', 'Y-axis', 'X-axis']
        self.viewerWidget = self.viewer.window._qt_window  # NOTE: private napari API
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.viewerWidget)
        self.setLayout(self.layout)
        self.customizeViewer()

    def customizeViewer(self):
        # Hide the status bar (which includes the activity button)
        if hasattr(self.viewer.window, '_status_bar'):
            self.viewer.window._status_bar.hide()

        # Hide the layer buttons
        if hasattr(self.viewer.window._qt_viewer, 'layerButtons'):
            self.viewer.window._qt_viewer.layerButtons.hide()

    def initLayersShape(self, Nz, dz):
        """Record the acquisition's z geometry; mark layers stale if it changed."""
        pixel_size_um = self.objectiveStore.get_pixel_size()
        if self.Nz != Nz or self.dz_um != dz or self.pixel_size_um != pixel_size_um:
            self.acquisition_initialized = False
            self.Nz = Nz
            # A single plane (or zero step) gets a nominal 1.0 um scale.
            self.dz_um = dz if Nz > 1 and dz != 0 else 1.0
            self.pixel_size_um = pixel_size_um

    def initChannels(self, channels):
        self.channels = set(channels)

    def extractWavelength(self, name):
        """Best-effort channel key from a layer/config name: wavelength after 'Fluorescence', or R/G/B."""
        # Split the string and find the wavelength number immediately after "Fluorescence"
        parts = name.split()
        if 'Fluorescence' in parts:
            index = parts.index('Fluorescence') + 1
            if index < len(parts):
                return parts[index].split()[0]  # Assuming '488 nm Ex' and taking '488'
        for color in ['R', 'G', 'B']:
            if color in parts or f"full_{color}" in parts:
                return color
        return None

    def generateColormap(self, channel_info):
        """Convert a HEX value to a normalized RGB tuple."""
        positions = [0, 1]  # NOTE(review): unused; controls are passed literally below
        c0 = (0, 0, 0)
        c1 = (((channel_info['hex'] >> 16) & 0xFF) / 255,  # Normalize the Red component
              ((channel_info['hex'] >> 8) & 0xFF) / 255,   # Normalize the Green component
              (channel_info['hex'] & 0xFF) / 255)          # Normalize the Blue component
        return Colormap(colors=[c0, c1], controls=[0, 1], name=channel_info['name'])

    def initLayers(self, image_height, image_width, image_dtype):
        """Initializes the full canvas for each channel based on the acquisition parameters."""
        if self.acquisition_initialized:
            # Keep only layers belonging to the current channel set.
            for layer in list(self.viewer.layers):
                if layer.name not in self.channels:
                    self.viewer.layers.remove(layer)
        else:
            self.viewer.layers.clear()
            self.acquisition_initialized = True
            if self.dtype != np.dtype(image_dtype) and not USE_NAPARI_FOR_LIVE_VIEW:
                self.contrastManager.scale_contrast_limits(image_dtype)

        self.image_width = image_width
        self.image_height = image_height
        self.dtype = np.dtype(image_dtype)
        self.layers_initialized = True
        self.update_layer_count = 0

    def updateLayers(self, image, i, j, k, channel_name):
        """Updates the appropriate slice of the canvas with the new image data."""
        rgb = len(image.shape) == 3

        # Check if the layer exists and has a different dtype
        if self.dtype != np.dtype(image.dtype):  # or self.viewer.layers[channel_name].data.dtype != image.dtype:
            # Remove the existing layer
            self.layers_initialized = False
            self.acquisition_initialized = False

        if not self.layers_initialized:
            self.initLayers(image.shape[0], image.shape[1], image.dtype)

        if channel_name not in self.viewer.layers:
            # First frame for this channel: allocate a full z-stack canvas and add the layer.
            self.channels.add(channel_name)
            if rgb:
                color = None  # RGB images do not need a colormap
                canvas = np.zeros((self.Nz, self.image_height, self.image_width, 3), dtype=self.dtype)
            else:
                channel_info = CHANNEL_COLORS_MAP.get(self.extractWavelength(channel_name), {'hex': 0xFFFFFF, 'name': 'gray'})
                if channel_info['name'] in AVAILABLE_COLORMAPS:
                    color = AVAILABLE_COLORMAPS[channel_info['name']]
                else:
                    color = self.generateColormap(channel_info)
                canvas = np.zeros((self.Nz, self.image_height, self.image_width), dtype=self.dtype)

            limits = self.getContrastLimits(self.dtype)
            layer = self.viewer.add_image(canvas, name=channel_name, visible=True, rgb=rgb,
                                          colormap=color, contrast_limits=limits, blending='additive',
                                          scale=(self.dz_um, self.pixel_size_um, self.pixel_size_um))

            # print(f"multi channel - dz_um:{self.dz_um}, pixel_y_um:{self.pixel_size_um}, pixel_x_um:{self.pixel_size_um}")
            layer.contrast_limits = self.contrastManager.get_limits(channel_name)
            layer.events.contrast_limits.connect(self.signalContrastLimits)

            if not self.viewer_scale_initialized:
                self.resetView()
                self.viewer_scale_initialized = True
            else:
                layer.refresh()

        layer = self.viewer.layers[channel_name]
        layer.data[k] = image  # write this z-slice into the channel's stack
        layer.contrast_limits = self.contrastManager.get_limits(channel_name)
        self.update_layer_count += 1
        # Refresh the display once per full set of channel updates.
        if self.update_layer_count % len(self.channels) == 0:
            if self.Nz > 1:
                self.viewer.dims.set_point(0, k * self.dz_um)
            for layer in self.viewer.layers:
                layer.refresh()

    def updateRTPLayers(self, image, channel_name):
        """Updates the appropriate slice of the canvas with the new image data."""
        # Check if the layer exists and has a different dtype
        if self.dtype != image.dtype:  # or self.viewer.layers[channel_name].data.dtype != image.dtype:
            # Remove the existing layer
            self.layers_initialized = False
            self.acquisition_initialized = False

        if not self.layers_initialized:
            self.initLayers(image.shape[0], image.shape[1], image.dtype)

        rgb = len(image.shape) == 3
        if channel_name not in self.viewer.layers:
            self.channels.add(channel_name)
            if rgb:
                color = None  # RGB images do not need a colormap
                canvas = np.zeros((self.image_height, self.image_width, 3), dtype=self.dtype)
            else:
                channel_info = CHANNEL_COLORS_MAP.get(self.extractWavelength(channel_name), {'hex': 0xFFFFFF, 'name': 'gray'})
                if channel_info['name'] in AVAILABLE_COLORMAPS:
                    color = AVAILABLE_COLORMAPS[channel_info['name']]
                else:
                    color = self.generateColormap(channel_info)
                canvas = np.zeros((self.image_height, self.image_width), dtype=self.dtype)

            layer = self.viewer.add_image(canvas, name=channel_name, visible=True, rgb=rgb, colormap=color,
blending='additive', contrast_limits=self.getContrastLimits(self.dtype)) + layer.events.contrast_limits.connect(self.signalContrastLimits) + self.resetView() + + layer = self.viewer.layers[channel_name] + layer.data = image + layer.contrast_limits = self.contrastManager.get_limits(channel_name) + layer.refresh() + + def signalContrastLimits(self, event): + layer = event.source + min_val, max_val = map(float, layer.contrast_limits) + self.contrastManager.update_limits(layer.name, min_val, max_val) + + def getContrastLimits(self, dtype): + return self.contrastManager.get_default_limits() + + def resetView(self): + self.viewer.reset_view() + for layer in self.viewer.layers: + layer.refresh() + + def activate(self): + print("ACTIVATING NAPARI MULTICHANNEL WIDGET") + self.viewer.window.activate() + + +class NapariTiledDisplayWidget(QWidget): + + signal_coordinates_clicked = Signal(int, int, int, int, int, int, float, float) + + def __init__(self, objectiveStore, contrastManager, parent=None): + super().__init__(parent) + # Initialize placeholders for the acquisition parameters + self.objectiveStore = objectiveStore + self.contrastManager = contrastManager + self.downsample_factor = PRVIEW_DOWNSAMPLE_FACTOR + self.image_width = 0 + self.image_height = 0 + self.dtype = np.uint8 + self.channels = set() + self.Nx = 1 + self.Ny = 1 + self.Nz = 1 + self.dz_um = 1 + self.pixel_size_um = 1 + self.layers_initialized = False + self.acquisition_initialized = False + self.viewer_scale_initialized = False + self.initNapariViewer() + + def initNapariViewer(self): + self.viewer = napari.Viewer(show=False) #, ndisplay=3) + self.viewerWidget = self.viewer.window._qt_window + self.viewer.dims.axis_labels = ['Z-axis', 'Y-axis', 'X-axis'] + self.layout = QVBoxLayout() + self.layout.addWidget(self.viewerWidget) + self.setLayout(self.layout) + self.customizeViewer() + + def customizeViewer(self): + # Hide the status bar (which includes the activity button) + if hasattr(self.viewer.window, 
'_status_bar'): + self.viewer.window._status_bar.hide() + + # Hide the layer buttons + if hasattr(self.viewer.window._qt_viewer, 'layerButtons'): + self.viewer.window._qt_viewer.layerButtons.hide() + + def initLayersShape(self, Nx, Ny, Nz, dx, dy, dz): + self.acquisition_initialized = False + self.Nx = Nx + self.Ny = Ny + self.Nz = Nz + self.dx_mm = dx + self.dy_mm = dy + self.dz_um = dz if Nz > 1 and dz != 0 else 1.0 + pixel_size_um = self.objectiveStore.get_pixel_size() + self.pixel_size_um = pixel_size_um * self.downsample_factor + + def initChannels(self, channels): + self.channels = set(channels) + + def extractWavelength(self, name): + # Split the string and find the wavelength number immediately after "Fluorescence" + parts = name.split() + if 'Fluorescence' in parts: + index = parts.index('Fluorescence') + 1 + if index < len(parts): + return parts[index].split()[0] # Assuming '488 nm Ex' and taking '488' + for color in ['R', 'G', 'B']: + if color in parts or f"full_{color}" in parts: + return color + return None + + def generateColormap(self, channel_info): + """Convert a HEX value to a normalized RGB tuple.""" + c0 = (0, 0, 0) + c1 = (((channel_info['hex'] >> 16) & 0xFF) / 255, # Normalize the Red component + ((channel_info['hex'] >> 8) & 0xFF) / 255, # Normalize the Green component + (channel_info['hex'] & 0xFF) / 255) # Normalize the Blue component + return Colormap(colors=[c0, c1], controls=[0, 1], name=channel_info['name']) + + def initLayers(self, image_height, image_width, image_dtype): + """Initializes the full canvas for each channel based on the acquisition parameters.""" + if self.acquisition_initialized: + for layer in list(self.viewer.layers): + if layer.name not in self.channels: + self.viewer.layers.remove(layer) + else: + self.viewer.layers.clear() + self.acquisition_initialized = True + + self.image_width = image_width // self.downsample_factor + self.image_height = image_height // self.downsample_factor + self.dtype = np.dtype(image_dtype) 
+ self.layers_initialized = True + self.resetView() + self.viewer_scale_initialized = False + + def updateLayers(self, image, i, j, k, channel_name): + """Updates the appropriate slice of the canvas with the new image data.""" + if i == -1 or j == -1: + print("no tiled preview for coordinate acquisition") + return + + # Check if the layer exists and has a different dtype + if self.dtype != image.dtype: + # Remove the existing layer + self.layers_initialized = False + self.acquisition_initialized = False + + if not self.layers_initialized: + self.initLayers(image.shape[0], image.shape[1], image.dtype) + + rgb = len(image.shape) == 3 # Check if image is RGB based on shape + if channel_name not in self.viewer.layers: + self.channels.add(channel_name) + if rgb: + color = None # No colormap for RGB images + canvas = np.zeros((self.Nz, self.Ny * self.image_height, self.Nx * self.image_width, 3), dtype=self.dtype) + else: + channel_info = CHANNEL_COLORS_MAP.get(self.extractWavelength(channel_name), {'hex': 0xFFFFFF, 'name': 'gray'}) + if channel_info['name'] in AVAILABLE_COLORMAPS: + color = AVAILABLE_COLORMAPS[channel_info['name']] + else: + color = self.generateColormap(channel_info) + canvas = np.zeros((self.Nz, self.Ny * self.image_height, self.Nx * self.image_width), dtype=self.dtype) + + limits = self.getContrastLimits(self.dtype) + layer = self.viewer.add_image(canvas, name=channel_name, visible=True, rgb=rgb, + colormap=color, contrast_limits=limits, blending='additive', + scale=(self.dz_um, self.pixel_size_um, self.pixel_size_um)) + # print(f"tiled display - dz_um:{self.dz_um}, pixel_y_um:{self.pixel_size_um}, pixel_x_um:{self.pixel_size_um}") + layer.contrast_limits = self.contrastManager.get_limits(channel_name) + layer.events.contrast_limits.connect(self.signalContrastLimits) + layer.mouse_double_click_callbacks.append(self.onDoubleClick) + + image = cv2.resize(image, (self.image_width, self.image_height), interpolation=cv2.INTER_AREA) + + if not 
self.viewer_scale_initialized: + self.resetView() + self.viewer_scale_initialized = True + self.viewer.dims.set_point(0, k * self.dz_um) + layer = self.viewer.layers[channel_name] + layer.contrast_limits = self.contrastManager.get_limits(channel_name) + layer_data = layer.data + y_slice = slice(i * self.image_height, (i + 1) * self.image_height) + x_slice = slice(j * self.image_width, (j + 1) * self.image_width) + if rgb: + layer_data[k, y_slice, x_slice, :] = image + else: + layer_data[k, y_slice, x_slice] = image + layer.data = layer_data + layer.refresh() + + def signalContrastLimits(self, event): + layer = event.source + min_val, max_val = map(float, layer.contrast_limits) + self.contrastManager.update_limits(layer.name, min_val, max_val) + + def getContrastLimits(self, dtype): + return self.contrastManager.get_default_limits() + + def onDoubleClick(self, layer, event): + """Handle double-click events and emit centered coordinates if within the data range.""" + coords = layer.world_to_data(event.position) + layer_shape = layer.data.shape[0:3] if len(layer.data.shape) >= 4 else layer.data.shape + + if coords is not None and (0 <= int(coords[-1]) < layer_shape[-1] and (0 <= int(coords[-2]) < layer_shape[-2])): + x_centered = int(coords[-1] - layer_shape[-1] / 2) + y_centered = int(coords[-2] - layer_shape[-2] / 2) + # Emit the centered coordinates and dimensions of the layer's data array + self.signal_coordinates_clicked.emit(x_centered, y_centered, + layer_shape[-1], layer_shape[-2], + self.Nx, self.Ny, + self.dx_mm, self.dy_mm) + + def resetView(self): + self.viewer.reset_view() + for layer in self.viewer.layers: + layer.refresh() + + def activate(self): + print("ACTIVATING NAPARI TILED DISPLAY WIDGET") + self.viewer.window.activate() + + +class NapariMosaicDisplayWidget(QWidget): + + signal_coordinates_clicked = Signal(float, float) # x, y in mm + signal_update_viewer = Signal() + signal_layers_initialized = Signal(bool) + signal_shape_drawn = Signal(list) + + 
def __init__(self, objectiveStore, contrastManager, parent=None): + super().__init__(parent) + self.objectiveStore = objectiveStore + self.contrastManager = contrastManager + self.downsample_factor = PRVIEW_DOWNSAMPLE_FACTOR + self.viewer = napari.Viewer(show=False) + self.layout = QVBoxLayout() + self.layout.addWidget(self.viewer.window._qt_window) + self.layers_initialized = False + self.shape_layer = None + self.shapes_mm = [] + self.is_drawing_shape = False + + # add clear button + self.clear_button = QPushButton("Clear Mosaic View") + self.clear_button.clicked.connect(self.clearAllLayers) + self.layout.addWidget(self.clear_button) + + self.setLayout(self.layout) + self.customizeViewer() + self.viewer_pixel_size_mm = 1 + self.dz_um = None + self.Nz = None + self.channels = set() + self.viewer_extents = [] # [min_y, max_y, min_x, max_x] + self.top_left_coordinate = None # [y, x] in mm + self.mosaic_dtype = None + + def customizeViewer(self): + # hide status bar + if hasattr(self.viewer.window, '_status_bar'): + self.viewer.window._status_bar.hide() + + self.viewer.bind_key('D', self.toggle_draw_mode) + + def toggle_draw_mode(self, viewer): + self.is_drawing_shape = not self.is_drawing_shape + + if 'Manual ROI' not in self.viewer.layers: + self.shape_layer = self.viewer.add_shapes(name='Manual ROI', edge_width=40, edge_color='red', face_color='transparent') + self.shape_layer.events.data.connect(self.on_shape_change) + else: + self.shape_layer = self.viewer.layers['Manual ROI'] + + if self.is_drawing_shape: + self.shape_layer.mode = 'add_polygon' + else: + self.shape_layer.mode = 'pan_zoom' + + self.on_shape_change() - def import_location_list(self): - file_path, _ = QFileDialog.getOpenFileName(self, "Import Location List", '', "CSV Files (*.csv);;All Files (*)") - if file_path: - location_list_df = pd.read_csv(file_path) - location_list_df_relevant = None - try: - location_list_df_relevant = location_list_df[['x (mm)', 'y (mm)', 'z (um)']] - except KeyError: - 
print("Improperly formatted location list being imported") - return - self.clear_only_location_list() - for index, row in location_list_df_relevant.iterrows(): - x = row['x (mm)'] - y = row['y (mm)'] - z = row['z (um)'] - if not np.any(np.all(self.location_list[:, :2] == [x, y], axis=1)): - location_str = 'x: ' + str(round(x,3)) + ' mm, y: ' + str(round(y,3)) + ' mm, z: ' + str(round(1000*z,1)) + ' um' - self.dropdown_location_list.addItem(location_str) - index = self.dropdown_location_list.count() - 1 - self.dropdown_location_list.setCurrentIndex(index) - self.location_list = np.vstack((self.location_list, [[x,y,z]])) - self.navigationViewer.register_fov_to_image(x,y) - else: - print("Duplicate values not added based on x and y.") - print(self.location_list) + def enable_shape_drawing(self, enable): + if enable: + self.toggle_draw_mode(self.viewer) + else: + self.is_drawing_shape = False + if self.shape_layer is not None: + self.shape_layer.mode = 'pan_zoom' + + def on_shape_change(self, event=None): + if self.shape_layer is not None and len(self.shape_layer.data) > 0: + # convert shapes to mm coordinates + self.shapes_mm = [self.convert_shape_to_mm(shape) for shape in self.shape_layer.data] + else: + self.shapes_mm = [] + self.signal_shape_drawn.emit(self.shapes_mm) + + def convert_shape_to_mm(self, shape_data): + shape_data_mm = [] + for point in shape_data: + coords = self.viewer.layers[0].world_to_data(point) + x_mm = self.top_left_coordinate[1] + coords[1] * self.viewer_pixel_size_mm + y_mm = self.top_left_coordinate[0] + coords[0] * self.viewer_pixel_size_mm + shape_data_mm.append([x_mm, y_mm]) + return np.array(shape_data_mm) + + def convert_mm_to_viewer_shapes(self, shapes_mm): + viewer_shapes = [] + for shape_mm in shapes_mm: + viewer_shape = [] + for point_mm in shape_mm: + x_data = (point_mm[0] - self.top_left_coordinate[1]) / self.viewer_pixel_size_mm + y_data = (point_mm[1] - self.top_left_coordinate[0]) / self.viewer_pixel_size_mm + world_coords = 
self.viewer.layers[0].data_to_world([y_data, x_data]) + viewer_shape.append(world_coords) + viewer_shapes.append(viewer_shape) + return viewer_shapes + + def update_shape_layer_position(self, prev_top_left, new_top_left): + if self.shape_layer is None or len(self.shapes_mm) == 0: + return + try: + # update top_left_coordinate + self.top_left_coordinate = new_top_left + + # convert mm coordinates to viewer coordinates + new_shapes = self.convert_mm_to_viewer_shapes(self.shapes_mm) + + # update shape layer data + self.shape_layer.data = new_shapes + except Exception as e: + print(f"Error updating shape layer position: {e}") + import traceback + traceback.print_exc() + + def initChannels(self, channels): + self.channels = set(channels) + + def initLayersShape(self, Nz, dz): + self.Nz = 1 + self.dz_um = dz + + def extractWavelength(self, name): + # extract wavelength from channel name + parts = name.split() + if 'Fluorescence' in parts: + index = parts.index('Fluorescence') + 1 + if index < len(parts): + return parts[index].split()[0] + for color in ['R', 'G', 'B']: + if color in parts or f"full_{color}" in parts: + return color + return None + + def generateColormap(self, channel_info): + # generate colormap from hex value + c0 = (0, 0, 0) + c1 = (((channel_info['hex'] >> 16) & 0xFF) / 255, + ((channel_info['hex'] >> 8) & 0xFF) / 255, + (channel_info['hex'] & 0xFF) / 255) + return Colormap(colors=[c0, c1], controls=[0, 1], name=channel_info['name']) + + def updateMosaic(self, image, x_mm, y_mm, k, channel_name): + # calculate pixel size + image_pixel_size_um = self.objectiveStore.get_pixel_size() * self.downsample_factor + image_pixel_size_mm = image_pixel_size_um / 1000 + image_dtype = image.dtype + + # downsample image + if self.downsample_factor != 1: + image = cv2.resize(image, (image.shape[1] // self.downsample_factor, image.shape[0] // self.downsample_factor), interpolation=cv2.INTER_AREA) + + # adjust image position + x_mm -= (image.shape[1] * 
image_pixel_size_mm) / 2 + y_mm -= (image.shape[0] * image_pixel_size_mm) / 2 + + if not self.viewer.layers: + # initialize first layer + self.layers_initialized = True + self.signal_layers_initialized.emit(self.layers_initialized) + self.viewer_pixel_size_mm = image_pixel_size_mm + self.viewer_extents = [y_mm, y_mm + image.shape[0] * image_pixel_size_mm, + x_mm, x_mm + image.shape[1] * image_pixel_size_mm] + self.top_left_coordinate = [y_mm, x_mm] + self.mosaic_dtype = image_dtype + else: + # convert image dtype and scale if necessary + image = self.convertImageDtype(image, self.mosaic_dtype) + if image_pixel_size_mm != self.viewer_pixel_size_mm: + scale_factor = image_pixel_size_mm / self.viewer_pixel_size_mm + image = cv2.resize(image, (int(image.shape[1] * scale_factor), int(image.shape[0] * scale_factor)), interpolation=cv2.INTER_LINEAR) + + if channel_name not in self.viewer.layers: + # create new layer for channel + channel_info = CHANNEL_COLORS_MAP.get(self.extractWavelength(channel_name), {'hex': 0xFFFFFF, 'name': 'gray'}) + if channel_info['name'] in AVAILABLE_COLORMAPS: + color = AVAILABLE_COLORMAPS[channel_info['name']] + else: + color = self.generateColormap(channel_info) + + layer = self.viewer.add_image( + np.zeros_like(image), name=channel_name, rgb=len(image.shape) == 3, colormap=color, + visible=True, blending='additive', scale=(self.viewer_pixel_size_mm * 1000, self.viewer_pixel_size_mm * 1000) + ) + layer.mouse_double_click_callbacks.append(self.onDoubleClick) + layer.events.contrast_limits.connect(self.signalContrastLimits) + + # get layer for channel + layer = self.viewer.layers[channel_name] + + # update extents + self.viewer_extents[0] = min(self.viewer_extents[0], y_mm) + self.viewer_extents[1] = max(self.viewer_extents[1], y_mm + image.shape[0] * self.viewer_pixel_size_mm) + self.viewer_extents[2] = min(self.viewer_extents[2], x_mm) + self.viewer_extents[3] = max(self.viewer_extents[3], x_mm + image.shape[1] * self.viewer_pixel_size_mm) + 
+ # store previous top-left coordinate + prev_top_left = self.top_left_coordinate.copy() if self.top_left_coordinate else None + self.top_left_coordinate = [self.viewer_extents[0], self.viewer_extents[2]] + + # update layer + self.updateLayer(layer, image, x_mm, y_mm, k, prev_top_left) + + # update contrast limits + min_val, max_val = self.contrastManager.get_limits(channel_name) + scaled_min = self.convertValue(min_val, self.contrastManager.acquisition_dtype, self.mosaic_dtype) + scaled_max = self.convertValue(max_val, self.contrastManager.acquisition_dtype, self.mosaic_dtype) + layer.contrast_limits = (scaled_min, scaled_max) + layer.refresh() + + def updateLayer(self, layer, image, x_mm, y_mm, k, prev_top_left): + # calculate new mosaic size and position + mosaic_height = int(math.ceil((self.viewer_extents[1] - self.viewer_extents[0]) / self.viewer_pixel_size_mm)) + mosaic_width = int(math.ceil((self.viewer_extents[3] - self.viewer_extents[2]) / self.viewer_pixel_size_mm)) + + is_rgb = len(image.shape) == 3 and image.shape[2] == 3 + if layer.data.shape[:2] != (mosaic_height, mosaic_width): + # calculate offsets for existing data + y_offset = int(math.floor((prev_top_left[0] - self.top_left_coordinate[0]) / self.viewer_pixel_size_mm)) + x_offset = int(math.floor((prev_top_left[1] - self.top_left_coordinate[1]) / self.viewer_pixel_size_mm)) + + for mosaic in self.viewer.layers: + if mosaic.name != 'Manual ROI': + if len(mosaic.data.shape) == 3 and mosaic.data.shape[2] == 3: + new_data = np.zeros((mosaic_height, mosaic_width, 3), dtype=mosaic.data.dtype) + else: + new_data = np.zeros((mosaic_height, mosaic_width), dtype=mosaic.data.dtype) + + # ensure offsets don't exceed bounds + y_end = min(y_offset + mosaic.data.shape[0], new_data.shape[0]) + x_end = min(x_offset + mosaic.data.shape[1], new_data.shape[1]) + + # shift existing data + if len(mosaic.data.shape) == 3 and mosaic.data.shape[2] == 3: + new_data[y_offset:y_end, x_offset:x_end, :] = 
mosaic.data[:y_end-y_offset, :x_end-x_offset, :] + else: + new_data[y_offset:y_end, x_offset:x_end] = mosaic.data[:y_end-y_offset, :x_end-x_offset] + mosaic.data = new_data + + if 'Manual ROI' in self.viewer.layers: + self.update_shape_layer_position(prev_top_left, self.top_left_coordinate) + + self.resetView() + + # insert new image + y_pos = int(math.floor((y_mm - self.top_left_coordinate[0]) / self.viewer_pixel_size_mm)) + x_pos = int(math.floor((x_mm - self.top_left_coordinate[1]) / self.viewer_pixel_size_mm)) + + # ensure indices are within bounds + y_end = min(y_pos + image.shape[0], layer.data.shape[0]) + x_end = min(x_pos + image.shape[1], layer.data.shape[1]) + + # insert image data + if is_rgb: + layer.data[y_pos:y_end, x_pos:x_end, :] = image[:y_end - y_pos, :x_end - x_pos, :] + else: + layer.data[y_pos:y_end, x_pos:x_end] = image[:y_end - y_pos, :x_end - x_pos] + layer.refresh() + + def convertImageDtype(self, image, target_dtype): + # convert image to target dtype + if image.dtype == target_dtype: + return image + + # get full range of values for both dtypes + if np.issubdtype(image.dtype, np.integer): + input_info = np.iinfo(image.dtype) + input_min, input_max = input_info.min, input_info.max + else: + input_min, input_max = np.min(image), np.max(image) + if np.issubdtype(target_dtype, np.integer): + output_info = np.iinfo(target_dtype) + output_min, output_max = output_info.min, output_info.max + else: + output_min, output_max = 0.0, 1.0 + + # normalize and scale image + image_normalized = (image.astype(np.float64) - input_min) / (input_max - input_min) + image_scaled = image_normalized * (output_max - output_min) + output_min + + return image_scaled.astype(target_dtype) + + def convertValue(self, value, from_dtype, to_dtype): + # Convert value from one dtype range to another + from_info = np.iinfo(from_dtype) + to_info = np.iinfo(to_dtype) + + # Normalize the value to [0, 1] range + normalized = (value - from_info.min) / (from_info.max - 
from_info.min) + + # Scale to the target dtype range + return normalized * (to_info.max - to_info.min) + to_info.min + + def signalContrastLimits(self, event): + layer = event.source + min_val, max_val = map(float, layer.contrast_limits) + + # Convert the new limits from mosaic_dtype to acquisition_dtype + acquisition_min = self.convertValue(min_val, self.mosaic_dtype, self.contrastManager.acquisition_dtype) + acquisition_max = self.convertValue(max_val, self.mosaic_dtype, self.contrastManager.acquisition_dtype) + + # Update the ContrastManager with the new limits + self.contrastManager.update_limits(layer.name, acquisition_min, acquisition_max) + + def getContrastLimits(self, dtype): + return self.contrastManager.get_default_limits() + + def onDoubleClick(self, layer, event): + coords = layer.world_to_data(event.position) + if coords is not None: + x_mm = self.top_left_coordinate[1] + coords[-1] * self.viewer_pixel_size_mm + y_mm = self.top_left_coordinate[0] + coords[-2] * self.viewer_pixel_size_mm + print(f"move from click: ({x_mm:.6f}, {y_mm:.6f})") + self.signal_coordinates_clicked.emit(x_mm, y_mm) + + def resetView(self): + self.viewer.reset_view() + for layer in self.viewer.layers: + layer.refresh() + + def clear_shape(self): + if self.shape_layer is not None: + self.viewer.layers.remove(self.shape_layer) + self.shape_layer = None + self.is_drawing_shape = False + self.signal_shape_drawn.emit([]) + + def clearAllLayers(self): + self.clear_shape() + self.viewer.layers.clear() + self.viewer_extents = None + self.top_left_coordinate = None + self.dtype = None + self.channels = set() + self.dz_um = None + self.Nz = None + self.layers_initialized = False + self.signal_layers_initialized.emit(self.layers_initialized) + self.signal_update_viewer.emit() + + def activate(self): + print("ACTIVATING NAPARI MOSAIC WIDGET") + self.viewer.window.activate() class TrackingControllerWidget(QFrame): @@ -2200,17 +5831,18 @@ def add_components(self,show_configurations): 
self.lineEdit_experimentID = QLineEdit() - self.dropdown_objective = QComboBox() - self.dropdown_objective.addItems(list(OBJECTIVES.keys())) - self.dropdown_objective.setCurrentText(DEFAULT_OBJECTIVE) + # self.dropdown_objective = QComboBox() + # self.dropdown_objective.addItems(list(OBJECTIVES.keys())) + # self.dropdown_objective.setCurrentText(DEFAULT_OBJECTIVE) + self.objectivesWidget = ObjectivesWidget(self.objectiveStore) self.dropdown_tracker = QComboBox() self.dropdown_tracker.addItems(TRACKERS) self.dropdown_tracker.setCurrentText(DEFAULT_TRACKER) self.entry_tracking_interval = QDoubleSpinBox() - self.entry_tracking_interval.setMinimum(0) - self.entry_tracking_interval.setMaximum(30) + self.entry_tracking_interval.setMinimum(0) + self.entry_tracking_interval.setMaximum(30) self.entry_tracking_interval.setSingleStep(0.5) self.entry_tracking_interval.setValue(0) @@ -2241,15 +5873,17 @@ def add_components(self,show_configurations): grid_line0.addWidget(self.lineEdit_experimentID, 1,1, 1,1) tmp = QLabel('Objective') tmp.setFixedWidth(90) + # grid_line0.addWidget(tmp,1,2) + # grid_line0.addWidget(self.dropdown_objective, 1,3) grid_line0.addWidget(tmp,1,2) - grid_line0.addWidget(self.dropdown_objective, 1,3) + grid_line0.addWidget(self.objectivesWidget, 1,3) grid_line3 = QHBoxLayout() tmp = QLabel('Configurations') tmp.setFixedWidth(90) grid_line3.addWidget(tmp) grid_line3.addWidget(self.list_configurations) - + grid_line1 = QHBoxLayout() tmp = QLabel('Tracker') grid_line1.addWidget(tmp) @@ -2270,7 +5904,7 @@ def add_components(self,show_configurations): self.grid.addLayout(grid_line3) else: self.list_configurations.setCurrentRow(0) # select the first configuration - self.grid.addLayout(grid_line1) + self.grid.addLayout(grid_line1) self.grid.addLayout(grid_line4) self.grid.addStretch() self.setLayout(self.grid) @@ -2284,7 +5918,8 @@ def add_components(self,show_configurations): self.btn_track.clicked.connect(self.toggle_acquisition) # connections - selections and 
entries self.dropdown_tracker.currentIndexChanged.connect(self.update_tracker) - self.dropdown_objective.currentIndexChanged.connect(self.update_pixel_size) + #self.dropdown_objective.currentIndexChanged.connect(self.update_pixel_size) + self.objectivesWidget.dropdown.currentIndexChanged.connect(self.update_pixel_size) # controller to widget self.trackingController.signal_tracking_stopped.connect(self.slot_tracking_stopped) @@ -2318,7 +5953,7 @@ def set_saving_dir(self): save_dir_base = dialog.getExistingDirectory(None, "Select Folder") self.trackingController.set_base_path(save_dir_base) self.lineEdit_savingDir.setText(save_dir_base) - self.base_path_is_set = True + self.base_path_is_set = True def toggle_acquisition(self,pressed): if pressed: @@ -2328,7 +5963,7 @@ def toggle_acquisition(self,pressed): msg.setText("Please choose base saving directory first") msg.exec_() return - # @@@ to do: add a widgetManger to enable and disable widget + # @@@ to do: add a widgetManger to enable and disable widget # @@@ to do: emit signal to widgetManager to disable other widgets self.setEnabled_all(False) self.trackingController.start_new_experiment(self.lineEdit_experimentID.text()) @@ -2341,21 +5976,33 @@ def setEnabled_all(self,enabled): self.btn_setSavingDir.setEnabled(enabled) self.lineEdit_savingDir.setEnabled(enabled) self.lineEdit_experimentID.setEnabled(enabled) - self.dropdown_tracker - self.dropdown_objective + # self.dropdown_tracker + # self.dropdown_objective + self.checkbox_set_z_range.setEnabled(enabled) self.list_configurations.setEnabled(enabled) def update_tracker(self, index): self.trackingController.update_tracker_selection(self.dropdown_tracker.currentText()) - def update_pixel_size(self): + def update_pixel_size(self): objective = self.dropdown_objective.currentText() self.trackingController.objective = objective # self.internal_state.data['Objective'] = self.objective pixel_size_um = CAMERA_PIXEL_SIZE_UM[CAMERA_SENSOR] / ( TUBE_LENS_MM/ 
(OBJECTIVES[objective]['tube_lens_f_mm']/OBJECTIVES[objective]['magnification']) ) self.trackingController.update_pixel_size(pixel_size_um) - print('pixel size is ' + str(pixel_size_um) + ' um') + print('pixel size is ' + str(pixel_size_um) + ' μm') + def update_pixel_size(self): + objective = self.objectiveStore.current_objective + self.trackingController.objective = objective + objective_info = self.objectiveStore.objectives_dict[objective] + magnification = objective_info["magnification"] + objective_tube_lens_mm = objective_info["tube_lens_f_mm"] + tube_lens_mm = TUBE_LENS_MM + pixel_size_um = CAMERA_PIXEL_SIZE_UM[CAMERA_SENSOR] + pixel_size_xy = pixel_size_um / (magnification / (objective_tube_lens_mm / tube_lens_mm)) + self.trackingController.update_pixel_size(pixel_size_xy) + print(f'pixel size is {pixel_size_xy:.2f} μm') ''' # connections @@ -2379,7 +6026,7 @@ def toggle_acquisition(self,pressed): msg.exec_() return if pressed: - # @@@ to do: add a widgetManger to enable and disable widget + # @@@ to do: add a widgetManger to enable and disable widget # @@@ to do: emit signal to widgetManager to disable other widgets self.setEnabled_all(False) self.trackingController.start_new_experiment(self.lineEdit_experimentID.text()) @@ -2506,7 +6153,7 @@ def toggle_acquisition(self,pressed): msg.exec_() return if pressed: - # @@@ to do: add a widgetManger to enable and disable widget + # @@@ to do: add a widgetManger to enable and disable widget # @@@ to do: emit signal to widgetManager to disable other widgets self.setEnabled_all(False) self.plateReadingController.start_new_experiment(self.lineEdit_experimentID.text()) @@ -2534,7 +6181,8 @@ def setEnabled_all(self,enabled,exclude_btn_startAcquisition=False): def slot_homing_complete(self): self.btn_startAcquisition.setEnabled(True) - + + class PlateReaderNavigationWidget(QFrame): def __init__(self, plateReaderNavigationController, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2558,7 +6206,7 @@ def 
add_components(self): self.dropdown_column.setEnabled(False) self.dropdown_row.setEnabled(False) self.btn_moveto.setEnabled(False) - + # layout grid_line0 = QHBoxLayout() # tmp = QLabel('Saving Path') @@ -2608,7 +6256,7 @@ def update_current_location(self,location_str): class TriggerControlWidget(QFrame): - # for synchronized trigger + # for synchronized trigger signal_toggle_live = Signal(bool) signal_trigger_mode = Signal(str) signal_trigger_fps = Signal(float) @@ -2630,8 +6278,8 @@ def add_components(self): # line 1: fps self.entry_triggerFPS = QDoubleSpinBox() - self.entry_triggerFPS.setMinimum(0.02) - self.entry_triggerFPS.setMaximum(1000) + self.entry_triggerFPS.setMinimum(0.02) + self.entry_triggerFPS.setMaximum(1000) self.entry_triggerFPS.setSingleStep(1) self.entry_triggerFPS.setValue(self.fps_trigger) @@ -2672,6 +6320,7 @@ def update_trigger_fps(self,fps): self.signal_trigger_fps.emit(fps) self.microcontroller2.set_camera_trigger_frequency(self.fps_trigger) + class MultiCameraRecordingWidget(QFrame): def __init__(self, streamHandler, imageSaver, channels, main=None, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2686,7 +6335,7 @@ def add_components(self): self.btn_setSavingDir = QPushButton('Browse') self.btn_setSavingDir.setDefault(False) self.btn_setSavingDir.setIcon(QIcon('icon/folder.png')) - + self.lineEdit_savingDir = QLineEdit() self.lineEdit_savingDir.setReadOnly(True) self.lineEdit_savingDir.setText('Choose a base saving directory') @@ -2694,16 +6343,16 @@ def add_components(self): self.lineEdit_experimentID = QLineEdit() self.entry_saveFPS = QDoubleSpinBox() - self.entry_saveFPS.setMinimum(0.02) - self.entry_saveFPS.setMaximum(1000) + self.entry_saveFPS.setMinimum(0.02) + self.entry_saveFPS.setMaximum(1000) self.entry_saveFPS.setSingleStep(1) self.entry_saveFPS.setValue(1) for channel in self.channels: self.streamHandler[channel].set_save_fps(1) self.entry_timeLimit = QSpinBox() - self.entry_timeLimit.setMinimum(-1) - 
self.entry_timeLimit.setMaximum(60*60*24*30) + self.entry_timeLimit.setMinimum(-1) + self.entry_timeLimit.setMaximum(60*60*24*30) self.entry_timeLimit.setSingleStep(1) self.entry_timeLimit.setValue(-1) @@ -2765,7 +6414,7 @@ def toggle_recording(self,pressed): self.lineEdit_experimentID.setEnabled(False) self.btn_setSavingDir.setEnabled(False) experiment_ID = self.lineEdit_experimentID.text() - experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%-S.%f') + experiment_ID = experiment_ID + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f') os.mkdir(os.path.join(self.save_dir_base,experiment_ID)) for channel in self.channels: self.imageSaver[channel].start_new_experiment(os.path.join(experiment_ID,channel),add_timestamp=False) @@ -2784,6 +6433,7 @@ def stop_recording(self): self.streamHandler[channel].stop_recording() self.btn_setSavingDir.setEnabled(True) + class WaveformDisplay(QFrame): def __init__(self, N=1000, include_x=True, include_y=True, main=None, *args, **kwargs): @@ -2817,21 +6467,23 @@ def update_N(self,N): self.plotWidget['X'].update_N(N) self.plotWidget['Y'].update_N(N) + class PlotWidget(pg.GraphicsLayoutWidget): - + def __init__(self, title='', N = 1000, parent=None,add_legend=False): super().__init__(parent) self.plotWidget = self.addPlot(title = '', axisItems = {'bottom': pg.DateAxisItem()}) if add_legend: self.plotWidget.addLegend() self.N = N - + def plot(self,x,y,label,color,clear=False): - self.plotWidget.plot(x[-self.N:],y[-self.N:],pen=pg.mkPen(color=color,width=2),name=label,clear=clear) + self.plotWidget.plot(x[-self.N:],y[-self.N:],pen=pg.mkPen(color=color,width=4),name=label,clear=clear) def update_N(self,N): self.N = N + class DisplacementMeasurementWidget(QFrame): def __init__(self, displacementMeasurementController, waveformDisplay, main=None, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2842,47 +6494,47 @@ def __init__(self, displacementMeasurementController, waveformDisplay, main=None def 
add_components(self): self.entry_x_offset = QDoubleSpinBox() - self.entry_x_offset.setMinimum(0) - self.entry_x_offset.setMaximum(3000) + self.entry_x_offset.setMinimum(0) + self.entry_x_offset.setMaximum(3000) self.entry_x_offset.setSingleStep(0.2) self.entry_x_offset.setDecimals(3) self.entry_x_offset.setValue(0) self.entry_x_offset.setKeyboardTracking(False) self.entry_y_offset = QDoubleSpinBox() - self.entry_y_offset.setMinimum(0) - self.entry_y_offset.setMaximum(3000) + self.entry_y_offset.setMinimum(0) + self.entry_y_offset.setMaximum(3000) self.entry_y_offset.setSingleStep(0.2) self.entry_y_offset.setDecimals(3) self.entry_y_offset.setValue(0) self.entry_y_offset.setKeyboardTracking(False) self.entry_x_scaling = QDoubleSpinBox() - self.entry_x_scaling.setMinimum(-100) - self.entry_x_scaling.setMaximum(100) + self.entry_x_scaling.setMinimum(-100) + self.entry_x_scaling.setMaximum(100) self.entry_x_scaling.setSingleStep(0.1) self.entry_x_scaling.setDecimals(3) self.entry_x_scaling.setValue(1) self.entry_x_scaling.setKeyboardTracking(False) self.entry_y_scaling = QDoubleSpinBox() - self.entry_y_scaling.setMinimum(-100) - self.entry_y_scaling.setMaximum(100) + self.entry_y_scaling.setMinimum(-100) + self.entry_y_scaling.setMaximum(100) self.entry_y_scaling.setSingleStep(0.1) self.entry_y_scaling.setDecimals(3) self.entry_y_scaling.setValue(1) self.entry_y_scaling.setKeyboardTracking(False) self.entry_N_average = QSpinBox() - self.entry_N_average.setMinimum(1) - self.entry_N_average.setMaximum(25) + self.entry_N_average.setMinimum(1) + self.entry_N_average.setMaximum(25) self.entry_N_average.setSingleStep(1) self.entry_N_average.setValue(1) self.entry_N_average.setKeyboardTracking(False) self.entry_N = QSpinBox() - self.entry_N.setMinimum(1) - self.entry_N.setMaximum(5000) + self.entry_N.setMinimum(1) + self.entry_N.setMaximum(5000) self.entry_N.setSingleStep(1) self.entry_N.setValue(1000) self.entry_N.setKeyboardTracking(False) @@ -2905,7 +6557,7 @@ def 
add_components(self): grid_line0.addWidget(self.entry_y_offset, 0,5) grid_line0.addWidget(QLabel('y scaling'), 0,6) grid_line0.addWidget(self.entry_y_scaling, 0,7) - + grid_line1 = QGridLayout() grid_line1.addWidget(QLabel('d from x'), 0,0) grid_line1.addWidget(self.reading_x, 0,1) @@ -2920,7 +6572,7 @@ def add_components(self): self.grid.addLayout(grid_line0,0,0) self.grid.addLayout(grid_line1,1,0) self.setLayout(self.grid) - + # connections self.entry_x_offset.valueChanged.connect(self.update_settings) self.entry_y_offset.valueChanged.connect(self.update_settings) @@ -2933,14 +6585,15 @@ def add_components(self): def update_settings(self,new_value): print('update settings') self.displacementMeasurementController.update_settings(self.entry_x_offset.value(),self.entry_y_offset.value(),self.entry_x_scaling.value(),self.entry_y_scaling.value(),self.entry_N_average.value(),self.entry_N.value()) - - def update_waveformDisplay_N(self,N): + + def update_waveformDisplay_N(self,N): self.waveformDisplay.update_N(N) def display_readings(self,readings): self.reading_x.setText("{:.2f}".format(readings[0])) self.reading_y.setText("{:.2f}".format(readings[1])) + class LaserAutofocusControlWidget(QFrame): def __init__(self, laserAutofocusController, main=None, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2954,7 +6607,7 @@ def add_components(self): self.btn_initialize.setChecked(False) self.btn_initialize.setDefault(False) - self.btn_set_reference = QPushButton("Set as reference plane") + self.btn_set_reference = QPushButton(" Set Reference ") self.btn_set_reference.setCheckable(False) self.btn_set_reference.setChecked(False) self.btn_set_reference.setDefault(False) @@ -2964,7 +6617,7 @@ def add_components(self): self.label_displacement = QLabel() self.label_displacement.setFrameStyle(QFrame.Panel | QFrame.Sunken) - self.btn_measure_displacement = QPushButton("Measure displacement") + self.btn_measure_displacement = QPushButton("Measure Displacement") 
self.btn_measure_displacement.setCheckable(False) self.btn_measure_displacement.setChecked(False) self.btn_measure_displacement.setDefault(False) @@ -2979,24 +6632,25 @@ def add_components(self): self.entry_target.setValue(0) self.entry_target.setKeyboardTracking(False) - self.btn_move_to_target = QPushButton("Move to target") + self.btn_move_to_target = QPushButton("Move to Target") self.btn_move_to_target.setCheckable(False) self.btn_move_to_target.setChecked(False) self.btn_move_to_target.setDefault(False) if not self.laserAutofocusController.is_initialized: self.btn_move_to_target.setEnabled(False) - + self.grid = QGridLayout() - self.grid.addWidget(self.btn_initialize,0,0,1,3) - self.grid.addWidget(self.btn_set_reference,1,0,1,3) - self.grid.addWidget(QLabel('Displacement (um)'),2,0) - self.grid.addWidget(self.label_displacement,2,1) - self.grid.addWidget(self.btn_measure_displacement,2,2) - self.grid.addWidget(QLabel('Target (um)'),3,0) - self.grid.addWidget(self.entry_target,3,1) - self.grid.addWidget(self.btn_move_to_target,3,2) - self.grid.setRowStretch(self.grid.rowCount(), 1) + self.grid.addWidget(self.btn_initialize,0,0,1,2) + self.grid.addWidget(self.btn_set_reference,0,2,1,2) + + self.grid.addWidget(QLabel('Displacement (um)'),1,0) + self.grid.addWidget(self.label_displacement,1,1) + self.grid.addWidget(self.btn_measure_displacement,1,2,1,2) + + self.grid.addWidget(QLabel('Target (um)'),2,0) + self.grid.addWidget(self.entry_target,2,1) + self.grid.addWidget(self.btn_move_to_target,2,2,1,2) self.setLayout(self.grid) # make connections @@ -3017,123 +6671,1302 @@ def move_to_target(self,target_um): self.laserAutofocusController.move_to_target(self.entry_target.value()) +class WellplateFormatWidget(QWidget): + + signalWellplateSettings = Signal(QVariant, float, float, int, int, float, float, int, int, int) + + def __init__(self, navigationController, navigationViewer, streamHandler, liveController): + super().__init__() + self.navigationController = 
navigationController + self.navigationViewer = navigationViewer + self.streamHandler = streamHandler + self.liveController = liveController + self.wellplate_format = WELLPLATE_FORMAT + self.csv_path = SAMPLE_FORMATS_CSV_PATH # 'sample_formats.csv' + self.initUI() + + def initUI(self): + layout = QHBoxLayout(self) + self.label = QLabel("Sample Format", self) + self.comboBox = QComboBox(self) + self.populate_combo_box() + self.comboBox.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) + layout.addWidget(self.label) + layout.addWidget(self.comboBox) + self.comboBox.currentIndexChanged.connect(self.wellplateChanged) + index = self.comboBox.findData(self.wellplate_format) + if index >= 0: + self.comboBox.setCurrentIndex(index) + + def populate_combo_box(self): + self.comboBox.clear() + for format_, settings in WELLPLATE_FORMAT_SETTINGS.items(): + self.comboBox.addItem(format_, format_) + + # Add custom item and set its font to italic + self.comboBox.addItem("calibrate format...", 'custom') + index = self.comboBox.count() - 1 # Get the index of the last item + font = QFont() + font.setItalic(True) + self.comboBox.setItemData(index, font, Qt.FontRole) + + def wellplateChanged(self, index): + self.wellplate_format = self.comboBox.itemData(index) + if self.wellplate_format == "custom": + calibration_dialog = WellplateCalibration(self, self.navigationController, self.navigationViewer, self.streamHandler, self.liveController) + result = calibration_dialog.exec_() + if result == QDialog.Rejected: + # If the dialog was closed without adding a new format, revert to the previous selection + prev_index = self.comboBox.findData(self.wellplate_format) + self.comboBox.setCurrentIndex(prev_index) + else: + self.setWellplateSettings(self.wellplate_format) + + def setWellplateSettings(self, wellplate_format): + if wellplate_format in WELLPLATE_FORMAT_SETTINGS: + settings = WELLPLATE_FORMAT_SETTINGS[wellplate_format] + elif wellplate_format == 'glass slide': + 
self.signalWellplateSettings.emit(QVariant('glass slide'), 0, 0, 0, 0, 0, 0, 0, 1, 1) + return + else: + print(f"Wellplate format {wellplate_format} not recognized") + return + + self.signalWellplateSettings.emit( + QVariant(wellplate_format), + settings['a1_x_mm'], + settings['a1_y_mm'], + settings['a1_x_pixel'], + settings['a1_y_pixel'], + settings['well_size_mm'], + settings['well_spacing_mm'], + settings['number_of_skip'], + settings['rows'], + settings['cols'] + ) + + def getWellplateSettings(self, wellplate_format): + if wellplate_format in WELLPLATE_FORMAT_SETTINGS: + settings = WELLPLATE_FORMAT_SETTINGS[wellplate_format] + elif wellplate_format == 'glass slide': + settings = { + 'format': 'glass slide', + 'a1_x_mm': 0, + 'a1_y_mm': 0, + 'a1_x_pixel': 0, + 'a1_y_pixel': 0, + 'well_size_mm': 0, + 'well_spacing_mm': 0, + 'number_of_skip': 0, + 'rows': 1, + 'cols': 1 + } + else: + return None + return settings + + def add_custom_format(self, name, settings): + self.WELLPLATE_FORMAT_SETTINGS[name] = settings + self.populate_combo_box() + index = self.comboBox.findData(name) + if index >= 0: + self.comboBox.setCurrentIndex(index) + self.wellplateChanged(index) + + def save_formats_to_csv(self): + cache_path = os.path.join('cache', self.csv_path) + os.makedirs('cache', exist_ok=True) + + fieldnames = ['format', 'a1_x_mm', 'a1_y_mm', 'a1_x_pixel', 'a1_y_pixel', 'well_size_mm', 'well_spacing_mm', 'number_of_skip', 'rows', 'cols'] + with open(cache_path, 'w', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + for format_, settings in WELLPLATE_FORMAT_SETTINGS.items(): + writer.writerow({**{'format': format_}, **settings}) + + @staticmethod + def parse_csv_row(row): + return { + 'a1_x_mm': float(row['a1_x_mm']), + 'a1_y_mm': float(row['a1_y_mm']), + 'a1_x_pixel': int(row['a1_x_pixel']), + 'a1_y_pixel': int(row['a1_y_pixel']), + 'well_size_mm': float(row['well_size_mm']), + 'well_spacing_mm': 
float(row['well_spacing_mm']), + 'number_of_skip': int(row['number_of_skip']), + 'rows': int(row['rows']), + 'cols': int(row['cols']) + } + + +class WellplateCalibration(QDialog): + + def __init__(self, wellplateFormatWidget, navigationController, navigationViewer, streamHandler, liveController): + super().__init__() + self.setWindowTitle("Well Plate Calibration") + self.wellplateFormatWidget = wellplateFormatWidget + self.navigationController = navigationController + self.navigationViewer = navigationViewer + self.streamHandler = streamHandler + self.liveController = liveController + self.was_live = self.liveController.is_live + self.corners = [None, None, None] + self.show_virtual_joystick = True # FLAG + self.initUI() + # Initially allow click-to-move and hide the joystick controls + self.clickToMoveCheckbox.setChecked(True) + self.toggleVirtualJoystick(False) + + def initUI(self): + layout = QHBoxLayout(self) # Change to QHBoxLayout to have two columns + + # Left column for existing controls + left_layout = QVBoxLayout() + + # Add radio buttons for selecting mode + self.mode_group = QButtonGroup(self) + self.new_format_radio = QRadioButton("Add New Format") + self.calibrate_format_radio = QRadioButton("Calibrate Existing Format") + self.mode_group.addButton(self.new_format_radio) + self.mode_group.addButton(self.calibrate_format_radio) + self.new_format_radio.setChecked(True) + + left_layout.addWidget(self.new_format_radio) + left_layout.addWidget(self.calibrate_format_radio) + + # Existing format selection (initially hidden) + self.existing_format_combo = QComboBox(self) + self.populate_existing_formats() + self.existing_format_combo.hide() + left_layout.addWidget(self.existing_format_combo) + + # Connect radio buttons to toggle visibility + self.new_format_radio.toggled.connect(self.toggle_input_mode) + self.calibrate_format_radio.toggled.connect(self.toggle_input_mode) + + self.form_layout = QFormLayout() + + self.nameInput = QLineEdit(self) + 
self.nameInput.setPlaceholderText("custom well plate") + self.form_layout.addRow("Sample Name:", self.nameInput) + + self.rowsInput = QSpinBox(self) + self.rowsInput.setRange(1, 100) + self.rowsInput.setValue(8) + self.form_layout.addRow("# Rows:", self.rowsInput) + + self.colsInput = QSpinBox(self) + self.colsInput.setRange(1, 100) + self.colsInput.setValue(12) + self.form_layout.addRow("# Columns:", self.colsInput) + + # Add new inputs for plate dimensions + self.plateWidthInput = QDoubleSpinBox(self) + self.plateWidthInput.setRange(10, 500) # Adjust range as needed + self.plateWidthInput.setValue(127.76) # Default value for a standard 96-well plate + self.plateWidthInput.setSuffix(' mm') + self.form_layout.addRow("Plate Width:", self.plateWidthInput) + + self.plateHeightInput = QDoubleSpinBox(self) + self.plateHeightInput.setRange(10, 500) # Adjust range as needed + self.plateHeightInput.setValue(85.48) # Default value for a standard 96-well plate + self.plateHeightInput.setSuffix(' mm') + self.form_layout.addRow("Plate Height:", self.plateHeightInput) + + self.wellSpacingInput = QDoubleSpinBox(self) + self.wellSpacingInput.setRange(0.1, 100) + self.wellSpacingInput.setValue(9) + self.wellSpacingInput.setSingleStep(0.1) + self.wellSpacingInput.setDecimals(2) + self.wellSpacingInput.setSuffix(' mm') + self.form_layout.addRow("Well Spacing:", self.wellSpacingInput) + + left_layout.addLayout(self.form_layout) + + points_layout = QGridLayout() + self.cornerLabels = [] + self.setPointButtons = [] + navigate_label = QLabel("Navigate to and Select\n3 Points on the Edge of Well A1") + navigate_label.setAlignment(Qt.AlignCenter) + # navigate_label.setStyleSheet("font-weight: bold;") + points_layout.addWidget(navigate_label, 0, 0, 1, 2) + for i in range(1, 4): + label = QLabel(f"Point {i}: N/A") + button = QPushButton("Set Point") + button.setFixedWidth(button.sizeHint().width()) + button.clicked.connect(lambda checked, index=i-1: self.setCorner(index)) + 
points_layout.addWidget(label, i, 0) + points_layout.addWidget(button, i, 1) + self.cornerLabels.append(label) + self.setPointButtons.append(button) + + points_layout.setColumnStretch(0,1) + left_layout.addLayout(points_layout) + + # Add 'Click to Move' checkbox + self.clickToMoveCheckbox = QCheckBox("Click to Move") + self.clickToMoveCheckbox.stateChanged.connect(self.toggleClickToMove) + left_layout.addWidget(self.clickToMoveCheckbox) + + # Add 'Show Virtual Joystick' checkbox + self.showJoystickCheckbox = QCheckBox("Virtual Joystick") + self.showJoystickCheckbox.stateChanged.connect(self.toggleVirtualJoystick) + left_layout.addWidget(self.showJoystickCheckbox) + + self.calibrateButton = QPushButton("Calibrate") + self.calibrateButton.clicked.connect(self.calibrate) + self.calibrateButton.setEnabled(False) + left_layout.addWidget(self.calibrateButton) + + # Add left column to main layout + layout.addLayout(left_layout) + + self.live_viewer = CalibrationLiveViewer(parent=self) + self.streamHandler.image_to_display.connect(self.live_viewer.display_image) + #self.live_viewer.signal_calibration_viewer_click.connect(self.navigationController.move_from_click) + + if not self.was_live: + self.liveController.start_live() + + # when the dialog closes i want to # self.liveController.stop_live() if live was stopped before. . . 
if it was on before, leave it on + layout.addWidget(self.live_viewer) + + # Right column for joystick and sensitivity controls + self.right_layout = QVBoxLayout() + self.right_layout.addStretch(1) + + self.joystick = Joystick(self) + self.joystick.joystickMoved.connect(self.moveStage) + self.right_layout.addWidget(self.joystick, 0, Qt.AlignTop | Qt.AlignHCenter) + + self.right_layout.addStretch(1) + + # Create a container widget for sensitivity label and slider + sensitivity_layout = QVBoxLayout() + + sensitivityLabel = QLabel("Joystick Sensitivity") + sensitivityLabel.setAlignment(Qt.AlignCenter) + sensitivity_layout.addWidget(sensitivityLabel) + + self.sensitivitySlider = QSlider(Qt.Horizontal) + self.sensitivitySlider.setMinimum(1) + self.sensitivitySlider.setMaximum(100) + self.sensitivitySlider.setValue(50) + self.sensitivitySlider.setTickPosition(QSlider.TicksBelow) + self.sensitivitySlider.setTickInterval(10) + + label_width = sensitivityLabel.sizeHint().width() + self.sensitivitySlider.setFixedWidth(label_width) + + sensitivity_layout.addWidget(self.sensitivitySlider, 0, Qt.AlignHCenter) + + self.right_layout.addLayout(sensitivity_layout) + + layout.addLayout(self.right_layout) + + if not self.was_live: + self.liveController.start_live() + + def toggleVirtualJoystick(self, state): + if state: + self.joystick.show() + self.sensitivitySlider.show() + self.right_layout.itemAt(self.right_layout.indexOf(self.joystick)).widget().show() + self.right_layout.itemAt(self.right_layout.count() - 1).layout().itemAt(0).widget().show() # Show sensitivity label + self.right_layout.itemAt(self.right_layout.count() - 1).layout().itemAt(1).widget().show() # Show sensitivity slider + else: + self.joystick.hide() + self.sensitivitySlider.hide() + self.right_layout.itemAt(self.right_layout.indexOf(self.joystick)).widget().hide() + self.right_layout.itemAt(self.right_layout.count() - 1).layout().itemAt(0).widget().hide() # Hide sensitivity label + 
self.right_layout.itemAt(self.right_layout.count() - 1).layout().itemAt(1).widget().hide() # Hide sensitivity slider + + # def updateCursorPosition(self, x, y): + # x_mm = self.navigationController.x_pos_mm + (x - self.live_viewer.width() / 2) * self.navigationController.x_mm_per_pixel + # y_mm = self.navigationController.y_pos_mm + (y - self.live_viewer.height() / 2) * self.navigationController.y_mm_per_pixel + + def moveStage(self, x, y): + sensitivity = self.sensitivitySlider.value() / 50.0 # Normalize to 0-2 range + max_speed = 0.1 * sensitivity + exponent = 2 + + dx = math.copysign(max_speed * abs(x)**exponent, x) + dy = math.copysign(max_speed * abs(y)**exponent, y) + + self.navigationController.move_x(dx) + self.navigationController.move_y(dy) + + def toggleClickToMove(self, state): + if state == Qt.Checked: + self.navigationController.set_flag_click_to_move(True) + self.live_viewer.signal_calibration_viewer_click.connect(self.viewerClicked) + else: + self.live_viewer.signal_calibration_viewer_click.disconnect(self.viewerClicked) + self.navigationController.set_flag_click_to_move(False) + + def viewerClicked(self, x, y, width, height): + if self.clickToMoveCheckbox.isChecked(): + self.navigationController.move_from_click(x, y, width, height) + + def setCorner(self, index): + if self.corners[index] is None: + x = self.navigationController.x_pos_mm + y = self.navigationController.y_pos_mm + + # Check if the new point is different from existing points + if any(corner is not None and np.allclose([x, y], corner) for corner in self.corners): + QMessageBox.warning(self, "Duplicate Point", "This point is too close to an existing point. 
Please choose a different location.") + return + + self.corners[index] = (x, y) + self.cornerLabels[index].setText(f"Point {index+1}: ({x:.2f}, {y:.2f})") + self.setPointButtons[index].setText("Clear Point") + else: + self.corners[index] = None + self.cornerLabels[index].setText(f"Point {index+1}: Not set") + self.setPointButtons[index].setText("Set Point") + + self.calibrateButton.setEnabled(all(corner is not None for corner in self.corners)) + + def populate_existing_formats(self): + self.existing_format_combo.clear() + for format_ in WELLPLATE_FORMAT_SETTINGS: + self.existing_format_combo.addItem(f"{format_} well plate", format_) + + def toggle_input_mode(self): + if self.new_format_radio.isChecked(): + self.existing_format_combo.hide() + for i in range(self.form_layout.rowCount()): + self.form_layout.itemAt(i, QFormLayout.FieldRole).widget().show() + self.form_layout.itemAt(i, QFormLayout.LabelRole).widget().show() + else: + self.existing_format_combo.show() + for i in range(self.form_layout.rowCount()): + self.form_layout.itemAt(i, QFormLayout.FieldRole).widget().hide() + self.form_layout.itemAt(i, QFormLayout.LabelRole).widget().hide() + + def calibrate(self): + try: + if self.new_format_radio.isChecked(): + if not self.nameInput.text() or not all(self.corners): + QMessageBox.warning(self, "Incomplete Information", "Please fill in all fields and set 3 corner points before calibrating.") + return + + name = self.nameInput.text() + rows = self.rowsInput.value() + cols = self.colsInput.value() + well_spacing_mm = self.wellSpacingInput.value() + plate_width_mm = self.plateWidthInput.value() + plate_height_mm = self.plateHeightInput.value() + + center, radius = self.calculate_circle(self.corners) + well_size_mm = radius * 2 + a1_x_mm, a1_y_mm = center + scale = 1 / 0.084665 + a1_x_pixel = round(a1_x_mm * scale) + a1_y_pixel = round(a1_y_mm * scale) + + new_format = { + 'a1_x_mm': a1_x_mm, + 'a1_y_mm': a1_y_mm, + 'a1_x_pixel': a1_x_pixel, + 'a1_y_pixel': 
a1_y_pixel, + 'well_size_mm': well_size_mm, + 'well_spacing_mm': well_spacing_mm, + 'number_of_skip': 0, + 'rows': rows, + 'cols': cols, + } + + self.wellplateFormatWidget.add_custom_format(name, new_format) + self.wellplateFormatWidget.save_formats_to_csv() + self.create_wellplate_image(name, new_format, plate_width_mm, plate_height_mm) + self.wellplateFormatWidget.setWellplateSettings(name) + success_message = f"New format '{name}' has been successfully created and calibrated." + + else: + selected_format = self.existing_format_combo.currentData() + if not all(self.corners): + QMessageBox.warning(self, "Incomplete Information", "Please set 3 corner points before calibrating.") + return + + center, radius = self.calculate_circle(self.corners) + well_size_mm = radius * 2 + a1_x_mm, a1_y_mm = center + + # Get the existing format settings + existing_settings = WELLPLATE_FORMAT_SETTINGS[selected_format] + + # # Calculate the offset between the original 0,0 pixel and 0,0 mm + # original_offset_x = existing_settings['a1_x_mm'] - (existing_settings['a1_x_pixel'] * 0.084665) + # original_offset_y = existing_settings['a1_y_mm'] - (existing_settings['a1_y_pixel'] * 0.084665) + # # Calculate new pixel coordinates using the original offset + # a1_x_pixel = round((a1_x_mm - original_offset_x) / 0.084665) + # a1_y_pixel = round((a1_y_mm - original_offset_y) / 0.084665) + + print(f"Updating existing format {selected_format} well plate") + print(f"OLD: 'a1_x_mm': {existing_settings['a1_x_mm']}, 'a1_y_mm': {existing_settings['a1_y_mm']}, 'well_size_mm': {existing_settings['well_size_mm']}") + print(f"NEW: 'a1_x_mm': {a1_x_mm}, 'a1_y_mm': {a1_y_mm}, 'well_size_mm': {well_size_mm}") + + updated_settings = { + 'a1_x_mm': a1_x_mm, + 'a1_y_mm': a1_y_mm, + # 'a1_x_pixel': a1_x_pixel, + # 'a1_y_pixel': a1_y_pixel, + 'well_size_mm': well_size_mm, + } + + WELLPLATE_FORMAT_SETTINGS[selected_format].update(updated_settings) + + self.wellplateFormatWidget.save_formats_to_csv() + 
self.wellplateFormatWidget.setWellplateSettings(selected_format) + success_message = f"Format '{selected_format} well plate' has been successfully recalibrated." + + # Update the WellplateFormatWidget's combo box to reflect the newly calibrated format + self.wellplateFormatWidget.populate_combo_box() + index = self.wellplateFormatWidget.comboBox.findData(selected_format if self.calibrate_format_radio.isChecked() else name) + if index >= 0: + self.wellplateFormatWidget.comboBox.setCurrentIndex(index) + + # Display success message + QMessageBox.information(self, "Calibration Successful", success_message) + self.accept() + + except Exception as e: + QMessageBox.critical(self, "Calibration Error", f"An error occurred during calibration: {str(e)}") + + def create_wellplate_image(self, name, format_data, plate_width_mm, plate_height_mm): + + scale = 1 / 0.084665 + def mm_to_px(mm): + return round(mm * scale) + + width = mm_to_px(plate_width_mm) + height = mm_to_px(plate_height_mm) + image = Image.new('RGB', (width, height), color='white') + draw = ImageDraw.Draw(image) + + rows, cols = format_data['rows'], format_data['cols'] + well_spacing_mm = format_data['well_spacing_mm'] + well_size_mm = format_data['well_size_mm'] + a1_x_mm, a1_y_mm = format_data['a1_x_mm'], format_data['a1_y_mm'] + + def draw_left_slanted_rectangle(draw, xy, slant, width=4, outline='black', fill=None): + x1, y1, x2, y2 = xy + + # Define the polygon points + points = [ + (x1 + slant, y1), # Top-left after slant + (x2, y1), # Top-right + (x2, y2), # Bottom-right + (x1 + slant, y2), # Bottom-left after slant + (x1, y2 - slant), # Bottom of left slant + (x1, y1 + slant) # Top of left slant + ] + + # Draw the filled polygon with outline + draw.polygon(points, fill=fill, outline=outline, width=width) + + # Draw the outer rectangle with rounded corners + corner_radius = 20 + draw.rounded_rectangle([0, 0, width-1, height-1], radius=corner_radius, outline='black', width=4, fill='grey') + + # Draw the inner 
rectangle with left slanted corners + margin = 20 + slant = 40 + draw_left_slanted_rectangle(draw, + [margin, margin, width-margin, height-margin], + slant, width=4, outline='black', fill='lightgrey') + + # Function to draw a circle + def draw_circle(x, y, diameter): + radius = diameter / 2 + draw.ellipse([x-radius, y-radius, x+radius, y+radius], outline='black', width=4, fill='white') + + # Draw the wells + for row in range(rows): + for col in range(cols): + x = mm_to_px(a1_x_mm + col * well_spacing_mm) + y = mm_to_px(a1_y_mm + row * well_spacing_mm) + draw_circle(x, y, mm_to_px(well_size_mm)) + + # Load a default font + font_size = 30 + font = ImageFont.load_default().font_variant(size=font_size) + + # Add column labels + for col in range(cols): + label = str(col + 1) + x = mm_to_px(a1_x_mm + col * well_spacing_mm) + y = mm_to_px((a1_y_mm - well_size_mm/2) / 2) + bbox = font.getbbox(label) + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + draw.text((x - text_width/2, y), label, fill="black", font=font) + + # Add row labels + for row in range(rows): + label = chr(65 + row) if row < 26 else chr(65 + row // 26 - 1) + chr(65 + row % 26) + x = mm_to_px((a1_x_mm - well_size_mm/2 ) / 2) + y = mm_to_px(a1_y_mm + row * well_spacing_mm) + bbox = font.getbbox(label) + text_height = bbox[3] - bbox[1] + text_width = bbox[2] - bbox[0] + draw.text((x + 20 - text_width/2, y - text_height + 1), label, fill="black", font=font) + + image_path = os.path.join('images', f'{name.replace(" ", "_")}.png') + image.save(image_path) + print(f"Wellplate image saved as {image_path}") + return image_path + + @staticmethod + def calculate_circle(points): + # Convert points to numpy array + points = np.array(points) + + # Calculate the center and radius of the circle + A = np.array([points[1] - points[0], points[2] - points[0]]) + b = np.sum(A * (points[1:3] + points[0]) / 2, axis=1) + center = np.linalg.solve(A, b) + + # Calculate the radius + radius = 
np.mean(np.linalg.norm(points - center, axis=1)) + + return center, radius + + def closeEvent(self, event): + # Stop live view if it wasn't initially on + if not self.was_live: + self.liveController.stop_live() + super().closeEvent(event) + + def accept(self): + # Stop live view if it wasn't initially on + if not self.was_live: + self.liveController.stop_live() + super().accept() + + def reject(self): + # This method is called when the dialog is closed without accepting + if not self.was_live: + self.liveController.stop_live() + sample = self.navigationViewer.sample + + # Convert sample string to format int + if 'glass slide' in sample: + sample_format = 'glass slide' + else: + try: + sample_format = int(sample.split()[0]) + except (ValueError, IndexError): + print(f"Unable to parse sample format from '{sample}'. Defaulting to 0.") + sample_format = 'glass slide' + + # Set dropdown to the current sample format + index = self.wellplateFormatWidget.comboBox.findData(sample_format) + if index >= 0: + self.wellplateFormatWidget.comboBox.setCurrentIndex(index) + + # Update wellplate settings + self.wellplateFormatWidget.setWellplateSettings(sample_format) + + super().reject() + + +class CalibrationLiveViewer(QWidget): + + signal_calibration_viewer_click = Signal(int, int, int, int) + signal_mouse_moved = Signal(int, int) + + def __init__(self, parent=None): + super().__init__() + self.parent = parent + self.initial_zoom_set = False + self.initUI() + + def initUI(self): + layout = QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + self.view = pg.GraphicsLayoutWidget() + self.viewbox = self.view.addViewBox() + self.viewbox.setAspectLocked(True) + self.viewbox.invertY(True) + + # Set appropriate panning limits based on the acquisition image or plate size + xmax = int(Acquisition.CROP_WIDTH * Acquisition.IMAGE_DISPLAY_SCALING_FACTOR) + ymax = int(Acquisition.CROP_HEIGHT * Acquisition.IMAGE_DISPLAY_SCALING_FACTOR) + self.viewbox.setLimits(xMin=0, xMax=xmax, 
yMin=0, yMax=ymax) + + self.img_item = pg.ImageItem() + self.viewbox.addItem(self.img_item) + + # Add fixed crosshair + pen = QPen(QColor(255, 0, 0)) # Red color + pen.setWidth(4) + + self.crosshair_h = pg.InfiniteLine(angle=0, movable=False, pen=pen) + self.crosshair_v = pg.InfiniteLine(angle=90, movable=False, pen=pen) + self.viewbox.addItem(self.crosshair_h) + self.viewbox.addItem(self.crosshair_v) + + layout.addWidget(self.view) + + # Connect double-click event + self.view.scene().sigMouseClicked.connect(self.onMouseClicked) + + # Set fixed size for the viewer + self.setFixedSize(500, 500) + + # Initialize with a blank image + self.display_image(np.zeros((xmax, ymax))) + + def setCrosshairPosition(self): + center = self.viewbox.viewRect().center() + self.crosshair_h.setPos(center.y()) + self.crosshair_v.setPos(center.x()) + + def display_image(self, image): + # Step 1: Update the image + self.img_item.setImage(image) + + # Step 2: Get the image dimensions + image_width = image.shape[1] + image_height = image.shape[0] + + # Step 3: Calculate the center of the image + image_center_x = image_width / 2 + image_center_y = image_height / 2 + + # Step 4: Calculate the current view range + current_view_range = self.viewbox.viewRect() + + # Step 5: If it's the first image or initial zoom hasn't been set, center the image + if not self.initial_zoom_set: + self.viewbox.setRange(xRange=(0, image_width), yRange=(0, image_height), padding=0) + self.initial_zoom_set = True # Mark initial zoom as set + + # Step 6: Always center the view around the image center (for seamless transitions) + else: + self.viewbox.setRange( + xRange=(image_center_x - current_view_range.width() / 2, + image_center_x + current_view_range.width() / 2), + yRange=(image_center_y - current_view_range.height() / 2, + image_center_y + current_view_range.height() / 2), + padding=0 + ) + + # Step 7: Ensure the crosshair is updated + self.setCrosshairPosition() + + # def mouseMoveEvent(self, event): + # 
self.signal_mouse_moved.emit(event.x(), event.y()) + + def onMouseClicked(self, event): + # Map the scene position to view position + if event.double(): # double click to move + pos = event.pos() + scene_pos = self.viewbox.mapSceneToView(pos) + + # Get the x, y coordinates + x, y = int(scene_pos.x()), int(scene_pos.y()) + # Ensure the coordinates are within the image boundaries + image_shape = self.img_item.image.shape + if 0 <= x < image_shape[1] and 0 <= y < image_shape[0]: + # Adjust the coordinates to be relative to the center of the image + x_centered = x - image_shape[1] // 2 + y_centered = y - image_shape[0] // 2 + # Emit the signal with the clicked coordinates and image size + self.signal_calibration_viewer_click.emit(x_centered, y_centered, image_shape[1], image_shape[0]) + else: + print("click was outside the image bounds.") + else: + print("single click only detected") + + def wheelEvent(self, event): + if event.angleDelta().y() > 0: + scale_factor = 0.9 + else: + scale_factor = 1.1 + + # Get the center of the viewbox + center = self.viewbox.viewRect().center() + + # Scale the view + self.viewbox.scaleBy((scale_factor, scale_factor), center) + + # Update crosshair position after scaling + self.setCrosshairPosition() + + event.accept() + + def resizeEvent(self, event): + super().resizeEvent(event) + self.setCrosshairPosition() + + +class Joystick(QWidget): + joystickMoved = Signal(float, float) # Emits x and y values between -1 and 1 + + def __init__(self, parent=None): + super().__init__(parent) + self.setFixedSize(200, 200) + self.inner_radius = 40 + self.max_distance = self.width() // 2 - self.inner_radius + self.outer_radius = int(self.width() * 3 / 8) + self.current_x = 0 + self.current_y = 0 + self.is_pressed = False + self.timer = QTimer(self) + + def paintEvent(self, event): + painter = QPainter(self) + painter.setRenderHint(QPainter.Antialiasing) + + # Calculate the painting area + paint_rect = QRectF(0, 0, 200, 200) + + # Draw outer circle + 
painter.setBrush(QColor(230, 230, 230)) # Light grey fill + painter.setPen(QPen(QColor(100, 100, 100), 2)) # Dark grey outline + painter.drawEllipse(paint_rect.center(), self.outer_radius, self.outer_radius) + + # Draw inner circle (joystick position) + painter.setBrush(QColor(100, 100, 100)) + painter.setPen(Qt.NoPen) + joystick_x = paint_rect.center().x() + self.current_x * self.max_distance + joystick_y = paint_rect.center().y() + self.current_y * self.max_distance + painter.drawEllipse(QPointF(joystick_x, joystick_y), self.inner_radius, self.inner_radius) + + def mousePressEvent(self, event): + if QRectF(0, 0, 200, 200).contains(event.pos()): + self.is_pressed = True + self.updateJoystickPosition(event.pos()) + self.timer.timeout.connect(self.update_position) + self.timer.start(10) + + def mouseMoveEvent(self, event): + if self.is_pressed and QRectF(0, 0, 200, 200).contains(event.pos()): + self.updateJoystickPosition(event.pos()) + + def mouseReleaseEvent(self, event): + self.is_pressed = False + self.updateJoystickPosition(QPointF(100, 100)) # Center position + self.timer.timeout.disconnect(self.update_position) + self.joystickMoved.emit(0, 0) + + def update_position(self): + if self.is_pressed: + self.joystickMoved.emit(self.current_x, -self.current_y) + + def updateJoystickPosition(self, pos): + center = QPointF(100, 100) + dx = pos.x() - center.x() + dy = pos.y() - center.y() + distance = math.sqrt(dx**2 + dy**2) + + if distance > self.max_distance: + dx = dx * self.max_distance / distance + dy = dy * self.max_distance / distance + + self.current_x = dx / self.max_distance + self.current_y = dy / self.max_distance + self.update() + + class WellSelectionWidget(QTableWidget): - signal_wellSelected = Signal(int,int,float) - signal_wellSelectedPos = Signal(float,float) + signal_wellSelected = Signal(bool) + signal_wellSelectedPos = Signal(float, float) - def __init__(self, format_, *args): - - if format_ == 6: - self.rows = 2 - self.columns = 3 - 
self.spacing_mm = 39.2 - elif format_ == 12: - self.rows = 3 - self.columns = 4 - self.spacing_mm = 26 - elif format_ == 24: - self.rows = 4 - self.columns = 6 - self.spacing_mm = 18 - elif format_ == 96: - self.rows = 8 - self.columns = 12 - self.spacing_mm = 9 - elif format_ == 384: - self.rows = 16 - self.columns = 24 - self.spacing_mm = 4.5 - elif format_ == 1536: - self.rows = 32 - self.columns = 48 - self.spacing_mm = 2.25 + def __init__(self, format_, wellplateFormatWidget, *args, **kwargs): + super(WellSelectionWidget, self).__init__(*args, **kwargs) + self.wellplateFormatWidget = wellplateFormatWidget + self.wellplateFormatWidget.signalWellplateSettings.connect(self.updateWellplateSettings) + self.cellDoubleClicked.connect(self.onDoubleClick) + self.itemSelectionChanged.connect(self.onSelectionChanged) + self.fixed_height = 400 + self.setFormat(format_) + def setFormat(self, format_): self.format = format_ - - QTableWidget.__init__(self, self.rows, self.columns, *args) + settings = self.getWellplateSettings(self.format) + self.rows = settings['rows'] + self.columns = settings['cols'] + self.spacing_mm = settings['well_spacing_mm'] + self.number_of_skip = settings['number_of_skip'] + self.a1_x_mm = settings['a1_x_mm'] + self.a1_y_mm = settings['a1_y_mm'] + self.a1_x_pixel = settings['a1_x_pixel'] + self.a1_y_pixel = settings['a1_y_pixel'] + self.well_size_mm = settings['well_size_mm'] + + self.setRowCount(self.rows) + self.setColumnCount(self.columns) + self.initUI() self.setData() - self.resizeColumnsToContents() - self.resizeRowsToContents() + + def initUI(self): + # Disable editing, scrollbars, and other interactions self.setEditTriggers(QTableWidget.NoEditTriggers) - self.cellDoubleClicked.connect(self.onDoubleClick) - self.cellClicked.connect(self.onSingleClick) + self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) + self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) + self.verticalScrollBar().setDisabled(True) + 
self.horizontalScrollBar().setDisabled(True) + self.setFocusPolicy(Qt.NoFocus) + self.setTabKeyNavigation(False) + self.setDragEnabled(False) + self.setAcceptDrops(False) + self.setDragDropOverwriteMode(False) + self.setMouseTracking(False) + + if self.format == '1536 well plate': + font = QFont() + font.setPointSize(6) # You can adjust this value as needed + else: + font = QFont() + self.horizontalHeader().setFont(font) + self.verticalHeader().setFont(font) + + self.setLayout() + + def setLayout(self): + # Calculate available space and cell size + header_height = self.horizontalHeader().height() + available_height = self.fixed_height - header_height # Fixed height of 408 pixels + + # Calculate cell size based on the minimum of available height and width + cell_size = available_height // self.rowCount() - # size self.verticalHeader().setSectionResizeMode(QHeaderView.Fixed) - self.verticalHeader().setDefaultSectionSize(int(5*self.spacing_mm)) + self.verticalHeader().setDefaultSectionSize(cell_size) self.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed) - self.horizontalHeader().setMinimumSectionSize(int(5*self.spacing_mm)) + self.horizontalHeader().setDefaultSectionSize(cell_size) - self.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum) - self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) - self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) - self.resizeColumnsToContents() - self.setFixedSize(self.horizontalHeader().length() + - self.verticalHeader().width(), - self.verticalHeader().length() + - self.horizontalHeader().height()) - - def setData(self): - ''' - # cells - for i in range(16): - for j in range(24): - newitem = QTableWidgetItem( chr(ord('A')+i) + str(j) ) - self.setItem(i, j, newitem) - ''' - # row header + # Ensure sections do not resize + self.verticalHeader().setMinimumSectionSize(cell_size) + self.verticalHeader().setMaximumSectionSize(cell_size) + self.horizontalHeader().setMinimumSectionSize(cell_size) + 
self.horizontalHeader().setMaximumSectionSize(cell_size) + + row_header_width = self.verticalHeader().width() + + # Calculate total width and height + total_height = (self.rowCount() * cell_size) + header_height + total_width = (self.columnCount() * cell_size) + row_header_width + + # Set the widget's fixed size + self.setFixedHeight(total_height) + self.setFixedWidth(total_width) + + # Force the widget to update its layout + self.updateGeometry() + self.viewport().update() + + def getWellplateSettings(self, wellplate_format): + return self.wellplateFormatWidget.getWellplateSettings(wellplate_format) + + def updateWellplateSettings(self, format_, a1_x_mm, a1_y_mm, a1_x_pixel, a1_y_pixel, well_size_mm, well_spacing_mm, number_of_skip, rows, cols): + if isinstance(format_, QVariant): + format_ = format_.value() + self.setFormat(format_) + + def setData(self): + for i in range(self.rowCount()): + for j in range(self.columnCount()): + item = self.item(i, j) + if not item: # Create a new item if none exists + item = QTableWidgetItem() + self.setItem(i, j, item) + # Reset to selectable by default + item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) + + if self.number_of_skip > 0 and self.format != 0: + for i in range(self.number_of_skip): + for j in range(self.columns): # Apply to rows + self.item(i, j).setFlags(self.item(i, j).flags() & ~Qt.ItemIsSelectable) + self.item(self.rows - 1 - i, j).setFlags(self.item(self.rows - 1 - i, j).flags() & ~Qt.ItemIsSelectable) + for k in range(self.rows): # Apply to columns + self.item(k, i).setFlags(self.item(k, i).flags() & ~Qt.ItemIsSelectable) + self.item(k, self.columns - 1 - i).setFlags(self.item(k, self.columns - 1 - i).flags() & ~Qt.ItemIsSelectable) + + # Update row headers row_headers = [] - for i in range(16): - row_headers.append(chr(ord('A')+i)) + for i in range(self.rows): + if i < 26: + label = chr(ord('A') + i) + else: + first_letter = chr(ord('A') + (i // 26) - 1) + second_letter = chr(ord('A') + (i % 26)) + label 
= first_letter + second_letter + row_headers.append(label) self.setVerticalHeaderLabels(row_headers) - # make the outer cells not selectable if using 96 and 384 well plates - if self.format == 384: - for i in range(self.rows): - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(i,0,item) - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(i,self.columns-1,item) - for j in range(self.columns): - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(0,j,item) - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(self.rows-1,j,item) - elif self.format == 96: - if NUMBER_OF_SKIP == 1: - for i in range(self.rows): - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(i,0,item) - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(i,self.columns-1,item) - for j in range(self.columns): - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(0,j,item) - item = QTableWidgetItem() - item.setFlags(item.flags() & ~Qt.ItemIsSelectable) - self.setItem(self.rows-1,j,item) + # Adjust vertical header width after setting labels + self.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents) - def onDoubleClick(self,row,col): - if (row >= 0 + NUMBER_OF_SKIP and row <= self.rows-1-NUMBER_OF_SKIP ) and ( col >= 0 + NUMBER_OF_SKIP and col <= self.columns-1-NUMBER_OF_SKIP ): - x_mm = X_MM_384_WELLPLATE_UPPERLEFT + WELL_SIZE_MM_384_WELLPLATE/2 - (A1_X_MM_384_WELLPLATE+WELL_SPACING_MM_384_WELLPLATE*NUMBER_OF_SKIP_384) + col*WELL_SPACING_MM + A1_X_MM + WELLPLATE_OFFSET_X_mm - y_mm = Y_MM_384_WELLPLATE_UPPERLEFT + WELL_SIZE_MM_384_WELLPLATE/2 - (A1_Y_MM_384_WELLPLATE+WELL_SPACING_MM_384_WELLPLATE*NUMBER_OF_SKIP_384) + row*WELL_SPACING_MM + A1_Y_MM + WELLPLATE_OFFSET_Y_mm + def 
onDoubleClick(self, row, col): + print("double click well", row, col) + if (row >= 0 + self.number_of_skip and row <= self.rows-1-self.number_of_skip ) and ( col >= 0 + self.number_of_skip and col <= self.columns-1-self.number_of_skip ): + x_mm = col*self.spacing_mm + self.a1_x_mm + WELLPLATE_OFFSET_X_mm + y_mm = row*self.spacing_mm + self.a1_y_mm + WELLPLATE_OFFSET_Y_mm self.signal_wellSelectedPos.emit(x_mm,y_mm) - # print('(' + str(row) + ',' + str(col) + ') doubleclicked') + print("well location:", (x_mm,y_mm)) + self.signal_wellSelected.emit(True) + else: + self.signal_wellSelected.emit(False) def onSingleClick(self,row,col): - # self.get_selected_cells() - pass + print("single click well", row, col) + if (row >= 0 + self.number_of_skip and row <= self.rows-1-self.number_of_skip ) and ( col >= 0 + self.number_of_skip and col <= self.columns-1-self.number_of_skip ): + self.signal_wellSelected.emit(True) + else: + self.signal_wellSelected.emit(False) + + def onSelectionChanged(self): + selected_cells = self.get_selected_cells() + self.signal_wellSelected.emit(bool(selected_cells)) def get_selected_cells(self): list_of_selected_cells = [] + print("getting selected cells...") + if self.format == 'glass slide': + return list_of_selected_cells for index in self.selectedIndexes(): - list_of_selected_cells.append((index.row(),index.column())) + row, col = index.row(), index.column() + # Check if the cell is within the allowed bounds + if (row >= 0 + self.number_of_skip and row <= self.rows - 1 - self.number_of_skip) and \ + (col >= 0 + self.number_of_skip and col <= self.columns - 1 - self.number_of_skip): + list_of_selected_cells.append((row, col)) + if list_of_selected_cells: + print("cells:", list_of_selected_cells) + else: + print("no cells") + return list_of_selected_cells + + def resizeEvent(self, event): + self.initUI() + super().resizeEvent(event) + + def wheelEvent(self, event): + # Ignore wheel events to prevent scrolling + event.ignore() + + def 
scrollTo(self, index, hint=QAbstractItemView.EnsureVisible): + pass + + def set_white_boundaries_style(self): + style = """ + QTableWidget { + gridline-color: white; + border: 1px solid white; + } + QHeaderView::section { + color: white; + } + """ + # QTableWidget::item { + # border: 1px solid white; + # } + self.setStyleSheet(style) + + +class Well1536SelectionWidget(QWidget): + + signal_wellSelected = Signal(bool) + signal_wellSelectedPos = Signal(float,float) + + def __init__(self): + super().__init__() + self.format = '1536 well plate' + self.selected_cells = {} # Dictionary to keep track of selected cells and their colors + self.current_cell = None # To track the current (green) cell + self.rows = 32 + self.columns = 48 + self.spacing_mm = 2.25 + self.number_of_skip = 0 + self.well_size_mm = 1.5 + self.a1_x_mm = 11.0 # measured stage position - to update + self.a1_y_mm = 7.86 # measured stage position - to update + self.a1_x_pixel = 144 # coordinate on the png - to update + self.a1_y_pixel = 108 # coordinate on the png - to update + self.initUI() + + def initUI(self): + self.setWindowTitle('1536 Well Plate') + self.setGeometry(100, 100, 750, 400) # Increased width to accommodate controls + + self.a = 11 + image_width = 48 * self.a + image_height = 32 * self.a + + self.image = QPixmap(image_width, image_height) + self.image.fill(QColor('white')) + self.label = QLabel() + self.label.setPixmap(self.image) + self.label.setFixedSize(image_width, image_height) + self.label.setAlignment(Qt.AlignCenter) + + self.cell_input = QLineEdit(self) + self.cell_input.setPlaceholderText("e.g. AE12 or B4") + go_button = QPushButton('Go to well', self) + go_button.clicked.connect(self.go_to_cell) + self.selection_input = QLineEdit(self) + self.selection_input.setPlaceholderText("e.g. 
A1:E48, X1, AC24, Z2:AF6, ...") + self.selection_input.editingFinished.connect(self.select_cells) + + # Create navigation buttons + up_button = QPushButton('↑', self) + left_button = QPushButton('←', self) + right_button = QPushButton('→', self) + down_button = QPushButton('↓', self) + add_button = QPushButton('Select', self) + + # Connect navigation buttons to their respective functions + up_button.clicked.connect(self.move_up) + left_button.clicked.connect(self.move_left) + right_button.clicked.connect(self.move_right) + down_button.clicked.connect(self.move_down) + add_button.clicked.connect(self.add_current_well) + + layout = QHBoxLayout() + layout.addWidget(self.label) + + layout_controls = QVBoxLayout() + layout_controls.addStretch(2) + + # Add navigation buttons in a + sign layout + layout_move = QGridLayout() + layout_move.addWidget(up_button, 0, 2) + layout_move.addWidget(left_button, 1, 1) + layout_move.addWidget(add_button, 1, 2) + layout_move.addWidget(right_button, 1, 3) + layout_move.addWidget(down_button, 2, 2) + layout_move.setColumnStretch(0, 1) + layout_move.setColumnStretch(4, 1) + layout_controls.addLayout(layout_move) + + layout_controls.addStretch(1) + + layout_input = QGridLayout() + layout_input.addWidget(QLabel("Well Navigation"), 0, 0) + layout_input.addWidget(self.cell_input, 0, 1) + layout_input.addWidget(go_button, 0, 2) + layout_input.addWidget(QLabel("Well Selection"), 1, 0) + layout_input.addWidget(self.selection_input, 1, 1, 1, 2) + layout_controls.addLayout(layout_input) + + control_widget = QWidget() + control_widget.setLayout(layout_controls) + control_widget.setFixedHeight(image_height) # Set the height of controls to match the image + + layout.addWidget(control_widget) + self.setLayout(layout) + + def move_up(self): + if self.current_cell: + row, col = self.current_cell + if row > 0: + self.current_cell = (row - 1, col) + self.update_current_cell() + + def move_left(self): + if self.current_cell: + row, col = self.current_cell 
+ if col > 0: + self.current_cell = (row, col - 1) + self.update_current_cell() + + def move_right(self): + if self.current_cell: + row, col = self.current_cell + if col < self.columns - 1: + self.current_cell = (row, col + 1) + self.update_current_cell() + + def move_down(self): + if self.current_cell: + row, col = self.current_cell + if row < self.rows - 1: + self.current_cell = (row + 1, col) + self.update_current_cell() + + def add_current_well(self): + if self.current_cell: + row, col = self.current_cell + cell_name = f"{chr(65 + row)}{col + 1}" + + if (row, col) in self.selected_cells: + # If the well is already selected, remove it + del self.selected_cells[(row, col)] + self.remove_well_from_selection_input(cell_name) + print(f"Removed well {cell_name}") + else: + # If the well is not selected, add it + self.selected_cells[(row, col)] = '#1f77b4' # Add to selected cells with blue color + self.add_well_to_selection_input(cell_name) + print(f"Added well {cell_name}") + + self.redraw_wells() + self.signal_wellSelected.emit(bool(self.selected_cells)) + + def add_well_to_selection_input(self, cell_name): + current_selection = self.selection_input.text() + if current_selection: + self.selection_input.setText(f"{current_selection}, {cell_name}") + else: + self.selection_input.setText(cell_name) + + def remove_well_from_selection_input(self, cell_name): + current_selection = self.selection_input.text() + cells = [cell.strip() for cell in current_selection.split(',')] + if cell_name in cells: + cells.remove(cell_name) + self.selection_input.setText(', '.join(cells)) + + def update_current_cell(self): + self.redraw_wells() + row, col = self.current_cell + if row < 26: + row_label = chr(65 + row) + else: + row_label = chr(64 + (row // 26)) + chr(65 + (row % 26)) + # Update cell_input with the correct label (e.g., A1, B2, AA1, etc.) 
+ self.cell_input.setText(f"{row_label}{col + 1}") + + x_mm = col * self.spacing_mm + self.a1_x_mm + WELLPLATE_OFFSET_X_mm + y_mm = row * self.spacing_mm + self.a1_y_mm + WELLPLATE_OFFSET_Y_mm + self.signal_wellSelectedPos.emit(x_mm, y_mm) + + def redraw_wells(self): + self.image.fill(QColor('white')) # Clear the pixmap first + painter = QPainter(self.image) + painter.setPen(QColor('white')) + # Draw selected cells in red + for (row, col), color in self.selected_cells.items(): + painter.setBrush(QColor(color)) + painter.drawRect(col * self.a, row * self.a, self.a, self.a) + # Draw current cell in green + if self.current_cell: + painter.setBrush(Qt.NoBrush) # No fill + painter.setPen(QPen(QColor('red'), 2)) # Red outline, 2 pixels wide + row, col = self.current_cell + painter.drawRect(col * self.a+2, row * self.a+2, self.a-3, self.a-3) + painter.end() + self.label.setPixmap(self.image) + + def go_to_cell(self): + cell_desc = self.cell_input.text().strip() + match = re.match(r'([A-Za-z]+)(\d+)', cell_desc) + if match: + row_part, col_part = match.groups() + row_index = self.row_to_index(row_part) + col_index = int(col_part) - 1 + self.current_cell = (row_index, col_index) # Update the current cell + self.redraw_wells() # Redraw with the new current cell + x_mm = col_index * self.spacing_mm + self.a1_x_mm + WELLPLATE_OFFSET_X_mm + y_mm = row_index * self.spacing_mm + self.a1_y_mm + WELLPLATE_OFFSET_Y_mm + self.signal_wellSelectedPos.emit(x_mm,y_mm) + + def select_cells(self): + # first clear selection + self.selected_cells = {} + + pattern = r'([A-Za-z]+)(\d+):?([A-Za-z]*)(\d*)' + cell_descriptions = self.selection_input.text().split(',') + for desc in cell_descriptions: + match = re.match(pattern, desc.strip()) + if match: + start_row, start_col, end_row, end_col = match.groups() + start_row_index = self.row_to_index(start_row) + start_col_index = int(start_col) - 1 + + if end_row and end_col: # It's a range + end_row_index = self.row_to_index(end_row) + 
end_col_index = int(end_col) - 1 + for row in range(min(start_row_index, end_row_index), max(start_row_index, end_row_index) + 1): + for col in range(min(start_col_index, end_col_index), max(start_col_index, end_col_index) + 1): + self.selected_cells[(row, col)] = '#1f77b4' + else: # It's a single cell + self.selected_cells[(start_row_index, start_col_index)] = '#1f77b4' + self.redraw_wells() + if self.selected_cells: + self.signal_wellSelected.emit(True) + + def row_to_index(self, row): + index = 0 + for char in row: + index = index * 26 + (ord(char.upper()) - ord('A') + 1) + return index - 1 + + def onSelectionChanged(self): + selected_cells = self.get_selected_cells() + + def get_selected_cells(self): + list_of_selected_cells = list(self.selected_cells.keys()) return(list_of_selected_cells) + + +class LedMatrixSettingsDialog(QDialog): + def __init__(self,led_array): + self.led_array = led_array + super().__init__() + self.setWindowTitle("LED Matrix Settings") + + self.layout = QVBoxLayout() + + # Add QDoubleSpinBox for LED intensity (0-1) + self.NA_spinbox = QDoubleSpinBox() + self.NA_spinbox.setRange(0, 1) + self.NA_spinbox.setSingleStep(0.01) + self.NA_spinbox.setValue(self.led_array.NA) + + NA_layout = QHBoxLayout() + NA_layout.addWidget(QLabel("NA")) + NA_layout.addWidget(self.NA_spinbox) + + self.layout.addLayout(NA_layout) + self.setLayout(self.layout) + + # add ok/cancel buttons + self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) + self.button_box.accepted.connect(self.accept) + self.button_box.rejected.connect(self.reject) + self.layout.addWidget(self.button_box) + + self.button_box.accepted.connect(self.update_NA) + + def update_NA(self): + self.led_array.set_NA(self.NA_spinbox.value()) + + +class SampleSettingsWidget(QFrame): + def __init__(self, ObjectivesWidget, WellplateFormatWidget, *args, **kwargs): + super().__init__(*args, **kwargs) + self.objectivesWidget = ObjectivesWidget + self.wellplateFormatWidget = 
WellplateFormatWidget + + # Set up the layout + top_row_layout = QGridLayout() + top_row_layout.setSpacing(2) + top_row_layout.setContentsMargins(0, 2, 0, 2) + top_row_layout.addWidget(self.objectivesWidget, 0, 0) + top_row_layout.addWidget(self.wellplateFormatWidget, 0, 1) + self.setLayout(top_row_layout) + self.setFrameStyle(QFrame.Panel | QFrame.Raised) + + # Connect signals for saving settings + self.objectivesWidget.signal_objective_changed.connect(self.save_settings) + self.wellplateFormatWidget.signalWellplateSettings.connect(lambda *args: self.save_settings()) + + def save_settings(self): + """Save current objective and wellplate format to cache""" + os.makedirs('cache', exist_ok=True) + data = { + 'objective': self.objectivesWidget.dropdown.currentText(), + 'wellplate_format': self.wellplateFormatWidget.wellplate_format + } + + with open('cache/objective_and_sample_format.txt', 'w') as f: + json.dump(data, f) + diff --git a/software/drivers and libraries/hamamatsu/install_hamamatsu.sh b/software/drivers and libraries/hamamatsu/install_hamamatsu.sh new file mode 100755 index 000000000..d69193995 --- /dev/null +++ b/software/drivers and libraries/hamamatsu/install_hamamatsu.sh @@ -0,0 +1,8 @@ +#!/bin/bash +wget https://www.hamamatsu.com/content/dam/hamamatsu-photonics/sites/static/sys/dcam-api-for-linux/tar-gz/DCAM-API_Lite_for_Linux_v24.4.6764.tar.gz +tar -xvf DCAM-API_Lite_for_Linux_v24.4.6764.tar.gz +ln -s ~/Desktop/octopi-research/software/drivers\ and\ libraries/hamamatsu/DCAM-API_Lite_for_Linux_v24.4.6764 /tmp/dcam_lib +cd /tmp/dcam_lib +./api/install.sh usb3 +cd ~/Desktop/octopi-research/software/drivers\ and\ libraries/hamamatsu +rm /tmp/dcam_lib diff --git a/software/drivers and libraries/ids/install_ids.sh b/software/drivers and libraries/ids/install_ids.sh new file mode 100644 index 000000000..40e8668cd --- /dev/null +++ b/software/drivers and libraries/ids/install_ids.sh @@ -0,0 +1,28 @@ +#!/bin/bash +wget 
https://en.ids-imaging.com/files/downloads/ids-peak/software/linux-desktop/ids-peak_2.11.0.0-178_amd64.tgz +tar -xvzf ids-peak_2.11.0.0-178_amd64.tgz +sudo sh ids-peak_2.11.0.0-178_amd64/local/scripts/ids_install_udev_rule.sh +python3 -m pip install ids_peak_ipl +python3 -m pip install ids_peak +python3 -m pip install ids_peak_afl + +# Define the content to add to .bashrc +content=" +# IDS camera library paths +export LD_LIBRARY_PATH=\"$HOME/Desktop/octopi-research/software/drivers and libraries/ids/ids-peak_2.11.0.0-178_amd64/lib:\$LD_LIBRARY_PATH\" +export GENICAM_GENTL32_PATH=\"\$GENICAM_GENTL32_PATH:$HOME/Desktop/octopi-research/software/drivers and libraries/ids/ids-peak_2.11.0.0-178_amd64/lib/ids/cti\" +export GENICAM_GENTL64_PATH=\"\$GENICAM_GENTL64_PATH:$HOME/Desktop/octopi-research/software/drivers and libraries/ids/ids-peak_2.11.0.0-178_amd64/lib/ids/cti\" +" + +# Append the content to .bashrc if it's not already there +if ! grep -qF "IDS camera library paths" ~/.bashrc; then + echo "$content" >> ~/.bashrc + echo "iDS peak library paths added to .bashrc" +else + echo "iDS peak library paths already exist in .bashrc" +fi + +# Source .bashrc to apply changes immediately +source ~/.bashrc + +echo "Finished installation for iDS camera." 
diff --git a/software/drivers and libraries/toupcam/linux/arm64/libtoupcam.so b/software/drivers and libraries/toupcam/linux/arm64/libtoupcam.so deleted file mode 100644 index c73dd77ce..000000000 Binary files a/software/drivers and libraries/toupcam/linux/arm64/libtoupcam.so and /dev/null differ diff --git a/software/drivers and libraries/toupcam/linux/armel/libtoupcam.so b/software/drivers and libraries/toupcam/linux/armel/libtoupcam.so deleted file mode 100644 index 3a416f1e0..000000000 Binary files a/software/drivers and libraries/toupcam/linux/armel/libtoupcam.so and /dev/null differ diff --git a/software/drivers and libraries/toupcam/linux/armhf/libtoupcam.so b/software/drivers and libraries/toupcam/linux/armhf/libtoupcam.so deleted file mode 100644 index 22e3d634c..000000000 Binary files a/software/drivers and libraries/toupcam/linux/armhf/libtoupcam.so and /dev/null differ diff --git a/software/drivers and libraries/toupcam/linux/x64/libtoupcam.so b/software/drivers and libraries/toupcam/linux/x64/libtoupcam.so index 24aa3ac69..32ee53589 100644 Binary files a/software/drivers and libraries/toupcam/linux/x64/libtoupcam.so and b/software/drivers and libraries/toupcam/linux/x64/libtoupcam.so differ diff --git a/software/drivers and libraries/toupcam/linux/x86/libtoupcam.so b/software/drivers and libraries/toupcam/linux/x86/libtoupcam.so index 7e8e6422d..09c7f9d8a 100644 Binary files a/software/drivers and libraries/toupcam/linux/x86/libtoupcam.so and b/software/drivers and libraries/toupcam/linux/x86/libtoupcam.so differ diff --git a/software/drivers and libraries/toupcam/mac/libtoupcam.dylib b/software/drivers and libraries/toupcam/mac/libtoupcam.dylib deleted file mode 100644 index 5f45a6531..000000000 Binary files a/software/drivers and libraries/toupcam/mac/libtoupcam.dylib and /dev/null differ diff --git a/software/drivers and libraries/toupcam/windows/toupcam.dll b/software/drivers and libraries/toupcam/windows/toupcam.dll new file mode 100644 index 
000000000..4b6bdb57b Binary files /dev/null and b/software/drivers and libraries/toupcam/windows/toupcam.dll differ diff --git a/software/drivers and libraries/toupcam/windows/toupcam.lib b/software/drivers and libraries/toupcam/windows/toupcam.lib new file mode 100644 index 000000000..e13298ea6 Binary files /dev/null and b/software/drivers and libraries/toupcam/windows/toupcam.lib differ diff --git a/software/drivers and libraries/tucsen/install_tucsen.sh b/software/drivers and libraries/tucsen/install_tucsen.sh new file mode 100644 index 000000000..19ba10b95 --- /dev/null +++ b/software/drivers and libraries/tucsen/install_tucsen.sh @@ -0,0 +1,24 @@ +#!/bin/bash +sudo apt-get install libudev1 +sudo apt-get install libudev-dev +chmod 777 sdk/install.sh +cd sdk + +folder="/etc/tucam/" + +if [ ! -d "$folder" ]; then + mkdir "$folder" +fi + +# copy the tucsen usb camera config file +cp tuusb.conf /etc/tucam +cp 50-tuusb.rules /etc/udev/rules.d + +# copy the tucsen camera libraries +cp libphxapi-x86_64.so /usr/lib +cp libTUCam.so /usr/lib +cp libTUCam.so.1 /usr/lib +cp libTUCam.so.1.0 /usr/lib +cp libTUCam.so.1.0.0 /usr/lib + +exit diff --git a/software/drivers and libraries/tucsen/sdk/50-tuusb.rules b/software/drivers and libraries/tucsen/sdk/50-tuusb.rules new file mode 100644 index 000000000..de9117011 --- /dev/null +++ b/software/drivers and libraries/tucsen/sdk/50-tuusb.rules @@ -0,0 +1,110 @@ +# Dhyana 90 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6401", GROUP="users", MODE="0666" + +# Dhyana 400A +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6402", GROUP="users", MODE="0666" + +# Dhyana 400A +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6409", GROUP="users", MODE="0666" + +# Dhyana 400D +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6404", GROUP="users", MODE="0666" + +# Dhyana 400D +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e403", GROUP="users", MODE="0666" + +# 
Dhyana 400D +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e404", GROUP="users", MODE="0666" + +# Dhyana 400D +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e405", GROUP="users", MODE="0666" + +# Dhyana 401D +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6406", GROUP="users", MODE="0666" + +# Dhyana 401DS +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e002", GROUP="users", MODE="0666" + +# Dhyana D95 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6405", GROUP="users", MODE="0666" + +# Dhyana D95 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e406", GROUP="users", MODE="0666" + +# Dhyana D95S +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e001", GROUP="users", MODE="0666" + +# Dhyana 200D +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6408", GROUP="users", MODE="0666" + +# Dhyana 201DS +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e003", GROUP="users", MODE="0666" + +# Dhyana D95 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e004", GROUP="users", MODE="0666" + +# Dhyana 400DC +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="6804", GROUP="users", MODE="0666" + +# Dhyana 400DC +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="ec03", GROUP="users", MODE="0666" + +# Dhyana 400BSI +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e406", GROUP="users", MODE="0666" + +# Dhyana 400BSI V2 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e408", GROUP="users", MODE="0666" + +# MIchrome 5 Pro +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="ec07", GROUP="users", MODE="0666" + +# MIchrome 6 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="ec09", GROUP="users", MODE="0666" + +# MIchrome 20 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="ec0b", GROUP="users", MODE="0666" + +# FL-20 +SUBSYSTEMS=="usb", 
ATTRS{idVendor}=="5453", ATTRS{idProduct}=="ec0d", GROUP="users", MODE="0666" + +# MIchrome 5BW +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e407", GROUP="users", MODE="0666" + +# MIchrome 6BW +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e409", GROUP="users", MODE="0666" + +# FL-20BW +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e40d", GROUP="users", MODE="0666" + +# DigiRetina16 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="2a01", GROUP="users", MODE="0666" + +# ISH1000 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="a803", GROUP="users", MODE="0666" + +# ISH500 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="a804", GROUP="users", MODE="0666" + +# ISH300 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="a807", GROUP="users", MODE="0666" + +# ISH130 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="0547", ATTRS{idProduct}=="a008", GROUP="users", MODE="0666" + +# TrueChrome AF +SUBSYSTEMS=="usb", ATTRS{idVendor}=="eba1", ATTRS{idProduct}=="7590", GROUP="users", MODE="0666" + +# GT 2.0 +SUBSYSTEMS=="usb", ATTRS{idVendor}=="eba1", ATTRS{idProduct}=="7601", GROUP="users", MODE="0666" + +# FL 9BW +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e422", GROUP="users", MODE="0666" + +# FL 9BW LT +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e426", GROUP="users", MODE="0666" + +# FL 26BW +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e423", GROUP="users", MODE="0666" + +# FL 26BW +SUBSYSTEMS=="usb", ATTRS{idVendor}=="5453", ATTRS{idProduct}=="e423", GROUP="users", MODE="0666" diff --git a/software/drivers and libraries/tucsen/sdk/install.sh b/software/drivers and libraries/tucsen/sdk/install.sh new file mode 100755 index 000000000..7f051d349 --- /dev/null +++ b/software/drivers and libraries/tucsen/sdk/install.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +folder="/etc/tucam/" + +if [ ! 
-d "$folder" ]; then + mkdir "$folder" +fi + +# copy the tucsen usb camera config file +cp tuusb.conf /etc/tucam +cp 50-tuusb.rules /etc/udev/rules.d + +# copy the tucsen camera libraries +cp libphxapi-86_64.so /usr/lib +cp libTUCam.so /usr/lib +cp libTUCam.so.1 /usr/lib +cp libTUCam.so.1.0 /usr/lib +cp libTUCam.so.1.0.0 /usr/lib diff --git a/software/drivers and libraries/tucsen/sdk/libTUCam.so b/software/drivers and libraries/tucsen/sdk/libTUCam.so new file mode 120000 index 000000000..6c0f7a853 --- /dev/null +++ b/software/drivers and libraries/tucsen/sdk/libTUCam.so @@ -0,0 +1 @@ +libTUCam.so.1.0.0 \ No newline at end of file diff --git a/software/drivers and libraries/tucsen/sdk/libTUCam.so.1 b/software/drivers and libraries/tucsen/sdk/libTUCam.so.1 new file mode 120000 index 000000000..6c0f7a853 --- /dev/null +++ b/software/drivers and libraries/tucsen/sdk/libTUCam.so.1 @@ -0,0 +1 @@ +libTUCam.so.1.0.0 \ No newline at end of file diff --git a/software/drivers and libraries/tucsen/sdk/libTUCam.so.1.0 b/software/drivers and libraries/tucsen/sdk/libTUCam.so.1.0 new file mode 120000 index 000000000..6c0f7a853 --- /dev/null +++ b/software/drivers and libraries/tucsen/sdk/libTUCam.so.1.0 @@ -0,0 +1 @@ +libTUCam.so.1.0.0 \ No newline at end of file diff --git a/software/drivers and libraries/tucsen/sdk/libTUCam.so.1.0.0 b/software/drivers and libraries/tucsen/sdk/libTUCam.so.1.0.0 new file mode 100755 index 000000000..3fb5f51a3 Binary files /dev/null and b/software/drivers and libraries/tucsen/sdk/libTUCam.so.1.0.0 differ diff --git a/software/drivers and libraries/tucsen/sdk/libphxapi-x86_64.so b/software/drivers and libraries/tucsen/sdk/libphxapi-x86_64.so new file mode 100755 index 000000000..5d6d8188b Binary files /dev/null and b/software/drivers and libraries/tucsen/sdk/libphxapi-x86_64.so differ diff --git a/software/drivers and libraries/tucsen/sdk/tuusb.conf b/software/drivers and libraries/tucsen/sdk/tuusb.conf new file mode 100755 index 
000000000..88fa6bd2a --- /dev/null +++ b/software/drivers and libraries/tucsen/sdk/tuusb.conf @@ -0,0 +1,70 @@ +# This is the main configuration file for TUCSEN camera devices +# All lines beginning with a # in the first column is treated as a comment +# Blank lines are ignored +# Copyright - Copyright (C) Xintu Photonics Co.,Ltd. 2011-2017 +# Date - June 28, 2017 +# License - GPL Version 2 +#--------------------------------------------------------------------------- + +# Location of logfile for the Cypress USB daemon process 'tuusbd' +# Ensure that you have write permission in that directory, if you are non-root. + +LogFile=tuusbd.log + +# Location of PID file for the Cypress USB daemon process 'tuusbd' +# Ensure that you have write permission in that directory, if you are non-root. + +PIDFile=tuusbd.pid + +# Vital Product Data : Vendor/Device IDs - one per line. +# Format - vendorID DeviceID FriendlyName (Max 30 chars or end of line) + + +5453 6401 Dhyana 90 +5453 6402 Dhyana 400A +5453 6409 Dhyana 400A +5453 6404 Dhyana 400D +5453 E403 Dhyana 400D +5453 E404 Dhyana 400D +5453 E405 Dhyana 400D +5453 6406 Dhyana 401D +5453 E002 Dhyana 401DS +5453 6405 Dhyana D95 +5453 E406 Dhyana D95 +5453 E001 Dhyana D95S +5453 6408 Dhyana 200D +5453 E003 Dhyana 201DS +5453 E004 Dhyana 201DS +5453 6804 Dhyana 400DC +5453 EC03 Dhyana 400DC +5453 E406 Dhyana 400BSI +5453 E408 Dhyana 400BSI V2 +5453 E40D FL-20BW +5453 E40E Dhyana 4040 +5453 E40F Dhyana 95V2 +5453 E005 Dhyana 401D +5453 E007 Dhyana 201D + +5453 E412 Dhyana 4040V2 +5453 E413 Dhyana 4040BSI +5453 E41B Dhyana XFXV4040BSI +5453 E419 Dhyana 400BSIV3 +5453 E41F Dhyana XFXV95 +5453 E420 Dhyana XFXV400BSI + +5453 2A01 DigiRetina16 + +5453 A807 ISH300 +5453 A804 ISH500 +5453 A803 ISH1000 + +5453 E422 FL 9BW +5453 E426 FL 9BWLT +5453 E423 FL 26BW +5453 E424 ARIES 16 LT +5453 E425 ARIES 16 + +0547 A008 ISH130 +0547 E004 ISH205M + + diff --git a/software/images/12 well plate_1509x1010.png b/software/images/12 well 
plate_1509x1010.png index f41a2d607..95c785019 100644 Binary files a/software/images/12 well plate_1509x1010.png and b/software/images/12 well plate_1509x1010.png differ diff --git a/software/images/1536 well plate_1509x1010.png b/software/images/1536 well plate_1509x1010.png new file mode 100644 index 000000000..6ec8ff3d4 Binary files /dev/null and b/software/images/1536 well plate_1509x1010.png differ diff --git a/software/images/24 well plate_1509x1010.png b/software/images/24 well plate_1509x1010.png new file mode 100644 index 000000000..6d90ac14d Binary files /dev/null and b/software/images/24 well plate_1509x1010.png differ diff --git a/software/images/4 slide carrier_1509x1010.png b/software/images/4 slide carrier_1509x1010.png new file mode 100644 index 000000000..fead47598 Binary files /dev/null and b/software/images/4 slide carrier_1509x1010.png differ diff --git a/software/images/6 well plate_1509x1010.png b/software/images/6 well plate_1509x1010.png new file mode 100644 index 000000000..75fb665dd Binary files /dev/null and b/software/images/6 well plate_1509x1010.png differ diff --git a/software/main.py b/software/main.py index 6215e37ea..e92ec2ace 100644 --- a/software/main.py +++ b/software/main.py @@ -1,30 +1,26 @@ # set QT_API environment variable import os +import sys import argparse os.environ["QT_API"] = "pyqt5" -import qtpy # qt libraries -from qtpy.QtCore import * from qtpy.QtWidgets import * -from qtpy.QtGui import * # app specific libraries import control.gui as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action="store_true") + parser.add_argument("--verbose", help="Turn on verbose (DEBUG) 
level logging.", action="store_true") + args = parser.parse_args() app = QApplication([]) app.setStyle('Fusion') - if(args.simulation): + if args.simulation: win = gui.OctopiGUI(is_simulation = True) else: win = gui.OctopiGUI() win.show() - app.exec_() #sys.exit(app.exec_()) + sys.exit(app.exec_()) diff --git a/software/main_2cameras_sync.py b/software/main_2cameras_sync.py deleted file mode 100644 index 553621528..000000000 --- a/software/main_2cameras_sync.py +++ /dev/null @@ -1,30 +0,0 @@ -# set QT_API environment variable -import os -import argparse -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_2cameras_sync as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() - -if __name__ == "__main__": - - app = QApplication([]) - app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_6060.py b/software/main_6060.py deleted file mode 100644 index 37a2dfde6..000000000 --- a/software/main_6060.py +++ /dev/null @@ -1,63 +0,0 @@ -# set QT_API environment variable -import os -import glob -import argparse -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_6060 as gui - -from configparser import ConfigParser -from control.widgets import ConfigEditorBackwardsCompatible, ConfigEditorForAcquisitions - -from control._def import CACHED_CONFIG_FILE_PATH - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with 
simulated hardware.", action = 'store_true') -args = parser.parse_args() - -def show_config(cfp, configpath, main_gui): - config_widget = ConfigEditorBackwardsCompatible(cfp, configpath, main_gui) - config_widget.exec_() - -def show_acq_config(cfm): - acq_config_widget = ConfigEditorForAcquisitions(cfm) - acq_config_widget.exec_() - -if __name__ == "__main__": - legacy_config = False - cf_editor_parser = ConfigParser() - config_files = glob.glob('.' + '/' + 'configuration*.ini') - if config_files: - cf_editor_parser.read(CACHED_CONFIG_FILE_PATH) - else: - print('configuration*.ini file not found, defaulting to legacy configuration') - legacy_config = True - app = QApplication([]) - app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - - acq_config_action = QAction("Acquisition Settings", win) - acq_config_action.triggered.connect(lambda : show_acq_config(win.configurationManager)) - - file_menu = QMenu("File", win) - file_menu.addAction(acq_config_action) - - if not legacy_config: - config_action = QAction("Microscope Settings", win) - config_action.triggered.connect(lambda : show_config(cf_editor_parser, config_files[0], win)) - file_menu.addAction(config_action) - - menu_bar = win.menuBar() - menu_bar.addMenu(file_menu) - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_PDAF_calibration.py b/software/main_PDAF_calibration.py deleted file mode 100644 index a9bd43ff7..000000000 --- a/software/main_PDAF_calibration.py +++ /dev/null @@ -1,19 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_PDAF_calibration as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI(is_simulation=True) - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git 
a/software/main_PDAF_demo.py b/software/main_PDAF_demo.py deleted file mode 100644 index cff8126ad..000000000 --- a/software/main_PDAF_demo.py +++ /dev/null @@ -1,19 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_PDAF_demo as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI(is_simulation=True) - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_camera_only.py b/software/main_camera_only.py deleted file mode 100644 index 550fb56ab..000000000 --- a/software/main_camera_only.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_camera_only as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_displacement_measurement.py b/software/main_displacement_measurement.py deleted file mode 100644 index 65f91e77d..000000000 --- a/software/main_displacement_measurement.py +++ /dev/null @@ -1,28 +0,0 @@ -# set QT_API environment variable -import os -import argparse -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_displacement_measurement as gui - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() - -if __name__ == "__main__": - - app = QApplication([]) - 
app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_hcs.py b/software/main_hcs.py index 04381821d..3aed2f7f4 100644 --- a/software/main_hcs.py +++ b/software/main_hcs.py @@ -1,53 +1,68 @@ # set QT_API environment variable -import os -import glob import argparse +import glob +import logging +import os os.environ["QT_API"] = "pyqt5" -import qtpy +import signal +import sys # qt libraries -from qtpy.QtCore import * from qtpy.QtWidgets import * from qtpy.QtGui import * +import squid.logging +squid.logging.setup_uncaught_exception_logging() + # app specific libraries import control.gui_hcs as gui - from configparser import ConfigParser from control.widgets import ConfigEditorBackwardsCompatible, ConfigEditorForAcquisitions - from control._def import CACHED_CONFIG_FILE_PATH +from control.console import ConsoleThread -import glob - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() def show_config(cfp, configpath, main_gui): config_widget = ConfigEditorBackwardsCompatible(cfp, configpath, main_gui) config_widget.exec_() + def show_acq_config(cfm): acq_config_widget = ConfigEditorForAcquisitions(cfm) acq_config_widget.exec_() if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action='store_true') + parser.add_argument("--live-only", help="Run the GUI only the live viewer.", action='store_true') + parser.add_argument("--verbose", help="Turn on verbose logging (DEBUG level)", action="store_true") + args = parser.parse_args() + + log = squid.logging.get_logger("main_hcs") + + if args.verbose: + log.info("Turning on debug logging.") + squid.logging.set_stdout_log_level(logging.DEBUG) + + if not 
squid.logging.add_file_logging(f"{squid.logging.get_default_log_directory()}/main_hcs.log"): + log.error("Couldn't setup logging to file!") + sys.exit(1) + legacy_config = False cf_editor_parser = ConfigParser() config_files = glob.glob('.' + '/' + 'configuration*.ini') if config_files: cf_editor_parser.read(CACHED_CONFIG_FILE_PATH) else: - print('configuration*.ini file not found, defaulting to legacy configuration') + log.error('configuration*.ini file not found, defaulting to legacy configuration') legacy_config = True app = QApplication([]) app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - + # This allows shutdown via ctrl+C even after the gui has popped up. + signal.signal(signal.SIGINT, signal.SIG_DFL) + + win = gui.HighContentScreeningGui(is_simulation=args.simulation, live_only_mode=args.live_only) + acq_config_action = QAction("Acquisition Settings", win) acq_config_action.triggered.connect(lambda : show_acq_config(win.configurationManager)) @@ -58,7 +73,7 @@ def show_acq_config(cfm): config_action = QAction("Microscope Settings", win) config_action.triggered.connect(lambda : show_config(cf_editor_parser, config_files[0], win)) file_menu.addAction(config_action) - + try: csw = win.cswWindow if csw is not None: @@ -76,8 +91,16 @@ def show_acq_config(cfm): file_menu.addAction(csw_fc_action) except AttributeError: pass - + menu_bar = win.menuBar() menu_bar.addMenu(file_menu) win.show() - app.exec_() #sys.exit(app.exec_()) + + console_locals = { + 'microscope': win.microscope + } + + console_thread = ConsoleThread(console_locals) + console_thread.start() + + sys.exit(app.exec_()) diff --git a/software/main_malaria.py b/software/main_malaria.py index 208bd2184..572be5ff6 100644 --- a/software/main_malaria.py +++ b/software/main_malaria.py @@ -1,12 +1,13 @@ # set QT_API environment variable +import logging import os import glob import argparse os.environ["QT_API"] = "pyqt5" -import qtpy + 
+import sys # qt libraries -from qtpy.QtCore import * from qtpy.QtWidgets import * from qtpy.QtGui import * @@ -15,12 +16,8 @@ from configparser import ConfigParser from control.widgets import ConfigEditorBackwardsCompatible, ConfigEditorForAcquisitions - from control._def import CACHED_CONFIG_FILE_PATH - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() +import squid.logging def show_config(cfp, configpath, main_gui): config_widget = ConfigEditorBackwardsCompatible(cfp, configpath, main_gui) @@ -30,21 +27,32 @@ def show_acq_config(cfm): acq_config_widget = ConfigEditorForAcquisitions(cfm) acq_config_widget.exec_() + if __name__ == "__main__": + log = squid.logging.get_logger("main_malaria") + + parser = argparse.ArgumentParser() + parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action="store_true") + parser.add_argument("--verbose", help="Turn on verbose (DEBUG) logging.", action="store_true") + args = parser.parse_args() + + if args.verbose: + squid.logging.set_stdout_log_level(logging.DEBUG) + legacy_config = False cf_editor_parser = ConfigParser() config_files = glob.glob('.' 
+ '/' + 'configuration*.ini') if config_files: cf_editor_parser.read(CACHED_CONFIG_FILE_PATH) else: - print('configuration*.ini file not found, defaulting to legacy configuration') + log.warning('configuration*.ini file not found, defaulting to legacy configuration') legacy_config = True app = QApplication([]) app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) + if args.simulation: + win = gui.MalariaGUI(is_simulation = True) else: - win = gui.OctopiGUI() + win = gui.MalariaGUI() acq_config_action = QAction("Acquisition Settings", win) acq_config_action.triggered.connect(lambda : show_acq_config(win.configurationManager)) @@ -60,4 +68,4 @@ def show_acq_config(cfm): menu_bar = win.menuBar() menu_bar.addMenu(file_menu) win.show() - app.exec_() #sys.exit(app.exec_()) + sys.exit(app.exec_()) diff --git a/software/main_motion_only.py b/software/main_motion_only.py deleted file mode 100644 index f1dbf033e..000000000 --- a/software/main_motion_only.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_motion_only as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_platereader.py b/software/main_platereader.py deleted file mode 100644 index f2c2b883c..000000000 --- a/software/main_platereader.py +++ /dev/null @@ -1,27 +0,0 @@ -# set QT_API environment variable -import os -import argparse -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_platereader as gui - -parser = 
argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() - -if __name__ == "__main__": - - app = QApplication([]) - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_simulation.py b/software/main_simulation.py deleted file mode 100644 index 357bd35f8..000000000 --- a/software/main_simulation.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_simulation as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_tiscamera.py b/software/main_tiscamera.py deleted file mode 100644 index 550fb56ab..000000000 --- a/software/main_tiscamera.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_camera_only as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_tiscamera_DZK250.py b/software/main_tiscamera_DZK250.py deleted file mode 100644 index 903eed253..000000000 --- a/software/main_tiscamera_DZK250.py +++ /dev/null @@ -1,19 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from 
qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_tiscamera_DZK250 as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_tiscamera_camera_only.py b/software/main_tiscamera_camera_only.py deleted file mode 100644 index 9b46a9e0c..000000000 --- a/software/main_tiscamera_camera_only.py +++ /dev/null @@ -1,19 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_camera_only_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_tiscamera_simulation.py b/software/main_tiscamera_simulation.py deleted file mode 100644 index 98e20a070..000000000 --- a/software/main_tiscamera_simulation.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_tiscamera_simulation as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_toupcam_IMX571.py b/software/main_toupcam_IMX571.py deleted file mode 100644 index 89f7c9fe3..000000000 --- a/software/main_toupcam_IMX571.py +++ /dev/null @@ -1,30 +0,0 @@ -# set QT_API environment variable -import os -import argparse -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui 
import * - -# app specific libraries -import control.gui_toupcam_IMX571 as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() - -if __name__ == "__main__": - - app = QApplication([]) - app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_two_camera.py b/software/main_two_camera.py deleted file mode 100644 index 1884831d5..000000000 --- a/software/main_two_camera.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -#import control.gui_simulation as gui -import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_two_camera_focus_tracking.py b/software/main_two_camera_focus_tracking.py deleted file mode 100644 index 4a6e900b0..000000000 --- a/software/main_two_camera_focus_tracking.py +++ /dev/null @@ -1,22 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -#import control.gui_camera_only as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui -import control.gui_2cameras_async_focus_tracking as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git 
a/software/main_two_cameras_daheng_tis.py b/software/main_two_cameras_daheng_tis.py deleted file mode 100644 index cc3c6ee15..000000000 --- a/software/main_two_cameras_daheng_tis.py +++ /dev/null @@ -1,22 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -#import control.gui_camera_only as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui -import control.gui_2cameras_daheng_tis as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_usbspectrometer.py b/software/main_usbspectrometer.py deleted file mode 100644 index 155cfc9a7..000000000 --- a/software/main_usbspectrometer.py +++ /dev/null @@ -1,28 +0,0 @@ -# set QT_API environment variable -import os -import argparse -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from qtpy.QtGui import * - -# app specific libraries -import control.gui_usbspectrometer as gui - -parser = argparse.ArgumentParser() -parser.add_argument("--simulation", help="Run the GUI with simulated hardware.", action = 'store_true') -args = parser.parse_args() - -if __name__ == "__main__": - - app = QApplication([]) - app.setStyle('Fusion') - if(args.simulation): - win = gui.OctopiGUI(is_simulation = True) - else: - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/main_volumeric_imaging.py b/software/main_volumeric_imaging.py deleted file mode 100644 index 491213422..000000000 --- a/software/main_volumeric_imaging.py +++ /dev/null @@ -1,21 +0,0 @@ -# set QT_API environment variable -import os -os.environ["QT_API"] = "pyqt5" -import qtpy - -# qt libraries -from qtpy.QtCore import * -from qtpy.QtWidgets import * -from 
qtpy.QtGui import * - -# app specific libraries -import control.gui_volumetric_imaging as gui -#import control.gui_2cameras_async as gui -#import control.gui_tiscamera as gui - -if __name__ == "__main__": - - app = QApplication([]) - win = gui.OctopiGUI() - win.show() - app.exec_() #sys.exit(app.exec_()) diff --git a/software/objective_and_sample_formats/objectives.csv b/software/objective_and_sample_formats/objectives.csv new file mode 100644 index 000000000..81358f345 --- /dev/null +++ b/software/objective_and_sample_formats/objectives.csv @@ -0,0 +1,8 @@ +name,magnification,NA,tube_lens_f_mm +2x,2,0.10,180 +4x,4,0.13,180 +10x,10,0.3,180 +20x,20,0.8,180 +40x,40,0.95,180 +50x,50,0.8,180 +60x,60,1.2,180 diff --git a/software/objective_and_sample_formats/sample_formats.csv b/software/objective_and_sample_formats/sample_formats.csv new file mode 100644 index 000000000..781de84d7 --- /dev/null +++ b/software/objective_and_sample_formats/sample_formats.csv @@ -0,0 +1,8 @@ +format,a1_x_mm,a1_y_mm,a1_x_pixel,a1_y_pixel,well_size_mm,well_spacing_mm,number_of_skip,rows,cols +glass slide,0,0,0,0,0,0,0,1,1 +6,24.55,23.01,290,272,34.94,39.2,0,2,3 +12,24.75,16.86,293,198,22.05,26.0,0,3,4 +24,24.45,22.07,233,210,15.54,19.3,0,4,6 +96,11.31,10.75,171,135,6.21,9.0,0,8,12 +384,12.05,9.05,143,106,3.3,4.5,1,16,24 +1536,11.01,7.87,130,93,1.53,2.25,0,32,48 diff --git a/software/setup_22.04.sh b/software/setup_22.04.sh old mode 100644 new mode 100755 index cdc8d5c28..0de2d0ba3 --- a/software/setup_22.04.sh +++ b/software/setup_22.04.sh @@ -1,29 +1,51 @@ #!/bin/bash +set -eo pipefail +if [[ -n "$TRACE" ]]; then + echo "TRACE variable non-empty, turning on script tracing." 
+ set -x +fi + +if [[ -z "$SRC_ROOT" ]]; then + readonly SRC_ROOT="$HOME/Desktop" +fi +echo "Using SRC_ROOT=$SRC_ROOT" + +mkdir -p "$SRC_ROOT" +readonly SQUID_REPO_HTTP="https://github.com/Cephla-Lab/Squid.git" +readonly SQUID_REPO_NAME="Squid" +readonly SQUID_SOFTWARE_ROOT="$SRC_ROOT/$SQUID_REPO_NAME/software" +readonly DAHENG_CAMERA_DRIVER_ROOT="$SQUID_SOFTWARE_ROOT/drivers and libraries/daheng camera/Galaxy_Linux-x86_Gige-U3_32bits-64bits_1.2.1911.9122" +readonly DAHENG_CAMERA_DRIVER_API_ROOT="$SQUID_SOFTWARE_ROOT/drivers and libraries/daheng camera/Galaxy_Linux_Python_1.0.1905.9081/api" +readonly TOUPCAM_UDEV_RULE_PATH="$SQUID_SOFTWARE_ROOT/drivers and libraries/toupcam/linux/udev/99-toupcam.rules" # update sudo apt update # install packages sudo apt install python3-pip -y sudo apt install python3-pyqtgraph python3-pyqt5 -y +sudo apt install python3-pyqt5.qtsvg # clone the repo sudo apt-get install git -y -cd ~/Desktop -git clone https://github.com/hongquanli/octopi-research.git -cd octopi-research/software -mkdir cache +cd "$SRC_ROOT" +git clone "$SQUID_REPO_HTTP" "$SQUID_REPO_NAME" +cd "$SQUID_SOFTWARE_ROOT" +mkdir -p "$SQUID_SOFTWARE_ROOT/cache" # install libraries -pip3 install qtpy pyserial pandas imageio crc==1.3.0 lxml numpy tifffile +pip3 install qtpy pyserial pandas imageio crc==1.3.0 lxml numpy tifffile scipy napari pip3 install opencv-python-headless opencv-contrib-python-headless +pip3 install napari[all] scikit-image dask_image ome_zarr aicsimageio basicpy pytest # install camera drivers -cd ~/Desktop/octopi-research/software/drivers\ and\ libraries/daheng\ camera/Galaxy_Linux-x86_Gige-U3_32bits-64bits_1.2.1911.9122 +cd "$DAHENG_CAMERA_DRIVER_ROOT" ./Galaxy_camera.run -cd ~/Desktop/octopi-research/software/drivers\ and\ libraries/daheng\ camera/Galaxy_Linux_Python_1.0.1905.9081/api +cd "$DAHENG_CAMERA_DRIVER_API_ROOT" python3 setup.py build sudo python3 setup.py install +cd "$SQUID_SOFTWARE_ROOT" +sudo cp "$TOUPCAM_UDEV_RULE_PATH" /etc/udev/rules.d 
# enable access to serial ports without sudo sudo usermod -aG dialout $USER diff --git a/software/setup_cuda_22.04.sh b/software/setup_cuda_22.04.sh old mode 100644 new mode 100755 diff --git a/software/squid/__init__.py b/software/squid/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/software/squid/logging.py b/software/squid/logging.py new file mode 100644 index 000000000..fe8018191 --- /dev/null +++ b/software/squid/logging.py @@ -0,0 +1,183 @@ +import logging as py_logging +import logging.handlers +import os.path +import threading +from typing import Optional, Type +from types import TracebackType +import sys +import platformdirs + +_squid_root_logger_name = "squid" +_baseline_log_format = "%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" +_baseline_log_dateformat = "%Y-%m-%d %H:%M:%S" + + +# The idea for this CustomFormatter is cribbed from https://stackoverflow.com/a/56944256 +class _CustomFormatter(py_logging.Formatter): + GRAY = "\x1b[38;20m" + YELLOW = "\x1b[33;20m" + RED = "\x1b[31;20m" + BOLD_RED = "\x1b[31;1m" + RESET = "\x1b[0m" + FORMAT = _baseline_log_format + + FORMATS = { + py_logging.DEBUG: GRAY + FORMAT + RESET, + py_logging.INFO: GRAY + FORMAT + RESET, + py_logging.WARNING: YELLOW + FORMAT + RESET, + py_logging.ERROR: RED + FORMAT + RESET, + py_logging.CRITICAL: BOLD_RED + FORMAT + RESET + } + + # NOTE(imo): The datetime hackery is so that we can have millisecond timestamps using a period instead + # of comma. The default asctime + datefmt uses a comma. + FORMATTERS = {level: py_logging.Formatter(fmt, datefmt=_baseline_log_dateformat) for (level, fmt) in FORMATS.items()} + + def format(self, record): + return self.FORMATTERS[record.levelno].format(record) + +_COLOR_STREAM_HANDLER = py_logging.StreamHandler() +_COLOR_STREAM_HANDLER.setFormatter(_CustomFormatter()) + +# Make sure the squid root logger has all the handlers we want setup. 
We could move this into a helper so it +# isn't done at the module level, but not needing to remember to call some helper to setup formatting is nice. +# Also set the default logging level to INFO on the stream handler, but DEBUG on the root logger so we can have +# other loggers at different levels. +_COLOR_STREAM_HANDLER.setLevel(py_logging.INFO) +py_logging.getLogger(_squid_root_logger_name).addHandler(_COLOR_STREAM_HANDLER) +py_logging.getLogger(_squid_root_logger_name).setLevel(py_logging.DEBUG) + + +def get_logger(name: Optional[str] = None) -> py_logging.Logger: + """ + Returns the top level squid logger instance by default, or a logger in the squid + logging hierarchy if a non-None name is given. + """ + if name is None: + logger = py_logging.getLogger(_squid_root_logger_name) + else: + logger = py_logging.getLogger(_squid_root_logger_name).getChild(name) + + return logger + +log = get_logger(__name__) + +def set_stdout_log_level(level): + """ + All squid code should use this set_stdout_log_level method, and the corresponding squid.logging.get_logger, + to control squid-only logging. + + This does not modify the log level of loggers outside the squid logger hierarchy! If global logging control + is needed the normal logging package tools can be used instead. It also leaves FileHandler log levels such that + they can always be outputting everything (regardless of what we set the stdout log level to) + """ + squid_root_logger = get_logger() + + for handler in squid_root_logger.handlers: + # We always want the file handlers to capture everything, so don't touch them. + if isinstance(handler, logging.FileHandler): + continue + handler.setLevel(level) + + +def register_crash_handler(handler, call_existing_too=True): + """ + We want to make sure any uncaught exceptions are logged, so we have this mechanism for putting a hook into + the python system that does custom logging when an exception bubbles all the way to the top. 
+ + NOTE: We do our best below, but it is a really bad idea for your handler to raise an exception. + """ + # The sys.excepthook docs are a good entry point for all of this + # (here: https://docs.python.org/3/library/sys.html#sys.excepthook), but essentially there are 3 different ways + # threads of execution can blow up. We want to catch and log all 3 of them. + old_excepthook = sys.excepthook + old_thread_excepthook = threading.excepthook + # The unraisable hook doesn't have the same signature as the excepthooks, but we can sort of shoehorn the arguments + # into the same signature. Also, this is an extremely rare (I'm not sure I've ever seen it?) failure mode, so + # it should be okay. + old_unraisable_hook = sys.unraisablehook + + logger = get_logger() + + def new_excepthook(exception_type: Type[BaseException], value: BaseException, tb: TracebackType): + try: + handler(exception_type, value, tb) + except BaseException as e: + logger.critical("Custom excepthook handler raised exception", exc_info=e) + if call_existing_too: + old_excepthook(exception_type, value, tb) + + def new_thread_excepthook(hook_args: threading.ExceptHookArgs): + exception_type = hook_args.exc_type + value = hook_args.exc_value + tb = hook_args.exc_traceback + try: + handler(exception_type, value, tb) + except BaseException as e: + logger.critical("Custom thread excepthook handler raised exception", exc_info=e) + if call_existing_too: + old_thread_excepthook(hook_args) + + def new_unraisable_hook(info): + exception_type = info.exc_type + tb = info.exc_traceback + value = info.exc_value + try: + handler(exception_type, value, tb) + except BaseException as e: + logger.critical("Custom unraisable hook handler raised exception", exc_info=e) + if call_existing_too: + old_unraisable_hook(info) + + logger.info(f"Registering custom excepthook, threading excepthook, and unraisable hook using handler={handler.__name__}") + sys.excepthook = new_excepthook + threading.excepthook = 
new_thread_excepthook + sys.unraisablehook = new_unraisable_hook + + +def setup_uncaught_exception_logging(): + """ + This will make sure uncaught exceptions are sent to the root squid logger as error messages. + """ + logger = get_logger() + def uncaught_exception_logger(exception_type: Type[BaseException], value: BaseException, tb: TracebackType): + logger.exception("Uncaught Exception!", exc_info=value) + + register_crash_handler(uncaught_exception_logger, call_existing_too=False) + +def get_default_log_directory(): + return platformdirs.user_log_path(_squid_root_logger_name, "cephla") + +def add_file_logging(log_filename, replace_existing=False): + root_logger = get_logger() + abs_path = os.path.abspath(log_filename) + for handler in root_logger.handlers: + if isinstance(handler, logging.handlers.BaseRotatingHandler): + if handler.baseFilename == abs_path: + if replace_existing: + root_logger.removeHandler(handler) + else: + log.error(f"RotatingFileHandler already exists for {abs_path}, and replace_existing==False!") + return False + + log_file_existed = False + if os.path.isfile(abs_path): + log_file_existed = True + + os.makedirs(os.path.dirname(abs_path), exist_ok=True) + + # For now, don't worry about rollover after a certain size or time. Just get a new file per call. + new_handler = logging.handlers.RotatingFileHandler(abs_path, maxBytes=0, backupCount=25) + new_handler.setLevel(py_logging.DEBUG) + + formatter = py_logging.Formatter(fmt=_baseline_log_format, datefmt=_baseline_log_dateformat) + new_handler.setFormatter(formatter) + + log.info(f"Adding new file logger writing to file '{new_handler.baseFilename}'") + root_logger.addHandler(new_handler) + + # We want a new log file every time we start, so force one at startup if the log file already existed. 
+ if log_file_existed: + new_handler.doRollover() + + return True \ No newline at end of file diff --git a/software/tests/__init__.py b/software/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/software/tests/control/__init__.py b/software/tests/control/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/software/tests/control/test_microscope.py b/software/tests/control/test_microscope.py new file mode 100644 index 000000000..c2b4bfd9d --- /dev/null +++ b/software/tests/control/test_microscope.py @@ -0,0 +1,4 @@ +import control.microscope + +def test_create_simulated_microscope(): + sim_scope = control.microscope.Microscope(is_simulation=True) \ No newline at end of file diff --git a/software/tests/squid/__init__.py b/software/tests/squid/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/software/tests/squid/test_logging.py b/software/tests/squid/test_logging.py new file mode 100644 index 000000000..f1416e346 --- /dev/null +++ b/software/tests/squid/test_logging.py @@ -0,0 +1,50 @@ +import logging +import tempfile + +import squid.logging + +def test_root_logger(): + root_logger = squid.logging.get_logger() + assert root_logger.name == squid.logging._squid_root_logger_name + +def test_children_loggers(): + child_a = "a" + child_b = "b" + + child_a_logger = squid.logging.get_logger(child_a) + child_b_logger = child_a_logger.getChild(child_b) + + assert child_a_logger.name == f"{squid.logging._squid_root_logger_name}.{child_a}" + assert child_b_logger.name == f"{squid.logging._squid_root_logger_name}.{child_a}.{child_b}" + +def test_file_loggers(): + log_file_name = tempfile.mktemp() + + def line_count(): + with open(log_file_name, "r") as fh: + return len(list(fh)) + + def contains(string): + with open(log_file_name, "r") as fh: + for l in fh: + if string in l: + return True + return False + + assert squid.logging.add_file_logging(log_file_name) + assert not 
squid.logging.add_file_logging(log_file_name) + + initial_line_count = line_count() + log = squid.logging.get_logger("log test") + squid.logging.set_stdout_log_level(logging.DEBUG) + + log.debug("debug msg") + debug_line_count = line_count() + assert debug_line_count > initial_line_count + + squid.logging.set_stdout_log_level(logging.INFO) + + a_debug_message = "another message but when stdout is at INFO" + log.debug(a_debug_message) + assert line_count() > debug_line_count + assert contains(a_debug_message) diff --git a/software/list_controllers.py b/software/tools/list_controllers.py similarity index 100% rename from software/list_controllers.py rename to software/tools/list_controllers.py