diff --git a/data/sensor/colorfilters/auto/SONY/cf_imx490.mat b/data/sensor/colorfilters/auto/SONY/cf_imx490.mat
new file mode 100644
index 00000000..71864b2e
Binary files /dev/null and b/data/sensor/colorfilters/auto/SONY/cf_imx490.mat differ
diff --git a/data/sensor/colorfilters/auto/SONY/qe_imx490.mat b/data/sensor/colorfilters/auto/SONY/qe_imx490.mat
index d8cc8dde..30dafbc5 100644
Binary files a/data/sensor/colorfilters/auto/SONY/qe_imx490.mat and b/data/sensor/colorfilters/auto/SONY/qe_imx490.mat differ
diff --git a/data/sensor/colorfilters/auto/SONY/qe_imx490_scan.mat b/data/sensor/colorfilters/auto/SONY/qe_imx490_scan.mat
new file mode 100644
index 00000000..d8cc8dde
Binary files /dev/null and b/data/sensor/colorfilters/auto/SONY/qe_imx490_scan.mat differ
diff --git a/data/sensor/colorfilters/auto/SONY/s_imx490QEdata.m b/data/sensor/colorfilters/auto/SONY/s_imx490QEdata.m
new file mode 100644
index 00000000..17e1352d
--- /dev/null
+++ b/data/sensor/colorfilters/auto/SONY/s_imx490QEdata.m
@@ -0,0 +1,41 @@
+%% Managing the IMX490 spectral QE
+%
+% Grabit on the scanned image in sonyIMX490.png produced the first
+% file.
+
+%%
+datadir = fullfile(isetRootPath,'data','sensor','colorfilters','auto','SONY');
+curdir = pwd;
+chdir(datadir);
+
+%% Data from grabit, via Zhenyi
+tmp = load('qe_imx490_scan.mat');
+
+% Scale to [0,1]
+tmp.data = tmp.data/100;
+
+tmp.data(tmp.data<0) = 0;
+tmp.data(isnan(tmp.data)) = 0;
+
+ieNewGraphWin; plot(tmp.wavelength,tmp.data);
+xaxisLine;
+
+%% Write it out in the two formats.  Not sure why we have two.
+
+%% CF file
+tmp.filterNames = {'r','g','b'};
+tmp.comment = 'Cleaned up QE from grabit scan of LUCID curves.';
+
+cfFile = fullfile(datadir,'cf_imx490.mat');
+
+ieSaveColorFilter(tmp,cfFile);
+disp("Saved Sony imx490 spectral qe as a color filter.")
+disp('Read it using ieReadColorFilter')
+
+%% QE file
+qeFile = fullfile(datadir,'qe_imx490.mat');
+ieSaveSpectralFile(tmp.wavelength,tmp.data,tmp.comment,qeFile);
+disp("Saved Sony imx490 spectral qe as a spectral file.")
+disp('Read it using ieReadSpectra.')
+
+%%
diff --git a/data/sensor/colorfilters/auto/SONY/sonyIMX490.png b/data/sensor/colorfilters/auto/SONY/sonyIMX490.png
new file mode 100644
index 00000000..cea57179
Binary files /dev/null and b/data/sensor/colorfilters/auto/SONY/sonyIMX490.png differ
diff --git a/data/sensor/ircf_public.mat b/data/sensor/irfilters/ircf_public.mat
similarity index 100%
rename from data/sensor/ircf_public.mat
rename to data/sensor/irfilters/ircf_public.mat
diff --git a/imgproc/demosaic/Demosaic.m b/imgproc/demosaic/Demosaic.m
index 115a07a7..c349fb54 100644
--- a/imgproc/demosaic/Demosaic.m
+++ b/imgproc/demosaic/Demosaic.m
@@ -132,6 +132,8 @@
     switch lower(m)
         case {'iebilinear','bilinear'}
             method = 'ieBilinear';
+        case {'matlabbilinear'}
+            method = 'matlabbilinear';
         case {'multichannel'}
             method = 'multichannel';
         case {'adaptivelaplacian'}
@@ -174,7 +176,11 @@
             % Rarely go this computational path.  Mainly used for human retina.
             demosaicedImage = demosaicMultichannel(imgRGB,sensor,'interpolate');
         end
-
+    case {'matlabbilinear'}
+        % [1] Malvar, H.S., L. He, and R. Cutler, High quality linear
+        % interpolation for demosaicing of Bayer-patterned color
+        % images. ICASSP, Volume 34, Issue 11, pp. 2274-2282, May 2004.
+        demosaicedImage = demosaic(imgRGB,sensorGet(sensor,'pattern'));
    case 'adaptivelaplacian'
        clipToRange = 0;
        if (strcmp(bPattern,'grbg') || ...
diff --git a/metrics/scielab/scPrepareFilters.m b/metrics/scielab/scPrepareFilters.m
index 204af029..81387a38 100644
--- a/metrics/scielab/scPrepareFilters.m
+++ b/metrics/scielab/scPrepareFilters.m
@@ -33,18 +33,7 @@
 % The filters are a cell array. The support defines the spatial support in
 % terms of degrees of visual angle.
 %
-% Example:
-%   params.deltaEversion = '2000';
-%   params.sampPerDeg = 145;
-%   params.imageFormat = 'LMS';
-%   params.filterSize = 145;
-%   params.dimension = 2;
-%   [filters,support] = scPrepareFilters(params);
-%
-%   figure; % Units are degrees of visual angle
-%   subplot(1,3,1), mesh(support,support,filters{1}); colormap(hsv(256));
-%   subplot(1,3,2), mesh(support,support,filters{2}); colormap(hsv(256));
-%   subplot(1,3,3), mesh(support,support,filters{3}); colormap(hsv(256));
+% The source code below contains a runnable example.
 %
 % In 1996, Xuemei Zhang used this routine
 %   [k1, k2, k3] = separableFilters(params.sampPerDeg,2);
@@ -59,6 +48,35 @@
 %
 % Copyright ImagEval Consultants, LLC, 2003.

+% Examples:
+%{
+  clear; close all;
+  params.deltaEversion = '2000';
+  params.sampPerDeg = 145;
+  params.imageFormat = 'LMS';
+  params.filterSize = 145;
+  params.dimension = 2;
+  [filters,support] = scPrepareFilters(params);
+
+  % Units are degrees of visual angle
+  figure; clf;
+  subplot(1,3,1), mesh(support,support,filters{1}); colormap(hsv(256));
+  subplot(1,3,2), mesh(support,support,filters{2}); colormap(hsv(256));
+  subplot(1,3,3), mesh(support,support,filters{3}); colormap(hsv(256));
+
+  % In 1996, Xuemei Zhang used this routine
+  [k1, k2, k3] = separableFilters(params.sampPerDeg,2);
+  figure; clf;
+  subplot(1,2,1); mesh(support,support,filters{3})
+  subplot(1,2,2); mesh(support,support,k1)
+
+  % The values are the same, as shown by the zero difference
+  figure; clf;
+  subplot(1,3,1); mesh(support,support,k1-filters{1});
+  subplot(1,3,2); mesh(support,support,k2-filters{2});
+  subplot(1,3,3); mesh(support,support,k3-filters{3});
+%}
+
 % Check parameters
 if ieNotDefined('params'), error('Params required.'); end
 if ~checkfields(params,'sampPerDeg'), params.sampPerDeg = 224; end
diff --git a/opticalimage/oiCompute.m b/opticalimage/oiCompute.m
index 5109bcd7..76f1dd9e 100644
--- a/opticalimage/oiCompute.m
+++ b/opticalimage/oiCompute.m
@@ -18,7 +18,10 @@
 %
 %   crop - Crop the OI to the same size as the scene. (Logical)
 %          Default: false;
-%  (We could do a setprefs on these, but BW is resistant.)
+%
+%   pixel size - Spatial resolution of the oi image.  A scalar in
+%          meters.  Normally it is set to match the optics and
+%          scene properties.
 %
 % Return
 %   oi - The oi with computed photon irradiance
@@ -57,12 +60,12 @@
 % in opticsDLCompute. This blur and intensity in this computation depends
 % on the diffraction limited parameters (f/#) but little else.
 %
-% To create an image with no blur, set the f/# to a very small number.
-% This will provide an image that has the geometry and zero-blur as used in
-% computer graphics pinhole cameras. The absolute light level, however,
-% will be higher than what would be seen through a small pinhole. You can
-% manage this by setting scaling the spectral irradiance
-% (oiAdjustIlluminance).
+% To create an image with no blur, set the f/# (focal length over the
+% aperture) to be a very small number. This image has the geometry and
+% zero-blur as used in computer graphics pinhole cameras. The
+% absolute light level, however, will be higher than what would be
+% seen through a small pinhole.
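+% (Image irradiance grows roughly as 1/(f/#)^2, which is why a tiny
+% f/# inflates the light level.)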
You can manage this by scaling
+% the spectral irradiance (oiAdjustIlluminance).
 %
 % * The second model is shift-invariant optics. This depends on having a
 % wavelength-dependent OTF defined and included in the optics structure.
@@ -123,6 +126,13 @@
  oi = oiCompute(oi,scene,'pad value','border','crop',true);
  oiWindow(oi);
%}
+%{
+% Almost right.  Off by 1 part in 100.  Need to fix. (BW).
+scene = sceneCreate;
+oi = oiCreate;
+oi = oiCompute(oi,scene,'pad value','border','crop',true,'pixel size',1e-6);
+oiWindow(oi);
+%}
%% Parse
varargin = ieParamFormat(varargin);
p = inputParser;
@@ -135,28 +145,29 @@
p.parse(oi,scene,varargin{:});

% Ages ago, we some code flipped the order of scene and oi.  We think
-% we have caught all those cases, but we still test.  Maybe delete
-% this code by January 2024.
+% we have caught all those cases, but we still test, and are now forcing
+% the user to correct.
if strcmp(oi.type,'scene') && (strcmp(scene.type,'opticalimage') ||...
        strcmp(scene.type,'wvf'))
-    warning('flipping oi and scene variables.')
-    tmp = scene; scene = oi; oi = tmp; clear tmp
+    error('You need to flip the order of the oi and scene variables in the call to oiCompute')
+    % We used to help the user
+    % tmp = scene; scene = oi; oi = tmp; clear tmp
end

-%% Adjust oi fov if user send in a pixel size
+%% Adjust oi fov if user sends in a pixel size
if ~isempty(p.Results.pixelsize)
    pz = p.Results.pixelsize;
    sw = sceneGet(scene, 'cols');
    flengthM = oiGet(oi, 'focal length', 'm');
    wAngular = atand(pz*sw/2/flengthM)*2;
    % oi uses scene hFOV later.
    scene = sceneSet(scene, 'wAngular',wAngular);
end

%% Compute according to the selected model.
-
+%
% Should we pad the scene before the call to these computes?
-
+%
% We pass varargin because it may contain the key/val parameters
% such as pad value and crop.  But we only use pad value here.
if strcmp(oi.type,'wvf')
diff --git a/opticalimage/oiCreate.m b/opticalimage/oiCreate.m
index dce0c1a6..2b6c6daa 100644
--- a/opticalimage/oiCreate.m
+++ b/opticalimage/oiCreate.m
@@ -10,6 +10,9 @@
 %
 % Inputs
 %  oiType  -
+%   {'pinhole'} - Turn off cos4th and blur; skips the optics model
+%                 and uses a very small f/#
+%
 %   {'diffraction limited'} - Diffraction limited optics, no diffuser or
 %                 data (Default).  Equivalent to using the wvf or
 %                 shift-invariant with a zero wavefront
@@ -103,15 +106,6 @@
 %%
 scene = []; wvf = [];
 switch ieParamFormat(oiType)
-    case {'empty'}
-        % Just the basic shell of the oi struct
-        % Other terms will get added by the calling function
-        oi.type = 'opticalimage';
-        oi.name = vcNewObjectName('opticalimage');
-        oi.metadata = [];  % Store metadata typically for machine-learning apps
-        oi = oiSet(oi, 'diffuser method', 'skip');
-        oi = oiSet(oi,'wave',[]);
-
     case {'diffractionlimited','default'}
         % Diffraction limited is implemented using the dlMTF method.
         % This is like the wvf case, but the dl MTF is computed on the fly
@@ -136,6 +130,47 @@
             oi.optics.transmittance.scale = ones(length(370:730), 1);
         end

+    case {'pinhole'}
+        % Pinhole optics in an OI.
+        %
+        % We set the f/# (focal length over the aperture) to be a very
+        % small number.  This image has zero-blur because the
+        % diffraction aperture is very large.  That is the goal of the
+        % pinhole camera optics, but the way we achieve it is odd.
+        %
+        % The absolute light level is high because a small fnumber
+        % means the focal length is very short compared to the
+        % aperture.
+        %
+        % With these default settings the focal length is 10 mm and the
+        % aperture diameter is 10 m.
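+        %
+        % (That follows from the settings below:
+        % aperture = flength/fnumber = 1e-2 m / 1e-3 = 10 m.)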
+        oi = oiCreate();
+        oi = oiSet(oi, 'name', 'pinhole');
+        oi = oiSet(oi, 'optics name','pinhole');
+
+        % This makes the computation a pinhole
+        oi = oiSet(oi,'optics model','skip');
+        oi = oiSet(oi, 'optics offaxis method', 'skip');
+        oi = oiSet(oi, 'diffuser method', 'skip');
+
+        % Not sure why we set these, but it is true that when we have
+        % a very small fnumber, we should have no blur - like a
+        % pinhole.
+        oi = oiSet(oi, 'optics fnumber',1e-3);
+        oi = oiSet(oi, 'optics focal length',1e-2);
+
+        % Pinholes do not have a true focal length or fNumber; the
+        % values above are stand-ins.
+        wvf = [];
+
+    case {'empty'}
+        % Just the basic shell of the oi struct
+        % Other terms will get added by the calling function
+        oi.type = 'opticalimage';
+        oi.name = vcNewObjectName('opticalimage');
+        oi.metadata = [];  % Store metadata typically for machine-learning apps
+        oi = oiSet(oi, 'diffuser method', 'skip');
+        oi = oiSet(oi,'wave',[]);
+
     case {'shiftinvariant','wvf'}
         % We create via the wavefront method. We create a wvf, convert it
         % to an oi using wvf2oi, which calls wvf2optics.
@@ -277,21 +312,6 @@
         oi = oiSet(oi,'fov',100);
         wvf = [];

-    case {'pinhole'}
-        % Pinhole camera version of OI
-        oi = oiCreate('shift invariant');
-        oi = oiSet(oi, 'optics model', 'skip');
-        oi = oiSet(oi, 'bit depth', 64);      % Forces double
-        oi = oiSet(oi, 'optics offaxis method', 'skip');
-        oi = oiSet(oi, 'diffuser method', 'skip');
-
-        % Pinhole do not have a focal length.  In this case, the focal
-        % length is used to say the image plane distance.
-        oi = oiSet(oi, 'optics focal length',NaN);
-        oi = oiSet(oi, 'optics name','pinhole');
-        oi = oiSet(oi, 'name', 'pinhole');
-        wvf = [];
-
     otherwise
         fprintf('\n--- Valid OI types: ---\n')
         for ii=1:length(validTypes)
diff --git a/opticalimage/oiGet.m b/opticalimage/oiGet.m
index 6e29a6ad..ebe5f26c 100644
--- a/opticalimage/oiGet.m
+++ b/opticalimage/oiGet.m
@@ -404,10 +404,12 @@
             if ~isempty(varargin), val = val*ieUnitScaleFactor(varargin{1}); end

         case {'wangular','widthangular','hfov','horizontalfieldofview','fov'}
-            % oiCompute(oi,scene) assigns the angular field of view to the oi.
+            % oiCompute(oi,scene) assigns the angular field of
+            % view to the oi.
+            %
             % This horizontal FOV represents the size of the OI,
-            % usually after the computational padding.
-            % reflects the angle of the scene it represents.
+            % usually after the computational padding.  Reflects
+            % the angle of the scene it represents.
             if checkfields(oi,'wAngular')
                 val = oi.wAngular;
             else
@@ -416,10 +418,12 @@
                 % 'focal plane distance'.
                 fprintf('*** The oi fov is not set yet.  ');
                 scene = vcGetObject('scene');
-                if isempty(scene) % Default scene visual angle.
+                if isempty(scene)
+                    % Default scene visual angle.
                     fprintf('Using a default fov of 10.\n');
                     disp('oiGet:  No scene, arbitrary oi angle: 10 deg'), val = 10;
-                else % Use current scene angular width
+                else
+                    % Use current scene angular width
                     fprintf('Using the scene fov.\n');
                     val = sceneGet(scene,'wangular');
                 end
diff --git a/opticalimage/optics/customOTF.m b/opticalimage/optics/customOTF.m
index 19418185..efd3092d 100644
--- a/opticalimage/optics/customOTF.m
+++ b/opticalimage/optics/customOTF.m
@@ -2,8 +2,8 @@
 % Interpolate optics OTF for shift-invariant calculation in optical image
 %
 % Brief
-%  This routine returns appropriate spectral OTF given the spatial
-%  sampling in the optical image.
+%  Returns the spectral OTF given the spatial sampling in the
+%  optical image.
 %
 % Synopsis
 %   [OTF2D,fSupport] = customOTF(oi,[fSupport],[wavelength = :],[units='mm'])
 %
@@ -23,22 +23,18 @@
 % Description
 %  In the shift-invariant optics model, custom data are stored in the
 %  optics.OTF slot.  This routine reads the otf data and interpolates
-%  them to the fSupport and wavelength of the optical image or optics
-%  structure.
+%  them to the fSupport and wavelength of the optical image (oi).
 %
-%  The returned OTF is normalized so that all energy is transmitted
-%  (i.e., the DC value is 1).  This is done by normalizing the peak
-%  value to one.  If we ever have a case when the peak is other than
-%  the DC, we have a problem with energy conservation - where did the
-%  photons go?
+%  The OTF is normalized so that all energy is transmitted (i.e., the
+%  DC value is 1).  We always represent the loss of energy by the
+%  filters along the light path.
 %
 %  The default units for the frequency support are cycles/millimeters.
 %
-%  TODO:  We can accept fSupport in
-%  various units.  But we can also set 'units'.  It is possible that
-%  the user sends in fSupport in, say, cyc/deg and sends in units as
-%  'mm'.  The case we do not want is fSupport is in cyc/deg and units
-%  is in mm.
+%  TODO: We can accept fSupport in various units.  But we can also set
+%  'units'.  It is possible that the user sends in fSupport in, say,
+%  cyc/deg and sends in units as 'mm'.  The case we do not want is
+%  fSupport is in cyc/deg and units is in mm.
 %
 % See also:
 %   oiCalculateOTF, dlCore.m
@@ -74,9 +70,13 @@
 % Find the OTF at each wavelength.
 %
 % This may require interpolating the optics data to match the current
-% OI.  The interpolation method can have significant consequences for
-% the result when we are working with very high dynamic range scenes.
-% (ZL, BW, 12/2023).
+% OI.  A theory is that the interpolation method can have significant
+% consequences for the result when we are working with very high dynamic
+% range scenes (ZL, BW, 12/2023).  It might also be that the problem is not
+% the interpolation per se, but other bugs (DHB, 4/2/24).
+%
+% We handle the one wavelength case separately because in this case it is a
+% 2D array rather than a 3D array.

 if length(wavelength) == 1
     OTF2D = opticsGet(optics,'otfData',wavelength);
@@ -86,14 +86,17 @@
     % We interpolate the stored OTF2D onto the support grid for the
     % optical image.
     %
-    % The OTF2D representation stores DC is in (1,1).  So we would want
+    % The OTF2D representation stores DC in (1,1).  So we would want
     % the fSupport to run from 1:N.  The otfSupport that we use to
     % create X and Y, runs from -N:N.  To make things match up, we
     % apply an fftshift to the OTF2D data prior to interpolating to
     % the spatial frequencies, fx and fy, that are required given the
     % spatial sampling of the optical image.
    %
-    % BW:  Can't this also be fftshift?  Like below?
+    % Note that it is critical to use ifftshift to put back what fftshift
+    % did, and not use fftshift twice.  You can get lured into thinking
+    % that fftshift self-inverts because it does in some special cases, but
+    % not in all cases.
    OTF2D = ifftshift(interp2(X, Y, fftshift(OTF2D), fx, fy, 'linear',0));

 else
@@ -105,7 +108,7 @@
        % ieNewGraphWin; mesh(abs(fft2(tmp)));
        % fftshift(interp2(X, Y, fftshift(tmp), fx, fy, 'linear',0));
        OTF2D(:,:,ii) = ...
-            fftshift(interp2(X, Y, fftshift(tmp), fx, fy, 'linear',0));
+            ifftshift(interp2(X, Y, fftshift(tmp), fx, fy, 'linear',0));
        %{
         ieNewGraphWin;
         mesh(fx,fy,fftshift(abs(OTF2D(:,:,ii))));
diff --git a/opticalimage/optics/opticsCreate.m b/opticalimage/optics/opticsCreate.m
index 62393277..413b588d 100644
--- a/opticalimage/optics/opticsCreate.m
+++ b/opticalimage/optics/opticsCreate.m
@@ -86,9 +86,11 @@
 switch lower(opticsType)
     case {'empty'}
         optics.type = 'optics';
-        optics.name = 'empty';
+        optics.name = 'empty';

     case {'default','diffractionlimited'}
+        % This method is also used for a pinhole.  Just in that case
+        % we set the fNumber to be very small (e.g., 1e-3).
         optics.type = 'optics';
         optics = opticsSet(optics,'name','standard (1/4-inch)');
         optics = opticsSet(optics,'model','diffractionLimited');
@@ -113,9 +115,8 @@
     end

     case {'shiftinvariant'}
-        % Removed:  'standard(1/4-inch)','quarterinch' on Dec 18, 2023
-        % These are all diffraction limited methods.
-        % optics = opticsDefault;
+        % Initialized as a diffraction limited wavefront that matches
+        % the diffraction limited one above.
         wave = 400:10:700;
         wvf = wvfCreate('wave', wave);
@@ -331,7 +332,7 @@
 end

-
+%{
 %---------------------------------------
 function optics = opticsDefault
 % Create diffraction limited optics from a wvf structure
@@ -367,6 +368,7 @@
 % optics = opticsSet(optics,'otfMethod','dlmtf');
 % end
+%}

 %---------------------------------------
 function optics = opticsHuman(pupilRadiusMeters,fLengthMeters)
diff --git a/opticalimage/optics/opticsGet.m b/opticalimage/optics/opticsGet.m
index 8a90d586..d4b18044 100644
--- a/opticalimage/optics/opticsGet.m
+++ b/opticalimage/optics/opticsGet.m
@@ -228,11 +228,13 @@
             val = opticsGet(optics,'fNumber')*(1 - opticsGet(optics,'mag'));
         end
     case {'focallength','flength'}
-        % opticsGet(optics,'flength',units);  If this is a pinhole (no
-        % optics), the focal length is how we specify the distance
-        % from the pinhole to the image plane.  If it is not a
-        % pinhole, it is the effective focal length - which in my mind
-        % is a generalization of the thin lens focal length.
+        % opticsGet(optics,'flength',units);
+        %
+        % If this is a pinhole (no optics), the focal length is how we
+        % specify the distance from the pinhole to the image plane.
+        % If it is not a pinhole, it is the effective focal length -
+        % which in my mind is a generalization of the thin lens focal
+        % length.
         %
         % In the distant past, we thought we should be using a
         % different value when calculating with a pinhole.  Go back to
@@ -339,8 +341,16 @@
         % Should this be a call to effective f#?
         val=1/(2*opticsGet(optics,'fnumber'));
     case {'aperturediameter','diameter','pupildiameter'}
-        %These already check the rt condition, so no need to do it again
-        val = opticsGet(optics,'focalLength')/opticsGet(optics,'fnumber');
+        % We already checked the rt condition, so no need to do it again.
+        %
+        % When the optics name is 'pinhole*', the fnumber will be
+        % small, which forces the OTF to be very flat.  We use the
+        % small fnumber value as the aperture size.
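+        %
+        % For example, with the pinhole defaults (focal length 1e-2 m,
+        % fnumber 1e-3), the usual formula would give an absurd
+        % 1e-2/1e-3 = 10 m diameter; instead the branch below returns
+        % the fnumber value itself (1e-3).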
+        if length(optics.name) > 6 && isequal(optics.name(1:7),'pinhole')
+            val = opticsGet(optics,'fnumber');
+        else
+            val = opticsGet(optics,'focalLength')/opticsGet(optics,'fnumber');
+        end
         if ~isempty(varargin), val = ieUnitScaleFactor(varargin{1})*val; end
     case {'apertureradius','radius','pupilradius'}
         val = opticsGet(optics,'diameter')/2;
diff --git a/opticalimage/optics/opticsOTF.m b/opticalimage/optics/opticsOTF.m
index e9ab4c42..db372114 100644
--- a/opticalimage/optics/opticsOTF.m
+++ b/opticalimage/optics/opticsOTF.m
@@ -5,13 +5,21 @@
 %    oi = opticsOTF(oi,scene,varargin);
 %
 % Inputs
-%  oi
-%  scene
+%  oi     - oi with optical blur not yet applied to photons
+%  scene  - the scene from whence the photons came.  Here it is only
+%           used to determine the pad size, because the photons in
+%           the passed oi have already been set up by the calling
+%           routine.
 %
 % Optional key/val
+%  'padvalue' - How to pad the oi to handle border effects.  {'zero','mean','border','spd'}
+%           See oiPadValue but note that for some reason that takes
+%           different strings for the options above. And that 'spd'
+%           is not implemented. This routine translates the strings
+%           above into what oiPadValue wants.
 %
 % Return
-%   oi
+%   oi    - oi with computed photons inserted.
 %
 % Description
 %   The optical transform function (OTF) associated with the optics in
@@ -41,14 +49,12 @@
 varargin = ieParamFormat(varargin);
 p = inputParser;
 p.KeepUnmatched = true;
-
 p.addRequired('oi',@(x)(isstruct(x) && isequal(x.type,'opticalimage')));
 p.addRequired('scene',@(x)(isstruct(x) && isequal(x.type,'scene')));
 p.addParameter('padvalue','zero',@(x)(ischar(x) || isvector(x)));
-
 p.parse(oi,scene,varargin{:});

-%%
+%% Get the optics model and fan out as appropriate
 optics = oiGet(oi,'optics');
 opticsModel = opticsGet(optics,'model');
@@ -68,7 +74,7 @@
 %-------------------------------------------
 function oi = oiApplyOTF(oi,scene,unit,padvalue)
-%Calculate and apply the otf waveband by waveband
+% Calculate and apply the otf waveband by waveband
 %
 %   oi = oiApplyOTF(oi,method,unit);
 %
@@ -85,6 +91,7 @@
 if ieNotDefined('oi'), error('Optical image required.'); end
 if ieNotDefined('unit'), unit  = 'cyclesPerDegree'; end

+% Get sampling wavelengths
 wave = oiGet(oi,'wave');

 % Pad the optical image to allow for light spread.  Also, make sure the row
@@ -114,23 +121,26 @@
         error('Unknown padvalue %s',padvalue);
 end

+%% Adjust the data by padding, and get the OTF
+
 % oiGet(oi,'sample size')
 oi = oiPadValue(oi,padSize,padType,sDist);
 % oiGet(oi,'sample size')

-% See t_codeFFTinMatlab to understand the logic of the operations here.
-% We used to do this one wavelength at a time.  But this could cause
-% dynamic range problems for ieCompressData.  So, for now we are
-% experimenting with filtering one at a time but stuffing the whole data
-% set in at once.
-
-% Get the current data set.  It has the right size.  We over-write it
+% Get the current photons.  It has the right size.  We over-write it
 % below.
 p = oiGet(oi,'photons');
-otfM = oiCalculateOTF(oi, wave, unit);  % Took changes from ISETBio.

+% Get the OTF
+otfM = oiCalculateOTF(oi, wave, unit);
+
+%% Calculate blurred photons for each wavelength
+
+% Initialize check variables
+nWlImagTooBig = 0;
+maxImagFraction = 0;
+imagFractionTol = 1e-2;
 for ii=1:length(wave)
-    % img = oiGet(oi,'photons',wave(ii));
     img = p(:, :, ii);
     % figure(1); imagesc(img); colormap(gray(64));

@@ -139,39 +149,53 @@
     % position.
    % otf = oiCalculateOTF(oi,wave(ii),unit);
    otf = otfM(:,:,ii);
-    % figure(1); mesh(otf); otf(1,1)
+    % figure(1); mesh(abs(otf)); otf(1,1)

-    % Put the image center in (1,1) and take the transform.
+    % Take the Fourier transform.  This leaves DC at (1,1), matched
+    % to the way we store the OTF.
    imgFFT = fft2(img);
-    % imgFFT = fft2(fftshift(img));
    % figure(1); imagesc(abs(imgFFT));
    % figure(2); imagesc(abs(otf));
    % colormap(gray(64))

-    % Multiply the transformed otf and the image.
-    % Then invert and put the image center in the center of the matrix
-    filteredIMG = abs(ifft2(otf .* imgFFT));
-    % filteredIMG = abs(ifftshift(ifft2(otf .* imgFFT)));
-    % if  (sum(filteredIMG(:))/sum(img(:)) - 1) > 1e-10  % Should be 1 if DC is correct
-    %    warning('DC poorly accounted for');
-    % end
-
-    % Temporary debug statements
-    % if ~isreal(filteredIMG)
-    %     warning('ISET:complexphotons','Complex photons: %.0f', wave(ii));
-    % end
-
-    % Sometimes we had annoying complex values left after this filtering.
-    % We got rid of it by an abs() operator.  It should never be there.
-    % But we think it arises because of rounding error.  We haven't seen
-    % this in years, however.
-    % figure(1); imagesc(abs(filteredIMG)); colormap(gray(64))
+    % Multiply the transformed otf and image. No fftshifts needed here
+    % because everything has DC at (1,1).
    %
-    % oi = oiSet(oi,'photons',filteredIMG,wave(ii));
+    % Then invert.  This leaves the image correct because that is what
+    % ifft2 does when its input has DC at (1,1), which it does here.
+    filteredIMGRaw = ifft2(otf .* imgFFT);
+
+    % Get real and imaginary parts.  The image should be real up to
+    % numerical issues.  We save the largest deviation over
+    % wavelengths, and check outside the loop below.
+    filteredIMGReal = real(filteredIMGRaw);
+    filteredIMGImag = imag(filteredIMGRaw);
+    imagFraction = max(abs(filteredIMGImag(:)))/max(abs(filteredIMGReal(:)));
+    if ( imagFraction > imagFractionTol )
+        nWlImagTooBig = nWlImagTooBig + 1;
+        if (imagFraction > maxImagFraction)
+            maxImagFraction = imagFraction;
+        end
+    end
+
+    % The complex values should never be there.  But we think they arise
+    % because of rounding error and/or asymmetry in the OTF as a
+    % result of the interpolation.  Take the absolute value to
+    % eliminate small imaginary terms.
+    filteredIMG = abs(filteredIMGRaw);
+
+    % figure(1); imagesc(abs(filteredIMG)); colormap(gray(64))
    p(:,:,ii) = filteredIMG;
 end

-% Put all the photons in at once.
+% Check that the imaginary part was not too big
+if (nWlImagTooBig > 0)
+    error('OpticsOTF: Imaginary exceeds tolerance relative to real at %d wavelengths, max fraction %0.1g\n',nWlImagTooBig,maxImagFraction);
+end
+
+%% Put all the photons into the oi at once.
 oi = oiSet(oi,'photons',p);

 end
diff --git a/opticalimage/optics/opticsPSF.m b/opticalimage/optics/opticsPSF.m
index 1777a124..8a65a651 100644
--- a/opticalimage/optics/opticsPSF.m
+++ b/opticalimage/optics/opticsPSF.m
@@ -9,6 +9,9 @@
 %    scene
 %
 % Optional key/val
+%    aperture
+%    wvf
+%    padvalue
 %
 % Return
 %    oi
@@ -86,8 +89,8 @@
 %    oi = oiApplyPSF(oi);
 %
 % Copyright ImagEval Consultants, LLC, 2003.
-
-%
+
+% Input handling
 if ieNotDefined('oi'), error('Optical image required.'); end
 if ieNotDefined('aperture'), aperture = []; end
 if ieNotDefined('wvf'), wvf = []; end
@@ -127,7 +130,7 @@
 flength = oiGet(oi,'focal length',unit);
 fnumber = oiGet(oi,'f number');

-% WVF is square. Use the arger of the two sizes
+% WVF is square.
Use the larger of the two sizes
 oiSize = max(oiGet(oi,'size'));

 if isempty(wvf)
@@ -197,18 +200,48 @@
     % Deal with non square scenes
     if oiWidth ~= oiHeight
-        sz = double(abs(oiWidth - oiHeight)/2);
+        sz = round(double(abs(oiWidth - oiHeight)/2));
         if oiWidth < oiHeight
             photons = padarray(p(:,:,ww),[0,sz],0,'both');
-            photons = ImageConvFrequencyDomain(photons,PSF{ww}, 2);
+            % photons = ImageConvFrequencyDomain(photons,PSF{ww}, 2);
+            photons = fftshift(ifft2(fft2(photons) .* fft2(PSF{ww})));
             p(:,:,ww) = photons(:,sz+1:sz+oiWidth);
         else
             photons = padarray(p(:,:,ww),[sz,0],0,'both');
-            photons = ImageConvFrequencyDomain(photons,PSF{ww}, 2);
+            % photons = ImageConvFrequencyDomain(photons,PSF{ww}, 2);
+            photons = fftshift(ifft2(fft2(photons) .* fft2(PSF{ww})));
             p(:,:,ww) = photons(sz+1:sz+oiHeight,:);
         end
     else
+        % BW:  Debugging as per DHB.  This line breaks the padding.
+        % It seems the convolution is not circular.  Currently
+        % debugging in v_ibioRDT_wvfPadPSF.m

+        % tmp = conv2(p(:,:,ww),PSF{ww},'same');

+        % The ImageConvFrequencyDomain method almost always worked.
+        % But for the slanted bar scene, for some reason, it had a
+        % roll off at the edge towards zero that should not have been
+        % there.  We tried various tests to see why, but none worked.
+        % The method has parameters in how it calls fft2() that nearly
+        % always work but for some reason fail us in the slanted edge
+        % case. (See v_icam_wvfPadPSF).  So we now do this step in
+        % the computation the same way that it is done in opticsOTF.

+        % In this case, we need an fftshift that is not needed in the
+        % opticsOTF case.  Perhaps that is because we store the OTF in
+        % a different format there and here we simply take fft2(PSF).
+        %
+        % That may be the reason why there is a 1 pixel shift in the
+        % result for odd (but not even) size images.  See
+        % v_icam_wvfPadPSF.m.  Let's try to eliminate it.

+        % Deprecated because it pads and causes the roll off sometimes
-        p(:,:,ww) = ImageConvFrequencyDomain(p(:,:,ww), PSF{ww}, 2 );
+        % p(:,:,ww) = ImageConvFrequencyDomain(p(:,:,ww), PSF{ww}, 2 );

+        % Designed to match the opticsOTF values
+        p(:,:,ww) = ifft2( fft2(p(:,:,ww)) .* fft2(ifftshift(PSF{ww})) );
+
     end
     % otf requires a single wavelength
     % otf(:,:,ww) = wvfGet(wvf,'otf',wavelist(ww));
diff --git a/opticalimage/optics/opticsSICompute.m b/opticalimage/optics/opticsSICompute.m
index a3136fcd..2d4a5575 100644
--- a/opticalimage/optics/opticsSICompute.m
+++ b/opticalimage/optics/opticsSICompute.m
@@ -1,30 +1,39 @@
 function oi = opticsSICompute(scene,oiwvf,aperture,varargin)
-%Calculate OI irradiance using a custom shift-invariant PSF
+% Calculate OI irradiance using a shift-invariant PSF
 %
 %    oi = opticsSICompute(scene,oiwvf,varargin)
 %
-% The shift invariant transform (OTF) is stored in the optics structure in
-% the optics.data.OTF slot. The representation includes the spatial
-% frequencies in the x and y dimensions (OTF.fx, OTF.fy) which are
-% represented in cycles/mm.  The value of the OTF, which can be complex, is
-% stored in OTF.OTF.
-%
+% The shift invariant transform is calculated from the optics
+% structure.  At present, we compute the PSF from a stored wavefront
+% function, on the fly, using the sampling density appropriate for the
+% scene.  This is done in the opticsPSF() function.
+%
+% (See below for another option, opticsOTF().)
+%
 % This routine simply manages the order of events for converting the scene
 % radiance to sensor irradiance.
The events are:
 %
 %    * Converting scene radiance to image irradiance
 %    * Applying the off-axis (e.g., cos4th) fall off
-%    * The OTF is applied (opticsOTF)
+%    * The OTF is applied
+%      (opticsPSF or opticsOTF, according to the optics name field)
 %    * A final blur for the anti-aliasing filter is applied
 %    * The illuminance is calculated and stored
 %
-% See also: opticsRayTrace, oiCompute, opticsOTF
+% Note:
+%
+% Historically, we pre-computed an OTF and stored it in the optics
+% structure in the optics.data.OTF slot.  This contains the values
+%
+%     OTF.OTF, OTF.fx, OTF.fy (where frequency is cycles/mm)
 %
-% Example
-%   scene = vcGetObject('scene');
-%   oi = vcGetObject('oi');
+% We interpolate the stored OTF to match the sampling density in the
+% scene.  This is the opticsOTF() function.  To use this path rather
+% than the opticsPSF, set the name field of the optics structure to
+% 'opticsotf'.  For an example, see v_icam_oiPad.
 %
-% Copyright ImagEval Consultants, LLC, 2005
+% See also:
+%   opticsRayTrace, oiCompute, opticsPSF, opticsOTF

%%
if ieNotDefined('scene'), error('Scene required.'); end
diff --git a/opticalimage/wavefront/wvf2optics.m b/opticalimage/wavefront/wvf2optics.m
index 10ccba37..e288eda4 100644
--- a/opticalimage/wavefront/wvf2optics.m
+++ b/opticalimage/wavefront/wvf2optics.m
@@ -81,6 +81,7 @@
     % The OTF has DC in the center.
     thisOTF = wvfGet(wvf,'otf',wave(ww));
+    % [~,~,psfCheck] = OtfToPsf([],[],thisOTF);
     % ieNewGraphWin; mesh(X,Y,abs(thisOTF));

     if (all(f == fx))
@@ -96,7 +97,8 @@
     % this point, the data have (0,0) in the center.  Thus we use ifftshift
     % to the wvf centered format.  Using fftshift() can invert this
     % reorganization of the data.
-    otf(:, :, ww) = ifftshift(est);
+    %otf(:, :, ww) = ifftshift(est);
+    otf(:, :, ww) = est;
 end

 %{
diff --git a/opticalimage/wavefront/wvfComputePupilFunction.m b/opticalimage/wavefront/wvfComputePupilFunction.m
index 8053460c..0321786b 100644
--- a/opticalimage/wavefront/wvfComputePupilFunction.m
+++ b/opticalimage/wavefront/wvfComputePupilFunction.m
@@ -360,7 +360,8 @@

     % Get Zernike coefficients
     % Need to make sure the c vector is long enough to contain defocus
-    % term, because we handle that specially.  But why?
+    % term, because we handle that specially so we can add in LCA without
+    % there actually being something there.
     %
     % This wastes a little time when we just compute diffraction, but
     % that is the least of our worries.
diff --git a/opticalimage/wavefront/wvfCreate.m b/opticalimage/wavefront/wvfCreate.m
index 2d92d306..207f3fb5 100644
--- a/opticalimage/wavefront/wvfCreate.m
+++ b/opticalimage/wavefront/wvfCreate.m
@@ -93,7 +93,6 @@
 p.addParameter('measuredwl', 550, @isscalar);
 p.addParameter('measuredopticalaxis', 0, @isscalar);
 p.addParameter('measuredobserveraccommodation', 0, @isscalar);
-p.addParameter('measuredobserverfocuscorrection', 0, @isscalar);

 % Spatial sampling parameters
 p.addParameter('sampleintervaldomain', 'psf', @ischar);
@@ -105,7 +104,7 @@
 p.addParameter('calcwavelengths', 550, @isnumeric);
 p.addParameter('calcopticalaxis', 0, @isscalar);
 p.addParameter('calcobserveraccommodation', 0), @isscalar;
-p.addParameter('calcobserverfocuscorrection', 0, @isscalar);
+%p.addParameter('calcobserverfocuscorrection', 0, @isscalar);

 % Retinal parameters
 % Set for consistency with 300 um historical.  When we adjust one or the
@@ -150,8 +149,6 @@
 wvf = wvfSet(wvf, 'measured optical axis', p.Results.measuredopticalaxis);
 wvf = wvfSet(wvf, 'measured observer accommodation', ...
p.Results.measuredobserveraccommodation); -wvf = wvfSet(wvf, 'measured observer focus correction', ... - p.Results.measuredobserverfocuscorrection); % Spatial sampling parameters wvf = wvfSet(wvf, 'sample interval domain', ... @@ -165,8 +162,10 @@ wvf = wvfSet(wvf, 'calc optical axis', p.Results.calcopticalaxis); wvf = wvfSet(wvf, 'calc observer accommodation', ... p.Results.calcobserveraccommodation); -wvf = wvfSet(wvf, 'calc observer focus correction', ... - p.Results.calcobserverfocuscorrection); + +% BW thinks DHB made this throw an error. So removed. +% wvf = wvfSet(wvf, 'calc observer focus correction', ... +% p.Results.calcobserverfocuscorrection); % Conversion between degrees of visual angle and mm % This also sets the focal length for consistency. diff --git a/opticalimage/wavefront/wvfGet.m b/opticalimage/wavefront/wvfGet.m index 9ca41b20..ac33c52f 100644 --- a/opticalimage/wavefront/wvfGet.m +++ b/opticalimage/wavefront/wvfGet.m @@ -122,9 +122,6 @@ % 'calc observer accommodation' % - Observer accommodation at calculation % time (diopters) -% 'calc observer focus correction' -% - Focus correction added optically for -% observer at calculation time (diopters) % 'calc wavelengths' - Wavelengths to calculate over (nm, *) % 'calc cone psf info' - Structure with cone sensitivities and % weighting spectrum for computing the @@ -166,10 +163,6 @@ % 'measured observer accommodation' % - Observer accommodation at aberration % measurement time (diopters) -% 'measured observer focus correction' -% - Focus correction added optically for -% observer at measurement time (diopters) -% % % Need to be implemented/checked/documented % +'distanceperpix' - @@ -995,8 +988,9 @@ % otf = fftshift(fft2(ifftshift(psf))); % % [~,~,val1] = PsfToOtf([],[],psf); % Removed PTB function - - val = fftshift(fft2(ifftshift(psf))); % Key line from PTB + val = fftshift(fft2(ifftshift(psf))); % Key line from PTB function + val = ifftshift(val); + %[~,~,psfCheck] = OtfToPsf([],[],val); % From PTB: We used to zero out small imaginary values. % This, however, can cause numerical problems much worse than diff --git a/opticalimage/wavefront/wvfPrint.m b/opticalimage/wavefront/wvfPrint.m index f671eb4e..65d06027 100644 --- a/opticalimage/wavefront/wvfPrint.m +++ b/opticalimage/wavefront/wvfPrint.m @@ -70,8 +70,6 @@ fprintf('\tOptical axis (deg): %g\n', wvfGet(wvf, 'calc optical axis')); fprintf('\tObserver accommodation (diopters): %g\n', ... wvfGet(wvf, 'calc observer accommodation')); -fprintf('\tObserver focus correction (diopters): %g\n', ... - wvfGet(wvf, 'calc observer focus correction')); fprintf('\tWavelengths: ') val = wvfGet(wvf, 'calc wavelengths'); for i = 1:length(val) diff --git a/opticalimage/wavefront/wvfSet.m b/opticalimage/wavefront/wvfSet.m index ba6e9aa7..26ea469f 100644 --- a/opticalimage/wavefront/wvfSet.m +++ b/opticalimage/wavefront/wvfSet.m @@ -194,6 +194,7 @@ % change the point spread location, not quality, as measured in % wavefront aberrations. 
We use the "j" single-index scheme of OSA % standards + case {'zcoeffs', 'zcoeff', 'zcoef'} % wvfSet(wvf, 'zcoeffs', val, jIndex); % jIndex is optional, and can be a vector of j values or a string @@ -416,6 +417,7 @@ case {'calcobserverfocuscorrection', 'defocusdiopters'} % Specify optical correction added to observer focus at the % calculation time + error('This value is no longer used, so setting it will not lead to good things.') wvf.calcObserverFocusCorrectionDiopters = val; wvf.PUPILFUNCTION_STALE = true; @@ -521,6 +523,9 @@ 'measuredobserverfocuscorrectiondiopters'} % Focus correction added optically for observer at the measurement % time (diopters) + error('This value is no longer used, so setting it will not lead to good things.') + % Also, there is a typo below since this case is setting the wrong + % field. Not fixing because that field is going way. wvf.measObserverAcommodationDiopters = val; wvf.PUPILFUNCTION_STALE = true; diff --git a/pbrtEXRTest.m b/pbrtEXRTest.m new file mode 100644 index 00000000..0041ae2b --- /dev/null +++ b/pbrtEXRTest.m @@ -0,0 +1,17 @@ +% Convert a PBRT exr into a scene. Preparing for sceneFromFile + +%% +chdir('/Volumes/TOSHIBA EXT/pbrt-v4-scenes-renderings'); +fname = 'bistro_boulangerie.exr'; +fname = 'sanmiguel/sanmiguel-in-tree.exr'; + +img = exrread(fname); +imtool(img); + +scene = piEXR2ISET(fname); +depthImage = piReadEXR(fname, 'data type','depth'); +scene = sceneSet(scene,'depth map',depthImage); +sceneWindow(scene); + +% This should work, but doesn't. +% depthmap = piEXR2ISET(fname,'label','depth'); \ No newline at end of file diff --git a/scene/sceneCreate.m b/scene/sceneCreate.m index 68b36572..bcbc9a3a 100644 --- a/scene/sceneCreate.m +++ b/scene/sceneCreate.m @@ -26,12 +26,13 @@ % % MACBETH COLOR AND LUMINANCE CHART % -% The default, scene = sceneCreate, is a Macbeth color checker illuminated -% by a D65 light source with a mean luminance of 100 cd/m2. The scene is -% described only a small number of spatial 64x96 (row,col). This can be -% changed using the patchSize argument (default - 16 pixels). The -% wavelength 400:10:700 samples, making it efficient to use for experiments. +% The default, scene = sceneCreate, is a Macbeth color checker +% illuminated by a D65 light source with a mean luminance of 100 +% cd/m2. The scene is described only a small number of spatial +% 64x96 (row,col). This can be changed using the patchSize argument % +% scene = sceneCreate('macbethEE_IR',[patchSize=16],[wave=400:10:700]); +% % Here are some options % {'macbeth d65'} - Macbeth D65 image. % {'macbeth d50'} - D50 illuminant @@ -41,7 +42,6 @@ % {'macbeth EE_IR'} - Equal energy extends out to the IR % {L star} - Vertical bars spaced in equal L* steps % -% % Use sceneAdjustIlluminant() to change the scene SPD. % % REFLECTANCE CHART @@ -50,7 +50,7 @@ % % NARROWBAND COLOR PATCHES % wave = [600, 610]; sz = 64; -% scene = sceneCreate('uniform monochromatic',wave,sz); +% scene = sceneCreate('uniform monochromatic',sz,wave); % % SPATIAL TEST PATTERNS: % @@ -1423,15 +1423,15 @@ % Make the image imSize = round(imSize/2); [X,Y] = meshgrid(-imSize:imSize,-imSize:imSize); -img = zeros(size(X)); +% img = zeros(size(X)); % y = barSlope*x defines the line. 
We find all the Y values that are above the line
-list = (Y > barSlope*X );
+list = (Y > barSlope*X );

 % We assume target is perfectly reflective (white), so the illuminant is
 % the equal energy illuminant; that is, the SPD is all due to the
 % illuminant
-img( list ) = 1;
+img( list ) = 1;

 % Prevent dynamic range problem with ieCompressData
 img = ieClip(img,1e-6,1);
diff --git a/scene/sceneFromFile.m b/scene/sceneFromFile.m
index df6604b7..00f448b8 100644
--- a/scene/sceneFromFile.m
+++ b/scene/sceneFromFile.m
@@ -12,7 +12,7 @@
 %    may also be
 %      * RGB data, rather than the file name
 %      * A file that contains a scene structure
-%  imageType: 'spectral', 'rgb' or 'monochrome'
+%  imageType: 'spectral', 'rgb', 'exr', or 'monochrome'
 %             When 'rgb', the imageData might be RGB format. 'spectral'
 %             includes both multispectral and hyperspectral.
 %  meanLuminance: If a value is sent in, set scene to this meanLuminance.
@@ -141,6 +141,26 @@
 imType = ieParamFormat(imType);

 switch lower(imType)
+    case {'exr'}
+        % Read an EXR file into a scene.
+        %    scene = sceneFromFile(exrFile,'exr');
+        % In the future we might read additional metadata, but that is
+        % not yet available.
+        if ~exist(inputData,'file')
+            error('Cannot find file %s\n',inputData);
+        else
+            [~,~,e] = fileparts(inputData);
+            if ~isequal(e,'.exr')
+                error('EXR file required.  Extension is %s\n',e);
+            end
+        end
+
+        scene = piEXR2ISET(inputData);
+        dMap = piReadEXR(inputData, 'data type','depth');
+        scene = sceneSet(scene,'depth map',dMap);
+        scene = sceneSet(scene, 'filename', inputData);
+        return;
+
     case {'monochrome','rgb'}  % 'unispectral'
         % init display structure
         if notDefined('dispCal')
diff --git a/scene/sceneGet.m b/scene/sceneGet.m
index 70522db2..e63eb77e 100644
--- a/scene/sceneGet.m
+++ b/scene/sceneGet.m
@@ -90,7 +90,7 @@
 %      'distance per degree'*  - sample spacing per deg of visual angle
 %      'degrees per distance'  - degrees per unit distance, e.g., sceneGet(scene,'degPerDist','micron')
 %      'degrees per sample'
-%      'spatial support'       - spatial locations of points e.g., sceneGet(oi,'spatialsupport','microns')
+%      'spatial support'       - spatial locations of points e.g., sceneGet(scene,'spatialsupport','microns')
 %      'h angular resolution'  - height degrees per pixel
 %      'w angular resolution'  - width degrees per pixel
 %      'angular resolution'    - (height, width) degrees per pixel
diff --git a/scripts/optics/flare/s_opticsFlare2.m b/scripts/optics/flare/s_opticsFlare2.m
index 23881cee..a223ae20 100644
--- a/scripts/optics/flare/s_opticsFlare2.m
+++ b/scripts/optics/flare/s_opticsFlare2.m
@@ -18,13 +18,13 @@
 % Now create some flare based on the aperture, dust and scratches.
 % There are many parameters for this function, including dot mean, line
 % mean, dot sd, line sd, line opacity.  They are returned in params
-nsides = 3;
+nsides = 6;
 apertureFunc = wvfAperture(wvf,'nsides',nsides,...
     'dot mean',20, 'dot sd',3, 'dot opacity',0.5, ...
     'line mean',20, 'line sd', 2, 'line opacity',0.5);

-wvf = wvfPupilFunction(wvf,'amplitude',apertureFunc);
-wvf = wvfCompute(wvf,'force',false);  % force as false is important
+wvf = wvfPupilFunction(wvf,'aperture function',apertureFunc);
+wvf = wvfCompute(wvf);
 wvfPlot(wvf,'psf','unit','um','wave',550,'plot range',20,'airy disk',true);
 %{
@@ -51,7 +51,7 @@
 set(gca,'xlim',[-20 20],'ylim',[-20 20]);

 %% There is a lot of similarity in the PSF, but the spatial scale is not the same
-
+%{
 [oiApply, pMask, psf] = piFlareApply(scenePoint,'num sides aperture',nsides, ...
     'focal length',wvfGet(wvf,'focal length','m'), ...
    'fnumber',wvfGet(wvf,'fnumber'));
@@ -64,6 +64,15 @@
 oiApply = oiSet(oiApply,'name','flare');
 oiWindow(oiApply);
 oiSet(oiApply,'gamma',0.5); drawnow;

+%%
+oi = oiCompute(oiApply,sceneHDR);
+oi = oiCrop(oi,'border');
+oi = oiSet(oi,'name','flare');
+oiWindow(oi);
+oiSet(oi,'render flag','hdr');
+oiSet(oi,'gamma',1); drawnow;
+
+%}

 %% HDR Test scene.  Green repeating circles
@@ -77,22 +86,14 @@
 oiSet(oi,'render flag','hdr');
 oiSet(oi,'gamma',1); drawnow;

-%%
-oi = oiCompute(oiApply,sceneHDR);
-oi = oiCrop(oi,'border');
-oi = oiSet(oi,'name','flare');
-oiWindow(oi);
-oiSet(oi,'render flag','hdr');
-oiSet(oi,'gamma',1); drawnow;
-
 %% Change the number of sides
 nsides = 5;
 [apertureFunc, params] = wvfAperture(wvf,'nsides',nsides,...
     'dot mean',20, 'dot sd',3, 'dot opacity',0.5, ...
     'line mean',20, 'line sd', 2, 'line opacity',0.5);

-wvf = wvfPupilFunction(wvf,'amplitude',apertureFunc);
-wvf = wvfComputePSF(wvf,'force',false);  % force as false is important
+wvf = wvfPupilFunction(wvf,'aperture function',apertureFunc);
+wvf = wvfComputePSF(wvf);
 wvfPlot(wvf,'psf','unit','um','wave',550,'plot range',20,'airy disk',true);

 scenePoint = sceneSet(scenePoint,'fov',1);
@@ -118,8 +119,8 @@
     'dot mean',20, 'dot sd',3, 'dot opacity',0.5, ...
     'line mean',20, 'line sd', 2, 'line opacity',0.5);

-wvf = wvfPupilFunction(wvf,'amplitude',apertureFunc);
-wvf = wvfComputePSF(wvf,'force',false);  % force as false is important
+wvf = wvfPupilFunction(wvf,'aperture function',apertureFunc);
+wvf = wvfComputePSF(wvf);
 wvfPlot(wvf,'psf','unit','um','wave',550,'plot range',20,'airy disk',true);

 oi = oiCompute(wvf,sceneHDR);
@@ -128,6 +129,4 @@
 oiSet(oi,'render flag','hdr');
 oiSet(oi,'gamma',1); drawnow;

-%% Now attend to longitudinal chromatic aberration
-
 %% END
\ No newline at end of file
diff --git a/scripts/optics/flare/s_opticsHDR.m b/scripts/optics/flare/s_opticsHDR.m
index 09d8bc08..f91b74a0 100644
--- a/scripts/optics/flare/s_opticsHDR.m
+++ b/scripts/optics/flare/s_opticsHDR.m
@@ -1,4 +1,9 @@
 %% Calculate PSF/OTF using wvf that matches the OI
+%
+% Deprecated
+%  We now do a version of the potential solution below using
+%  opticsPSF.
+%
 % Interpolation in isetcam matches the resolution (um/sample) of the default
 % PSF/OTF with the OI resolution. However, this process introduces minor
 % artifacts in the PSF, such as horizontal and vertical spiky lines,
@@ -6,60 +11,54 @@
 % While generally not problematic, these artifacts could be noticeable in
 % HDR scenes, particularly in night settings.
 %
-% A potential solution is to generate a high-resolution OTF in real-time,
-% but this approach is computationally intensive for large scenes.
-% As a temporary workaround, we precalculate the OTF at the OI's resolution
-% and configure the OI accordingly.
-% This method allows oiCompute to bypass the interpolation step.
+% A potential solution is to generate a high-resolution OTF in
+% real-time, but this approach is computationally intensive for large
+% scenes.  As a temporary workaround, we precalculate the OTF at the
+% OI's resolution and configure the OI accordingly.  This method allows
+% oiCompute to bypass the interpolation step.
 %
 % Zhenyi, 2023

-% Not running.
Maybe delete or debug
-%
-% return;
-
 %%
 ieInit;
 ieSessionSet('init clear',true);
 close all;

-%%
+%% Compare DL and WVF calculations for different fNumbers
+
 s_size = 1000;
 flengthM = 4e-3;
 fnumber = 8;
-
 pupilMM = (flengthM*1e3)/fnumber;

 scene = sceneCreateHDR(s_size,17,1);
-
 scene = sceneAdjustLuminance(scene,'peak',1e5);
-
 scene = sceneSet(scene,'fov',10);
 scene = sceneSet(scene,'distance', 1);

 index = 1;
 fig = figure;set(fig, 'AutoResizeChildren', 'off');

 for fnumber = 3:5:13
-    % DL
+    % Compute with the DL model
     oi = oiCreate('diffraction limited');
     oi = oiSet(oi,'optics focallength',flengthM);
     oi = oiSet(oi,'optics fnumber',fnumber);
+
     % oi needs information from scene to figure out the proper resolution.
-    oi = oiCompute(oi, scene);
-    oi = oiCrop(oi,'border');
+    oi = oiCompute(oi, scene,'crop',true);

-    % SI
+    % Compute with the SI model.  opticspsf path
     aperture = [];
-    oi = oiSet(oi,'optics model','shift invariant');
-    oi_wvf = oiCompute(oi,scene,'aperture',aperture);
+    oi = oiSet(oi,'optics model','shift invariant');
+    oi_wvf = oiCompute(oi,scene,'aperture',aperture,'crop',true);
     oi_wvf = oiSet(oi_wvf, 'name','flare');
-    oi_wvf = oiCrop(oi_wvf,'border');
-    %
+
+    % Show some images
     if exist('piRootPath.m','file')
         % If iset3d exist, use piRadiance2RGB
-        ip = piRadiance2RGB(oi,'etime',1);
-        rgb = ipGet(ip,'srgb');
-        ip_wvf = piRadiance2RGB(oi_wvf,'etime',1);
+        ip = piRadiance2RGB(oi,'etime',1);
+        rgb = ipGet(ip,'srgb');
+        ip_wvf = piRadiance2RGB(oi_wvf,'etime',1);
         rgb_wvf = ipGet(ip_wvf,'srgb');

         subplot(3,3,index);imshow(rgb);index = index+1;title(sprintf('DL-Fnumber:%d\n',fnumber));
@@ -67,10 +66,12 @@
         subplot(3,3, index);imagesc(abs(rgb(:,:,2)-rgb_wvf(:,:,2)));colormap jet; colorbar; index = index+1;title('difference');
     end

+    % Compare the two computational paths.  Check is for each of the fnumbers.
     assert(abs(mean2(oi_wvf.data.photons(:,:,15))/mean2(oi.data.photons(:,:,15))-1) < 0.0001);
 end

-%% Change the shape of the aperture
+%% Change the shape of the aperture and produce flare
+
 if exist('piRootPath.m','file')
     % Compare with this: https://en.wikipedia.org/wiki/File:Comparison_aperture_diffraction_spikes.svg
     nsides_list = [0, 4, 5, 6];
@@ -84,13 +85,26 @@
         'dot mean',0, 'dot sd',0, 'dot opacity',0.5,'dot radius',5,...
         'line mean',0, 'line sd', 0, 'line opacity',0.5,'linewidth',2);

-    oi_wvf = oiCompute(oi,scene,'aperture',aperture);
+    oi_wvf = oiCompute(oi,scene,'aperture',aperture,'crop',true);

-    oi_wvf = oiSet(oi_wvf, 'name','flare');
-    oi_wvf = oiCrop(oi_wvf,'border');
+    oi_wvf = oiSet(oi_wvf, 'name','flare');
     ip_wvf = piRadiance2RGB(oi_wvf,'etime',1);
     rgb_wvf = ipGet(ip_wvf,'srgb');
     subplot(1, 4, ii);imshow(rgb_wvf);title(sprintf('Number of blades: %d\n',nsides));
     end
-end
\ No newline at end of file
+end
+
+%%
+oi = oiCreate('wvf');
+oi = oiSet(oi,'optics name','opticsotf');
+oi = oiCompute(oi,scene,'crop',true,'aperture',aperture);
+oiWindow(oi);
+
+oi = oiSet(oi,'optics name','opticspsf');
+oi = oiCompute(oi,scene,'crop',true,'aperture',aperture);
+oiWindow(oi);
+
+
+
+%% END
\ No newline at end of file
diff --git a/scripts/sensor/s_sensorIMX490.m b/scripts/sensor/s_sensorIMX490.m
new file mode 100644
index 00000000..2d5df239
--- /dev/null
+++ b/scripts/sensor/s_sensorIMX490.m
@@ -0,0 +1,106 @@
+%% s_sensorIMX490
+%
+% Run the same OI through multiple sensors, just for comparison.  BW
+% used this script to create the sensor images for the Ford
+% presentation.
+%
+% Combines two test charts:  Macbeth and a sweep frequency.
+%
+%

+%%
+ieInit

+%% Make a combined image

+% MCC side
+patchSize = 96;
+sceneC = sceneCreate('macbethD65',patchSize);
+sz = sceneGet(sceneC,'size');
+sceneC = sceneSet(sceneC,'resize',round([sz(1), sz(2)/2]));
+sceneWindow(sceneC);
+
+% Sweep frequency side
+sceneS = sceneCreate('sweep frequency',sz(1),sz(1)/16);
+sceneWindow(sceneS);
+
+% Combine
+scene = sceneCombine(sceneC,sceneS,'direction','horizontal');
+
+hfov = 20;
+scene = sceneSet(scene,'fov',hfov);
+vfov = sceneGet(scene,'v fov');
+sceneWindow(scene);
+
+%%
+oi = oiCreate;
+oi = oiSet(oi,'optics fnumber',1.2);
+oi = oiCompute(oi,scene);
+oiWindow(oi);
+
+%% Now run through some sensors
+
+% sensorList = {'bayer-rggb','imx363','rgbw','mt9v024','mt9v024','imec44','cyym','monochrome'};
+
+% Used for Ford talk
+sensorList = {'imx363','mt9v024','cyym'};
+%sensorList = {'imx363'};
+
+
+for ii=1:numel(sensorList)
+    if isequal(sensorList{ii},'mt9v024')
+        sensor = sensorCreate(sensorList{ii},[],'rccc');
+    else
+        sensor = sensorCreate(sensorList{ii});
+    end
+
+    sensor = sensorSet(sensor,'pixel size',1.5e-6);
+    sensor = sensorSet(sensor,'hfov',hfov,oi);
+    sensor = sensorSet(sensor,'vfov',vfov);
+    sensor = sensorSet(sensor,'auto exposure',true);
+    sensor = sensorCompute(sensor,oi);
+    sensorWindow(sensor);
+
+    switch sensorList{ii}
+        case 'imx363'
+            ip = ipCreate('imx363 RGB',sensor);
+            ip = ipCompute(ip,sensor);
+            ipWindow(ip);
+        case 'mt9v024'
+            ip = ipCreate('mt9v024 RCCC', sensor);
+            % NOTE: ipCreate doesn't seem to take its cue from the
+            % sensor that it is rccc, so we do it manually
+            ip = ipSet(ip,'demosaic method','analog rccc');
+            ip = ipCompute(ip,sensor);
+            ipWindow(ip);
+    end
+
+    sensor = sensorSet(sensor,'pixel size constant fill factor',6e-6);
+    sensor = sensorSet(sensor,'hfov',hfov,oi);
+    sensor = sensorSet(sensor,'vfov',vfov);
+    sensor = sensorSet(sensor,'auto exposure',true);
+    sensor = sensorCompute(sensor,oi);
+
+    switch sensorList{ii}
+        case 'imx363'
+            ip = ipCreate('imx363 RGB',sensor);
+            ip = ipCompute(ip,sensor);
+            ipWindow(ip);
+        case 'mt9v024'
+            ip = ipCreate('mt9v024 RCCC', sensor);
+            % NOTE: ipCreate doesn't seem to take its cue from the
+            % sensor that it is rccc, so we do it manually
+            ip = ipSet(ip,'demosaic method','analog rccc');
+            ip = ipCompute(ip,sensor);
+            ipWindow(ip);
+    end
+
+    % [~,img] = sensorShowCFA(sensor,[],[3 3]);
+    sensorWindow(sensor);
+
+end
+
+%%
+
+
+
diff --git a/sensor/imx490/imx490Compute.m b/sensor/imx490/imx490Compute.m
new file mode 100644
index 00000000..230e18af
--- /dev/null
+++ b/sensor/imx490/imx490Compute.m
@@ -0,0 +1,281 @@
+function [imx490Large, metadata] = imx490Compute(oi,varargin)
+% Create Sony imx490 sensor response
+%
+% Synopsis
+%    [sensorCombined, metadata] = imx490Compute(oi,varargin)
+%
+% Brief
+%   The Sony imx490 has a large and small photodiode in each pixel.
+%   They are each measured twice, once with high gain and once with
+%   low gain.  It produces 4 different values from each pixel.
+%
+%   IP seems to be a speciality of LUCID
+%   https://thinklucid.com/product/triton-5-mp-imx490/ - Specifications
+%   https://thinklucid.com/product/triton-5-mp-imx490/ - EMVA Data
+%   https://thinklucid.com/tech-briefs/sony-imx490-hdr-sensor-and-flicker-mitigation/
+%   From the LUCID web-site:
+%
+%   The IMX490 achieves high dynamic range using two sub-pixels for each
+%   pixel location which vary in sensitivity and saturation capacity. Each
+%   sub-pixel is readout with high and low conversion gains giving four
+%   12-bit channels for each pixel.
These four channels are combined into
+%   a single linear 24-bit HDR value. The EMVA1288 standard is not directly
+%   applicable to the 24-bit combined data, but is applicable to the
+%   individual channels. Results were measured on the individual channels
+%   and scaled when appropriate to reflect how the channels are combined
+%   into a 24-bit HDR image
+%
+%  Integration times:  min of 86.128 μs to max of 5 s
+%
+% Input
+%   oi - optical image
+%
+% Optional key/val
+%   gain      - Four gain values. Default: 1,4,1,4
+%   noiseflag - Default 2
+%   exp time  - Default 1/60 s
+%   method    - Method for combining the four values to 1 for each pixel
+%               Options:  average, bestsnr, ...
+%
+% Output
+%   sensorCombined - Constructed combination
+%   metadata - Cell array of the four captures, and other metadata about
+%              the selection algorithm.
+%
+% Description
+%   Combine them into a single sensor struct, and return that.  If
+%   requested, return the four individual sensors as a sensor cell
+%   array.
+%
+% See also
+%   s_sensorIMX490Test
+%   sensorCreate('imx490-large') ...

+%% Read parameters
+varargin= ieParamFormat(varargin);
+
+p = inputParser;
+p.addRequired('oi',@isstruct);
+p.addParameter('gain',[1 4 1 4],@isvector);
+p.addParameter('noiseflag',2,@isnumeric);
+p.addParameter('exptime',1/60,@isnumeric);
+p.addParameter('method','average',@(x)(ismember(ieParamFormat(x),{'average','bestsnr'})));
+p.parse(oi,varargin{:});
+
+gains   = p.Results.gain;
+expTime = p.Results.exptime;
+method  = p.Results.method;
+
+%% Set up the two sensor sizes
+
+% These differ in the fill factor.
+imx490Small = sensorCreate('imx490-small');
+imx490Large = sensorCreate('imx490-large');
+
+imx490Small = sensorSet(imx490Small,'noise flag',p.Results.noiseflag);
+imx490Large = sensorSet(imx490Large,'noise flag',p.Results.noiseflag);
+
+imx490Small = sensorSet(imx490Small,'exp time',expTime);
+imx490Large = sensorSet(imx490Large,'exp time',expTime);
+
+% The oi should have 3 um sampling resolution.  This will match the sensor
+% fov to the oi.
+assert(max(abs(oiGet(oi,'spatial resolution','um') - sensorGet(imx490Large,'pixel size','um'))) < 1e-4)
+
+oiSize = oiGet(oi,'size');
+imx490Large = sensorSet(imx490Large,'size',oiSize);
+imx490Small = sensorSet(imx490Small,'size',oiSize);
+
+%{
+% This is how we were matching.
+imx490Large = sensorSet(imx490Large,'fov',oiGet(oi,'fov'),oi);
+imx490Small = sensorSet(imx490Small,'fov',oiGet(oi,'fov'),oi);
+%}
+
+%% The user specifies gains as multiplicative factors
+
+% ISET uses gain as a divisive factor.
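+% For example, a user gain of 4 is stored as an ISET analog gain of
+% 1/4; because the sensor voltage is divided by the analog gain, the
+% response is multiplied by 4, as the user intended.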
+isetgains = 1 ./ gains;
+
+%% Compute the 4 different responses, prior to combination
+imx490Large1 = sensorSet(imx490Large,'analog gain', isetgains(1));
+imx490Large1 = sensorSet(imx490Large1,'name',sprintf('large-%1dx',gains(1)));
+imx490Large1 = sensorCompute(imx490Large1,oi);
+sensorArray{1} = imx490Large1;
+
+imx490Large2 = sensorSet(imx490Large,'analog gain', isetgains(2));
+imx490Large2 = sensorSet(imx490Large2,'name',sprintf('large-%1dx',gains(2)));
+imx490Large2 = sensorCompute(imx490Large2,oi);
+sensorArray{2} = imx490Large2;
+
+imx490Small1 = sensorSet(imx490Small,'analog gain', isetgains(3));
+imx490Small1 = sensorSet(imx490Small1,'name',sprintf('small-%1dx',gains(3)));
+imx490Small1 = sensorCompute(imx490Small1,oi);
+sensorArray{3} = imx490Small1;
+
+imx490Small2 = sensorSet(imx490Small,'analog gain', isetgains(4));
+imx490Small2 = sensorSet(imx490Small2,'name',sprintf('small-%1dx',gains(4)));
+imx490Small2 = sensorCompute(imx490Small2,oi);
+sensorArray{4} = imx490Small2;
+
+% Retain the photodetector area and related parameters we might use to
+% make an input referred calculation.
+pdArea1 = sensorGet(imx490Large,'pixel pd area');
+pdArea2 = sensorGet(imx490Small,'pixel pd area');
+
+% Conversion gain
+cgLarge = sensorGet(imx490Large1,'pixel conversion gain');
+cgSmall = sensorGet(imx490Small1,'pixel conversion gain');
+
+
+%% Different algorithms for combining the 4 values.
+switch ieParamFormat(method)
+    case 'average'
+        % Combine the input referred volts, excluding saturated values.
+        v1 = sensorGet(imx490Large1,'volts');
+        v2 = sensorGet(imx490Large2,'volts');
+        v3 = sensorGet(imx490Small1,'volts');
+        v4 = sensorGet(imx490Small2,'volts');
+
+        % Voltage swing
+        vSwingL = sensorGet(imx490Large,'pixel voltage swing');
+        vSwingS = sensorGet(imx490Small,'pixel voltage swing');
+        idx1 = (v1 < vSwingL); idx2 = (v2 < vSwingL);
+        idx3 = (v3 < vSwingS); idx4 = (v4 < vSwingS);
+
+        % How to average
+        N = idx1 + idx2 + idx3 + idx4;
+
+        % These are the input referred estimates.  When all the
+        % voltages are saturated the image is rendered as black.
+        %   volts per pixel -> (volts/m^2) * gain / (volts/electron)
+        %   -> electrons/m2
+        % Maybe we want electrons / um^2 which would be 1e-12
+        in1 = sensorGet(imx490Large1,'electrons per area','um');
+        in2 = sensorGet(imx490Large2,'electrons per area','um');
+        in3 = sensorGet(imx490Small1,'electrons per area','um');
+        in4 = sensorGet(imx490Small2,'electrons per area','um');
+
+        % The estimated input, which should be equal for a uniform
+        % field
+        % mean(in1(:)),mean(in2(:)),mean(in3(:)),mean(in4(:))
+
+        % Set the voltage to the mean of the input referred estimates.
+        volts = (in1 + in2 + in3 + in4) ./ N;
+        volts = sensorGet(imx490Large,'pixel voltage swing') * ieScale(volts,1);
+        imx490Large = sensorSet(imx490Large,'volts',volts);
+
+    case 'bestsnr'
+        % Choose the pixel with the most electrons and thus best SNR.
+        e1 = sensorGet(imx490Large1,'electrons');
+        e2 = sensorGet(imx490Large2,'electrons');
+        e3 = sensorGet(imx490Small1,'electrons');
+        e4 = sensorGet(imx490Small2,'electrons');
+
+        % Find pixels with electrons below well capacity.
+    case 'bestsnr'
+        % Choose the pixel with the most electrons and thus best SNR.
+        e1 = sensorGet(imx490Large1,'electrons');
+        e2 = sensorGet(imx490Large2,'electrons');
+        e3 = sensorGet(imx490Small1,'electrons');
+        e4 = sensorGet(imx490Small2,'electrons');
+
+        % Find pixels with electrons below well capacity. Set the
+        % saturated levels to zero so they do not appear as the max.
+        wcL = sensorGet(imx490Large,'pixel well capacity');
+        wcS = sensorGet(imx490Small,'pixel well capacity');
+        idx1 = (e1 < wcL); idx2 = (e2 < wcL);
+        idx3 = (e3 < wcS); idx4 = (e4 < wcS);
+        e1(~idx1) = 0; e2(~idx2) = 0; e3(~idx3) = 0; e4(~idx4) = 0;
+
+        % Find the pixel with the most non-saturated electrons
+        [val,bestPixel] = max([e1(:), e2(:), e3(:), e4(:)],[],2);
+        val = reshape(val,size(e1));
+        bestPixel = reshape(bestPixel,size(e1));
+
+        % Store the winning values as volts so the dv computation
+        % below applies to this method as well. We convert electrons
+        % to volts with the conversion gain of the winning pixel
+        % (channels 1,2 are the large pixel; 3,4 the small pixel).
+        cg = cgLarge*ones(size(val));
+        cg(bestPixel > 2) = cgSmall;
+        volts = val .* cg;
+        imx490Large = sensorSet(imx490Large,'volts',volts);
+        %{
+         ieNewGraphWin; imagesc(val);
+         cm = [1 0 0; 1 0.5 0; 0 0 1; 0 0.5 1; 1 1 1];
+         ieNewGraphWin; colormap(cm); image(bestPixel);
+        %}
+    otherwise
+        error('Unknown method %s\n',method);
+end
+
+%{
+% Choose the pixel that is (a) in range, and (b) has the most electrons.
+% That value has the best SNR.
+%
+% Try to think about the gain. Which of the two gains should we use, given
+% that we pick the pixel with more electrons that is not saturated?
+%}
+
+% Convert and save digital values
+nbits = sensorGet(imx490Large,'nbits');
+dv = 2^nbits*ieScale(volts,1);
+imx490Large = sensorSet(imx490Large,'dv',dv);
+
+imx490Large = sensorSet(imx490Large,'name','Combined');
+
+metadata.sensorArray = sensorArray;
+metadata.method = method;
+
+end
+%{
+% And now so that the fov of the two pixel sizes match by the perfect
+% factor of 3.
+rowcol = sensorGet(imx490Small,'size');
+rowcol = ceil(rowcol/3)*3;
+imx490Small = sensorSet(imx490Small,'size',rowcol);
+imx490Large = sensorSet(imx490Large,'size',rowcol/3);
+%}
+
+%% Subsample the small pixel sensor
+%
+% When the sensor is RG/GB and the pixel size ratio is exactly 3:1, we
+% can subsample the small pixels to match the color and spatial scale
+% perfectly.
+
+%{
+% This finds the small pixels that correspond to the large pixel
+% position. The effective pixel size becomes the size of the large
+% pixel. The whole routine only works for the 3:1 ratio.
+pixelSize = sensorGet(imx490Large,'pixel size');
+sSize = sensorGet(imx490Small,'size');
+
+resample1 = 1:3:sSize(1);
+resample2 = 1:3:sSize(2);
+sSize = sensorGet(imx490Large,'size');
+
+resample1 = resample1(1:sSize(1));
+resample2 = resample2(1:sSize(2));
+
+v3 = sensorGet(imx490Small1,'volts');
+v3 = v3(resample1,resample2);
+imx490Small1 = sensorSet(imx490Small1, 'volts', v3);
+
+dv3 = sensorGet(imx490Small1,'dv');
+dv3 = dv3(resample1,resample2);
+imx490Small1 = sensorSet(imx490Small1, 'dv', dv3);
+imx490Small1 = sensorSet(imx490Small1,'pixel size same fill factor',pixelSize);
+
+% Small, high gain
+v4 = sensorGet(imx490Small2,'volts');
+v4 = v4(resample1,resample2);
+imx490Small2 = sensorSet(imx490Small2, 'volts', v4);
+
+dv4 = sensorGet(imx490Small2,'dv');
+dv4 = dv4(resample1,resample2);
+imx490Small2 = sensorSet(imx490Small2, 'dv', dv4);
+imx490Small2 = sensorSet(imx490Small2,'pixel size same fill factor',pixelSize);
+%}
+
+%% Combine data from different sensors
+
+% The first idea is to input refer the voltages and then combine them.
+% To input refer, we multiply by the ratio of their apertures (3^2) and
+% divide by their gain.
+%
+% To properly input refer, we need to account for the conversion gain. 
Or +% we need to use the 'electrons' +%{ +e1 = sensorGet(imx490Large1,'electrons')*gains(1); +e2 = sensorGet(imx490Large2,'electrons')*gains(2); +e3 = sensorGet(imx490Small1,'electrons')*(pdArea1/pdArea2)*gains(3); +e4 = sensorGet(imx490Small2,'electrons')*(pdArea1/pdArea2)*gains(4); + +% For a uniform scene input, these should all be the same +% mean2(v1), mean2(v2), mean2(v3), mean2(v4) +%} \ No newline at end of file diff --git a/sensor/pixel/pixelIdeal.m b/sensor/pixel/pixelIdeal.m index b4bf85ac..025aaa8d 100644 --- a/sensor/pixel/pixelIdeal.m +++ b/sensor/pixel/pixelIdeal.m @@ -30,4 +30,4 @@ pixel = pixelSet(pixel,'darkVoltage',0); % No dark noise pixel = pixelSet(pixel,'voltage swing',1e6); % 1,000,000 volts -return \ No newline at end of file +end \ No newline at end of file diff --git a/sensor/s_sensorIMX490Test.m b/sensor/s_sensorIMX490Test.m new file mode 100644 index 00000000..4099f136 --- /dev/null +++ b/sensor/s_sensorIMX490Test.m @@ -0,0 +1,119 @@ +%% Illustrate the imx490 +% +% + +%% In this case the volts are 4x but the electrons are equal +% +% As it should be, IMHO. + +scene = sceneCreate('uniform',256); +oi = oiCreate; +oi = oiCompute(oi,scene); % oiWindow(oi); +oi = oiCrop(oi,'border'); +oi = oiSpatialResample(oi,3e-6); +[sensor,metadata] = imx490Compute(oi,'method','average','exptime',1/10); + +% For the HDR car scene use exptime of 0.1 sec +sArray = metadata.sensorArray; + +sensorWindow(sensor); + +% Note: The ratio of electron capture makes sense. The conversion gain, +% however, differs so when we plot w.r.t volts the ratios are not as you +% might naively expect. The dv values follow volts. +sensorWindow(sArray{1}); +sensorWindow(sArray{2}); +sensorWindow(sArray{3}); +sensorWindow(sArray{4}); + +%% Various checks. +e1 = sensorGet(sArray{1},'electrons'); +e2 = sensorGet(sArray{2},'electrons'); +ieNewGraphWin; plot(e1(:),e2(:),'.'); +identityLine; grid on; + +v1 = sensorGet(sArray{1},'volts'); +v2 = sensorGet(sArray{2},'volts'); +ieNewGraphWin; plot(v1(:),v2(:),'.'); +identityLine; grid on; + +% e3 is 1/9th the area, so 1/9th the electrons of e1 +e3 = sensorGet(sArray{3},'electrons'); +ieNewGraphWin; plot(e1(:),e3(:),'.'); +identityLine; grid on; + +dv1 = sensorGet(sArray{1},'dv'); +dv2 = sensorGet(sArray{2},'dv'); +ieNewGraphWin; plot(dv1(:),dv2(:),'.'); +identityLine; grid on; + + +%% Now try with a complex image + +load('HDR-02-Brian','scene'); +oi = oiCreate; +oi = oiCompute(oi,scene); % oiWindow(oi); +oi = oiCrop(oi,'border'); +oi = oiSpatialResample(oi,3,'um'); % oiWindow(oi); +oi2 = oiCompute(oi,scene,'crop',true,'pixel size',3e-6); % oiWindow(oi2); +oi2 = oiSpatialResample(oi2,3,'um'); % oiWindow(oi); + +[sensor,metadata] = imx490Compute(oi,'method','average','exptime',1/10); +sArray = metadata.sensorArray; + +% Note that the electrons match up to voltage saturation +e1 = sensorGet(sArray{1},'electrons'); +e2 = sensorGet(sArray{2},'electrons'); +ieNewGraphWin; plot(e1(:),e2(:),'.'); +identityLine; grid on; + +v1 = sensorGet(sArray{1},'volts'); +v2 = sensorGet(sArray{2},'volts'); +ieNewGraphWin; plot(v1(:),v2(:),'.'); +identityLine; grid on; + +sensorWindow(sArray{1}); +sensorWindow(sArray{2}); + +%% Make an ideal form of the image + +scene = sceneCreate('uniform',256); +oi = oiCreate; +oi = oiCompute(oi,scene); % oiWindow(oi); +oi = oiCrop(oi,'border'); +oi = oiSpatialResample(oi, 3,'um'); +oiGet(oi,'size') + +% Calculate the imx490 sensor +sensor = imx490Compute(oi,'method','average','exptime',1/10); + +% Could just do an oiGet(oi,'xyz') +% +% Or we can 
create matched, ideal X, Y, Z sensors that calculate
+% the XYZ values at each pixel.
+sensorI = sensorCreateIdeal('match xyz',sensor);
+sensorI = sensorCompute(sensorI,oi);
+sensorWindow(sensorI(3));
+sensorGet(sensorI(1),'pixel fill factor')
+
+% The sensor data and the oi data have the same vector length. Apart
+% from maybe a pixel at one edge or the other, they should be aligned.
+
+%%
+[sensor,metadata] = imx490Compute(oi,'method','best snr','exptime',1/3);
+
+%%
+ip = ipCreate;
+ip = ipCompute(ip,sensor);
+ipWindow(ip);
+
+%% For the uniform case, these should be about 4x
+uData1 = sensorPlot(sArray{1},'electrons hline',[55 1]);
+sensorPlot(sArray{2},'electrons hline',[55 1]);
+
+% These are OK. A factor of 4.
+uData2 = sensorPlot(sArray{3},'electrons hline',[150 1]);
+sensorPlot(sArray{4},'electrons hline',[150 1]);
+
+%% END
diff --git a/sensor/sensorCreate.m b/sensor/sensorCreate.m
index b8a0edb9..7cd8cb9a 100644
--- a/sensor/sensorCreate.m
+++ b/sensor/sensorCreate.m
@@ -27,8 +27,12 @@
 %      sensorCreate('MT9V024',[],{'rgb','mono','rccc'})
 %      {'ar0132at'} - An ON sensor used in automotive applications
 %          sensorCreate('ar0132at',[],{'rgb','rgbw','rccc'})
+%
 %      {'imx363'} - A widely used Sony digital camera sensor (used
 %          in the Google Pixel 4a)
+%      {'imx490-large'} - The Sony imx490 sensor, large pixel
+%      {'imx490-small'} - The Sony imx490 sensor, small pixel
+%
 %      {'nikond100'} - An older model Nikon (D100)
 %
 %  Other types
@@ -289,6 +293,49 @@
         %  sensorCreate('imx363',[],'row col',[300 400]);
         sensor = sensorIMX363('row col',[600 800], varargin{:});
 
+    case {'imx490-large'}
+        % Variant of the IMX363 that contains a big pixel and a small
+        % pixel. These pixel parameters were determined by Zhenyi as
+        % part of ISETAuto. Each of these pixels, the large and the
+        % small, is read out with a low and a high gain, producing the
+        % four channels.
+        %
+        % From the Lucid site.
+        %   Integration times
+        %     min of 86.128 μs to max of 5 s
+        %
+        % Original value from ZL - 5.5845e-06. But Lucid site says 3 um.
+        % I adjusted to 3 um per the site, but shrank the fill factor so
+        % the small pixel fits into the remaining space (roughly a
+        % 0.85/0.15 split).
+        sensor = sensorIMX363('row col',[600 800], ...
+            'pixel size',3e-06, ...
+            'dn2volts',0.25e-3, ...
+            'digitalblacklevel', 64, ...
+            'digitalwhitelevel', 4096, ...
+            'wellcapacity', 120000, ...
+            'fillfactor',0.9, ...
+            'isospeed',55, ...
+            'read noise',1,...
+            'quantization','12 bit',...
+            'name','imx490-large');
+
+    case {'imx490-small'}
+        % Variant of the IMX363 that contains a big pixel and a small
+        % pixel. These pixel parameters were determined by Zhenyi as
+        % part of ISETAuto. Each of these pixels, the large and the
+        % small, is read out with a low and a high gain.
+
+        sensor = sensorIMX363('row col',[600 800], ...
+            'pixel size',3e-06, ...
+            'dn2volts',0.25e-3, ...
+            'digitalblacklevel', 64, ...
+            'digitalwhitelevel', 4096, ...
+            'wellcapacity', 60000, ...
+            'fillfactor',0.1, ...
+            'isospeed',55, ...
+            'read noise',1,...
+            'quantization','12 bit',...
+            'name','imx490-small');
+
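+        % Quick check (a sketch): compare the fill factors of the two
+        % pixel models.
+        %{
+          sLarge = sensorCreate('imx490-large');
+          sSmall = sensorCreate('imx490-small');
+          sensorGet(sLarge,'pixel fill factor')   % about 0.9
+          sensorGet(sSmall,'pixel fill factor')   % about 0.1
+        %}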
     case 'nikond100'
         % Old model. I increased the spatial samples before
         % returning the sensor.
@@ -414,7 +461,7 @@
 sensor = sensorSet(sensor,'autoexposure',1);
 sensor = sensorSet(sensor,'CDS',0);
 
-if ~isequal(sensorType, 'imx363')
+if ~(isequal(sensorType, 'imx363') || isequal(sensorType,'imx490'))
     % Put in a default infrared filter. All ones.
     sensor = sensorSet(sensor,'irfilter',ones(sensorGet(sensor,'nwave'),1));
 end
@@ -615,8 +662,8 @@
 % Set the default values here
 p.addParameter('rowcol',[3024 4032],@isvector);
-p.addParameter('pixelsize',1.4 *1e-6,@isnumeric);
-p.addParameter('analoggain',1.4 *1e-6,@isnumeric);
+p.addParameter('pixelsize',1.4e-6,@isnumeric);
+p.addParameter('analoggain',1.4e-6,@isnumeric);
 p.addParameter('isospeed',270,@isnumeric);
 p.addParameter('isounitygain', 55, @isnumeric);
 p.addParameter('quantization','10 bit',@(x)(ismember(x,{'12 bit','10 bit','8 bit','analog'})));
@@ -631,9 +678,9 @@
 p.addParameter('exposuretime',1/60,@isnumeric);
 p.addParameter('wave',390:10:710,@isnumeric);
 p.addParameter('readnoise',5,@isnumeric);
-p.addParameter('qefilename', fullfile(isetRootPath,'data','sensor','qe_IMX363_public.mat'), @isfile);
-p.addParameter('irfilename', fullfile(isetRootPath,'data','sensor','ircf_public.mat'), @isfile);
-p.addParameter('nbits', 10, @isnumeric);
+p.addParameter('qefilename', fullfile(isetRootPath,'data','sensor','colorfilters','auto','SONY','cf_imx490.mat'), @isfile);
+p.addParameter('irfilename', fullfile(isetRootPath,'data','sensor','irfilters','ircf_public.mat'), @isfile);
+p.addParameter('name','imx363',@ischar);
 
 % Parse the varargin to get the parameters
 p.parse(varargin{:});
@@ -657,7 +704,6 @@
 readnoise   = p.Results.readnoise;    % Read noise in electrons
 qefilename  = p.Results.qefilename;   % QE curve file name
 irfilename  = p.Results.irfilename;   % IR cut filter file name
-nbits       = p.Results.nbits;        % needs to be set for bracketing to work
 
 %% Initialize the sensor object
@@ -673,7 +719,6 @@
 sensor = sensorSet(sensor,'pixel voltage swing', voltageSwing);
 sensor = sensorSet(sensor,'pixel dark voltage', darkvoltage) ;
 sensor = sensorSet(sensor,'pixel read noise electrons', readnoise);
-
 % Gain and offset - Principles
 %
 % In ISETCam we use this formula to incorporate channel gain and offset
@@ -713,7 +758,6 @@
 sensor = sensorSet(sensor,'exp time',exposuretime);
 sensor = sensorSet(sensor,'quantization method', quantization);
 sensor = sensorSet(sensor,'wave', wavelengths);
-sensor = sensorSet(sensor,'quantization method','10 bit');
 
 % Adjust the pixel fill factor
 sensor = pixelCenterFillPD(sensor,fillfactor);
@@ -722,7 +766,7 @@
 [data,filterNames] = ieReadColorFilter(wavelengths,qefilename);
 sensor = sensorSet(sensor,'filter spectra',data);
 sensor = sensorSet(sensor,'filter names',filterNames);
-sensor = sensorSet(sensor,'Name','IMX363');
+sensor = sensorSet(sensor,'Name',p.Results.name);
 
 % import IR cut filter
 sensor = sensorReadFilter('infrared', sensor, irfilename);
diff --git a/sensor/sensorCreateIdeal.m b/sensor/sensorCreateIdeal.m
index c934d036..2c3899d9 100644
--- a/sensor/sensorCreateIdeal.m
+++ b/sensor/sensorCreateIdeal.m
@@ -1,56 +1,80 @@
 function sensor = sensorCreateIdeal(idealType,sensorExample,varargin)
 %Create an ideal image sensor array based on the sensor example
 %
+% Synopsis
 %   sensor = sensorCreateIdeal(idealType,[sensorExample],varargin)
 %
-% Create an ideal image sensor array. The array contains ideal pixels (no
-% read noise, dark voltage, 100% fill-factor). Such an array can be used as
-% a comparison to a typical sensor. We also use this to simply calculate
-% the number of photons incident at each pixel.
+% Brief
+%   Create an ideal image sensor array. The key step is setting the
+%   noise flag at the end (to -1). The other step is that we can swap
+%   in XYZ filters for the existing color filters, creating a matched
+%   'XYZ' sensor. 
% -% For the ideal pixel, the spectral quantum efficiency of the detector is -% 100% at all wavelengths. -% -% For a general sensor, you may wish to control its noise properties using -% the 'noise flag' option in sensorSet(). That option is a little -% different from this because it does not force a 100% fill factor. +% It is possible to get the XYZ from the oi directly, without even +% going through the sensor! (oiGet(oi,'xyz')). % % Inputs: -% idealType: The type of ideal sensor array we create is determined by -% this parameter and the sensorExample. The options are +% idealType: The sensor array we create is determined by this parameter +% and the sensorExample. The fill factor or other geometry parameters +% are not adjusted. % % * match: - Same as sensor example, but noise turned off % * match xyz: - As above, but also replace color filters with % XYZQuanta filters % -% These do not involve a match, they are just sensor arrays with no DSNU, -% PRNU, and the noiseFlag set to 1. The oi, sensor parameters are the -% defaults +% These do not involve a match, they are just sensor arrays with +% no DSNU, PRNU, and the noiseFlag set to -1. The oi, sensor +% parameters are the defaults % % * monochrome - 100% filter transmission (Clear), default sensor % * XYZ - XYZQuanta filters, default sensor % -% When the string is chosen, the default parameters for the sensor and -% pixel are created and then these are made ideal. +% When the string is chosen, the default parameters for the sensor and +% pixel are created and then these are made ideal. % -% See also: cameraCreate - Calls this one for certain tests. +% sensorExample - A sensor that is matched % -% Example -% pixSize = 3*1e-6; -% sensor = sensorCreateIdeal('monochrome',[],pixSize); % 3 micron, ideal monochrome +% Output +% sensorI - An array of monochrome sensors, one for each of the color +% channels of the idealType sensor. % -% Or, 3 micron, ideal, stockman, regular grid -% pixSize = 2*1e-6; -% sensor = sensorCreateIdeal('human',pixSize); +% Description: +% The array contains ideal pixels (no read noise, dark voltage, 100% +% fill-factor). Such an array can be used as a comparison to a typical +% sensor. We also use this to simply calculate the number of photons +% incident at each pixel. % -% Match a single mosaicked sensor with an array of monochrome sensors that -% have the noise terms set to zero. -% sensor = sensorCreate; -% sensor = sensorCreateIdeal('match',sensor); +% For the ideal pixel, the spectral quantum efficiency of the detector is +% 100% at all wavelengths. % +% For a general sensor, you may wish to control its noise properties using +% the 'noise flag' option in sensorSet(). That option is a little +% different from this because it does not force a 100% fill factor. % -% Copyright ImagEval Consultants, LLC, 2005 +% See also: +% cameraCreate - Calls this one for certain tests. + +% Examples: +%{ +pixSize = 3*1e-6; +sensorI = sensorCreateIdeal('monochrome',[],pixSize); % 3 micron, ideal monochrome +%} +%{ +% Or, 2 micron, ideal, XYZ +sensor = sensorCreate; +sensor = sensorSet(sensor,'pixel size same fill factor',2e-6); +% sensor = sensorSet(sensor,'pixel fill factor',1); +sensorI = sensorCreateIdeal('match xyz',sensor); +%} +%{ +% Match a single mosaicked sensor with an array of monochrome sensors that +% have the noise terms set to zero. 
+sensor = sensorCreate;
+sensorI = sensorCreateIdeal('match',sensor);
+%}
+
+% TODO
+% Replace idealPixel in this function with the external pixelIdeal
 
 %% Read arguments
 if ieNotDefined('idealType'), idealType = 'monochrome'; end
@@ -64,14 +88,14 @@
         % original and the noise flag is set to photon noise only.
         % We also set the exposure time to be equal to either the first, or
         % the default (if auto exposure is set).
-        
+
         if ieNotDefined('sensorExample'), error('Example needed'); end
-        
+
         % Determine key parameters for the example
         N = sensorGet(sensorExample,'nfilters');
         colorFilters     = sensorGet(sensorExample,'color filters');
         colorFilterNames = sensorGet(sensorExample,'filter names');
-        
+
         % We can't have autoexposure for this case, because each array
         % would have its own time. We want the integration times to be
         % equal for the separate channels.
@@ -81,14 +105,14 @@
         else
             expTime = sensorGet(sensorExample,'exp time');
         end
-        
+
         cfilters = sensorGet(sensorExample,'color filters');
         sensorExample = sensorSet(sensorExample,'color filters',cfilters(:,1));
         sensorExample = sensorSet(sensorExample,'pattern',1);
         sensorExample = sensorSet(sensorExample,'filter name',{'dummy'});
-        
+
         for ii=N:-1:1, sensor(ii) = sensorExample; end
-        
+
         % Edit the sensor parameters: monochrome, named, zero noise, and
         % noise flag set for photons only. It would be OK to just use the
         % noise flag, except for the fact that we need demosaic
@@ -96,19 +120,23 @@
             sensor(ii) = sensorSet(sensor(ii),'name',sprintf('mono-%s',colorFilterNames{ii})); %#ok<*AGROW>
             sensor(ii) = sensorSet(sensor(ii),'filter spectra',colorFilters(:,ii));
             sensor(ii) = sensorSet(sensor(ii),'filter names',{colorFilterNames{ii}});
-            
+
             % Rather than set the noise parameters to zero, we set the
             % noise flag (to -1, at the end of this function).
-            sensor(ii) = sensorSet(sensor(ii),'noise flag',1);
-            
+            % sensor(ii) = sensorSet(sensor(ii),'noise flag',1);
+
             % Equal exposure times.
             sensor(ii) = sensorSet(sensor(ii),'integration time',expTime);
-        end
-        
+        end
+
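+        % Usage sketch for the 'match' case (assumes a computed oi;
+        % the returned sensors have noise flag -1, so no noise):
+        %{
+          scene = sceneCreate; oi = oiCompute(oiCreate,scene);
+          sI = sensorCreateIdeal('match',sensorCreate);
+          s1 = sensorCompute(sI(1),oi);
+          e  = sensorGet(s1,'electrons');  % noise-free absorptions
+        %}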
     case 'matchxyz'
+        % Create a sensor array with CIE XYZ filters that matches the
+        % example.
+        if ieNotDefined('sensorExample'), error('Example needed'); end
+
+        % Clean up the noise
         sensor = sensorCreateIdeal('match',sensorExample);
-        
+
         % Replace current filters with XYZQuanta filters.
         % We scale the XYZQuanta to a peak of one. This means the volts
         % are not in units of candelas/m2.
@@ -125,11 +153,11 @@
             sensor(ii) = sensorSet(sensor(ii),'filter spectra',transmissivities(:,ii));
             sensor(ii) = sensorSet(sensor(ii),'filter names',filterNames{ii});
         end
-        
+
     case 'monochrome'
-        % An ideal monochrome sensor. Photon noise in computation. Ideal
-        % pixel, with no significant noise characteristics. DSNU and PRNU
-        % are not calculated because noise flag is set to 1.
+        % Create an ideal monochrome sensor. Ideal pixel, with no
+        % significant noise characteristics. DSNU and PRNU are not
+        % calculated because the noise flag is set to -1 below.
         sensor = sensorCreate('monochrome');
         sensor = sensorSet(sensor,'name','Monochrome');
         if ~isempty(varargin)
@@ -148,11 +176,11 @@
         pixel  = sensorGet(sensor,'pixel');
         sensor = sensorSet(sensor,'pixel',idealPixel(pixel,pixelSizeM));
-        sensor = sensorSet(sensor,'noise flag',1); % Photon noise only
-        
+
     case {'xyz'}
-        % Create an array of XYZ monochrome sensors.
-        
+        % Create an array of XYZ monochrome sensors that match the default
+        % in sensorCreate.
+
         % Creating the last one this way forces 1 and 2 to be the same
         % type of structure. Not sure about a simpler way to do this.
         sensor(3) = struct(sensorCreate('monochrome'));
@@ -161,15 +189,17 @@
         end
         sensorNames = {'CIE-X-ideal','CIE-Y-ideal','CIE-Z-ideal'};
-        
+
         % CIE XYZ quanta fundamentals.
         pixel = sensorGet(sensor(1),'pixel');
         if ieNotDefined('pixelSizeInMeters')
             disp('2.8 micron sensor created');
             pixelSizeInMeters = 2.8e-6;
         end
+
+        % No noise and fill factor 1
         pixel = idealPixel(pixel,pixelSizeInMeters);
-        
+
         % We scale the XYZ Quanta to a peak of one. This means the volts
         % are not in units of candelas/m2.
         %
@@ -178,11 +208,11 @@
         % scaling is not optional.
         fname = fullfile(isetRootPath,'data','human','XYZQuanta.mat');
         wave  = 400:10:700;
-        transmissivities = vcReadSpectra(fname, wave);   %Load and interpolate filters
+        transmissivities = ieReadSpectra(fname, wave);   %Load and interpolate filters
         transmissivities = transmissivities/max(transmissivities(:));
-        
+
         filterNames = {{'rX'}, {'gY'}, {'bZ'}};  %Names for color plots
-        
+
         for ii=1:3
             sensor(ii) = sensorSet(sensor(ii),'pixel',pixel);
             sensor(ii) = sensorSet(sensor(ii),'name',sensorNames{ii});
@@ -190,20 +220,32 @@
             sensor(ii) = sensorSet(sensor(ii),'filter names',filterNames{ii});
             sensor(ii) = sensorSet(sensor(ii),'integration time',1);
         end
-        
+
     otherwise
         error('Unknown sensor type.');
 end
 
-return
+% Turn off all noise for the ideal sensors.
+for ii=1:numel(sensor)
+    % No photon noise, no electrical pixel noise, no sensor fixed
+    % pattern noise. The volts are always available and continuous,
+    % the dv field depends on the quantization method.
+    sensor(ii) = sensorSet(sensor(ii),'noise flag',-1);
+end
+
+end
+
 % Consider pixelCreate('ideal',pixelSizeInMeters);
 function pixel = idealPixel(pixel,pixelSizeInMeters)
-%
 % Ideal (noise-free) pixel. But, I think this subroutine should go away
 % and we should use pixelIdeal and sensorIdeal.
 
+if numel(pixelSizeInMeters) == 1
+    pixelSizeInMeters(2) = pixelSizeInMeters;
+end
+
 pixel = pixelSet(pixel,'readNoiseVolts',0);
 pixel = pixelSet(pixel,'darkVoltage',0);
 pixel = pixelSet(pixel,'width',pixelSizeInMeters(2));
@@ -214,4 +256,4 @@
 pixel = pixelSet(pixel,'darkVoltage',0);
 pixel = pixelSet(pixel,'voltage swing',1e6);
 
-return
+end
diff --git a/sensor/sensorCrop.m b/sensor/sensorCrop.m
index 467bedf9..7b810831 100644
--- a/sensor/sensorCrop.m
+++ b/sensor/sensorCrop.m
@@ -130,4 +130,6 @@
     sensor = sensorSet(sensor,'digital values',newDV);
 end
 
+sensor = sensorSet(sensor,'metadata crop',rect);
+
 end
diff --git a/sensor/sensorGet.m b/sensor/sensorGet.m
index cdde9271..dcc32a2d 100644
--- a/sensor/sensorGet.m
+++ b/sensor/sensorGet.m
@@ -76,6 +76,9 @@
 %      'electrons'         - Sensor output in electrons
 %         A single color plane can be returned
 %         sensorGet(sensor,'electrons',2);
+%      'electrons per area' - Electrons normalized by the photodetector
+%         area. Default is electrons/m^2; pass a unit for others, e.g.
+%         sensorGet(sensor,'electrons per area','um')  % electrons/um^2
 %      'chromaticity'      - Sensor rg-chromaticity after Demosaicking (roiRect allowed)
 %      'dv or volts'       - Return either dv if present, otherwise volts
 %      'roi locs'          - Stored region of interest (roiLocs)
@@ -443,19 +446,54 @@
             val = sensorColorData(val,sensor,varargin{1});
         end
 
-    case {'electron','electrons','photons'}
+    case {'electron','electrons'}
         % sensorGet(sensor,'electrons');
+        % sensorGet(sensor,'electrons',[colorband]);
         % sensorGet(sensor,'electrons',2);
-        % This is also used for human case, where we call the data photons
-        % as in photon absorptions.
+        %
+        % Removed 'photons' from the list March 25, 2024 (BW)
         pixel = sensorGet(sensor,'pixel');
-        val = sensorGet(sensor,'volts')/pixelGet(pixel,'conversionGain');
-        
+
+        % The volts also have an analog gain and offset that must
+        % be discounted. Until March 25, 2024 we did not account
+        % for this. Mostly gain/offset was 1/0, and we didn't
+        % notice. Then with the imx490 gain manipulations, the bug
+        % was found. This was never a problem with computed
+        % voltage or dv. But if we estimated the electrons in a
+        % sensor with a nonunity gain or an offset, the estimated
+        % number of electrons at capture was off.
+
+        % The sensor compute function:
+        %    volts = (voltsRaw + ao)/ag;
+        % To invert
+        %    voltsRaw = volts*ag - ao
+        ag = sensorGet(sensor,'analog gain');
+        ao = sensorGet(sensor,'analog offset');
+        cg = pixelGet(pixel,'conversionGain');
+        val = (sensorGet(sensor,'volts')*ag - ao)/cg;
+
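+        % Worked example (hypothetical numbers): with ag = 0.25,
+        % ao = 0, and cg = 1e-4 V/e-, a stored 2 V inverts to
+        % voltsRaw = 2*0.25 = 0.5 V, i.e. 0.5/1e-4 = 5000 electrons.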
         % Pull out a particular color plane
-        if ~isempty(varargin), val = sensorColorData(val,sensor,varargin{1}); end
-        % Electrons are ints
-        val = round(val);
+        if ~isempty(varargin)
+            val = sensorColorData(val,sensor,varargin{1});
+        end
+        % Electrons are integers
+        val = round(val);
+
+    case {'electronsperarea'}
+        % sensorGet(sensor,'electrons per area',unit,colorband)
+        % sensorGet(sensor,'electrons per area','m',2)
+        % The default unit is 'm' (electrons per m^2).
+        units = 'm';
+        if ~isempty(varargin), units = varargin{1}; end
+
+        val = sensorGet(sensor,'electrons');
+        pdArea = sensorGet(sensor,'pixel pd area');
+        val = (val/pdArea)/(ieUnitScaleFactor(units)^2);
+
+        % Pull out a particular color plane
+        if length(varargin) > 1
+            val = sensorColorData(val,sensor,varargin{2});
+        end
+
     case {'dvorvolts'}
         val = sensorGet(sensor,'dv');
         if isempty(val)
@@ -1138,23 +1176,6 @@
         fov = sensorGet(sensor,'vfov');
         val = fov/width;
 
-        % Computational flags
-        %{
-    case {'sensorcompute','sensorcomputemethod'}
-        % Swap in a sensorCompute routine. If this is empty, then the
-        % standard vcamera\sensor\mySensorCompute routine will be used.
-        if checkfields(sensor,'sensorComputeMethod'), val = sensor.sensorComputeMethod;
-        else, val = 'mySensorCompute'; end
-        %}
-        %{
-    case {'consistency','computationalconsistency'}
-        % If the consistency field is not present, assume false and set it
-        % false. This checks whether the parameters and the displayed
-        % image are consistent/updated.
-        if checkfields(sensor,'consistency'), val = sensor.consistency;
-        else, sensorSet(sensor,'consistency',0); val = 0;
-        end
-        %}
     case {'chartparameters'}
         % Struct of chart parameters
         if checkfields(sensor,'chartP'), val = sensor.chartP; end
@@ -1197,7 +1218,15 @@
             val = false;
         end
 
-        % Human cone case
+        % Metadata - more to be added. See sensorSet()
+    case 'metadatacrop'
+        val = sensor.metadata.crop;
+    case 'metadatascenename'
+        val = sensor.metadata.scenename;
+    case 'metadataopticsname'
+        val = sensor.metadata.opticsname;
+
+        % Human cone case - Many of these should go away (BW)
     case {'human'}
         % Structure containing information about human cone case
         % Only applies when the name field has the string 'human' in it.
diff --git a/sensor/sensorIMX363V2.m b/sensor/sensorIMX363V2.m
new file mode 100644
index 00000000..a71c77ad
--- /dev/null
+++ b/sensor/sensorIMX363V2.m
@@ -0,0 +1,170 @@
+function sensor = sensorIMX363V2(varargin)
+% This is the modified code from Zhenyi in ISETAuto.
+% I tried to make the imx490 match this. 
+% +% Create the sensor structure for the IMX363 +% +% Synopsis +% sensor = sensorIMX363(varargin); +% +% Brief description +% Creates the default IMX363 sensor model +% +% Inputs +% N/A +% +% Optional Key/val pairs +% +% Return +% sensor - struct with the IMX363 model parameters +% +% Examples: ieExamplesPrint('sensorIMX363V2'); +% +% See also +% sensorCreate +% +% TODO: +% We need to make this one the one inside of sensorCreate +% Examples: +%{ + ieExamplesPrint('sensorIMX363V2'); +%} +%{ + % The defaults and some plots + sensor = sensorCreate('IMX363'); + sensorPlot(sensor,'spectral qe'); + sensorPlot(sensor,'cfa block'); + sensorPlot(sensor,'pixel snr'); +%} +%{ + % Adjust a parameter + sensor = sensorCreate('IMX363',[],'row col',[256 384]); + sensorPlot(sensor,'cfa full'); +%} + +%% Parse parameters + +% Building up the input parser will let you do more experiments with the +% sensor. + +% This removes spaces and lowers all the letters so you don't have to +% remember the syntax when you call the argument +varargin = ieParamFormat(varargin); + +% Start parsing +p = inputParser; + +% Set the default values here +p.addParameter('rowcol',[3024 4032],@isvector); +p.addParameter('pixelsize',1.4 *1e-6,@isnumeric); +p.addParameter('analoggain',1.4 *1e-6,@isnumeric); +p.addParameter('isospeed',270,@isnumeric); +p.addParameter('isounitygain', 55, @isnumeric); +p.addParameter('quantization','10 bit',@(x)(ismember(x,{'12 bit','10 bit','8 bit','analog'}))); +p.addParameter('dsnu',0,@isnumeric); % 0.0726 +p.addParameter('prnu',0.7,@isnumeric); +p.addParameter('fillfactor',1,@isnumeric); +p.addParameter('darkvoltage',0,@isnumeric); +p.addParameter('dn2volts',0.25 * 1e-3,@isnumeric); +p.addParameter('digitalblacklevel', 0, @isnumeric); +p.addParameter('digitalwhitelevel', 4096, @isnumeric); +p.addParameter('wellcapacity',6000,@isnumeric); +p.addParameter('exposuretime',1/60,@isnumeric); +p.addParameter('wave',390:10:710,@isnumeric); +p.addParameter('readnoise',1,@isnumeric); +p.addParameter('qefilename', fullfile(isetRootPath,'data','sensor','qe_IMX363_public.mat'), @isfile); +p.addParameter('irfilename', fullfile(isetRootPath,'data','sensor/irfilters','infrared.mat'), @isfile); +p.addParameter('nbits', 10, @isnumeric); + +% Parse the varargin to get the parameters +p.parse(varargin{:}); + +rows = p.Results.rowcol(1); % Number of row samples +cols = p.Results.rowcol(2); % Number of col samples +pixelsize = p.Results.pixelsize; % Meters +isoSpeed = p.Results.isospeed; % ISOSpeed, whatever that is +isoUnityGain = p.Results.isounitygain; % ISO speed equivalent to analog gain of 1x, for Pixel 4: ISO55 +quantization = p.Results.quantization; % quantization method - could be 'analog' or '10 bit' or others +wavelengths = p.Results.wave; % Wavelength samples (nm) +dsnu = p.Results.dsnu; % Dark signal nonuniformity +fillfactor = p.Results.fillfactor; % A fraction of the pixel area +darkvoltage = p.Results.darkvoltage; % Volts/sec +dn2volts = p.Results.dn2volts; % volt per DN +blacklevel = p.Results.digitalblacklevel; % black level offset in DN +whitelevel = p.Results.digitalwhitelevel; % white level in DN +wellcapacity = p.Results.wellcapacity; % Electrons +exposuretime = p.Results.exposuretime; % in seconds +prnu = p.Results.prnu; % Photoresponse nonuniformity +readnoise = p.Results.readnoise; % Read noise in electrons +qefilename = p.Results.qefilename; % QE curve file name +irfilename = p.Results.irfilename; % IR cut filter file name +nbits = p.Results.nbits; % needs to be set for bracketing to work + +%% 
Initialize the sensor object
+
+sensor = sensorCreate('bayer-rggb');
+
+%% Pixel properties
+voltageSwing   = whitelevel * dn2volts;
+conversiongain = voltageSwing/wellcapacity; % V/e-
+
+% set the pixel properties
+sensor = sensorSet(sensor,'pixel size same fill factor',[pixelsize pixelsize]);
+sensor = sensorSet(sensor,'pixel conversion gain', conversiongain);
+sensor = sensorSet(sensor,'pixel voltage swing', voltageSwing);
+sensor = sensorSet(sensor,'pixel dark voltage', darkvoltage) ;
+sensor = sensorSet(sensor,'pixel read noise electrons', readnoise);
+
+% Gain and offset - Principles
+%
+% In ISETCam we use this formula to incorporate channel gain and offset
+%
+%   (volts + offset)/gain
+%
+% Higher ISOspeed requires a bigger multiplier, so we use a formula like
+% this to convert speed to gain. The unity-gain ISO speed (55 for the
+% Pixel 4) is the 'isounitygain' input parameter.
+analogGain = isoUnityGain/isoSpeed; % For Pixel 4, ISO55 = gain of 1
+
+% A second goal is that the offset in digital counts is intended to be a
+% fixed level, no matter what the gain might be. To achieve that we need
+% to multiply the 64*one_lsb by the analogGain
+%
+analogOffset = (blacklevel * dn2volts) * analogGain; % sensor black level, in volts
+
+% The result is that the output volts are
+%
+%    outputV = (inputV + analogOffset)/analogGain
+%    outputV = inputV*ISOSpeed/55 + analogOffset/analogGain
+%    outputV = inputV*ISOSpeed/55 + 64*dn2volts
+%
+% Since the ADC always operates linearly on the voltage, and the step size
+% is one_lsb, the black level for the outputV is always 64. The gain on
+% the input signal is (ISOSpeed/55)
+%
+%
+%% Set sensor properties
+%sensor = sensorSet(sensor,'auto exposure',true);
+sensor = sensorSet(sensor,'rows',rows);
+sensor = sensorSet(sensor,'cols',cols);
+sensor = sensorSet(sensor,'dsnu level',dsnu);
+sensor = sensorSet(sensor,'prnu level',prnu);
+sensor = sensorSet(sensor,'analog Gain',analogGain);
+sensor = sensorSet(sensor,'analog Offset',analogOffset);
+sensor = sensorSet(sensor,'exp time',exposuretime);
+sensor = sensorSet(sensor,'quantization method', quantization);
+sensor = sensorSet(sensor,'wave', wavelengths);
+
+% Adjust the pixel fill factor
+sensor = pixelCenterFillPD(sensor,fillfactor);
+
+% import QE curve
+[data,filterNames] = ieReadColorFilter(wavelengths,qefilename);
+sensor = sensorSet(sensor,'filter spectra',data);
+sensor = sensorSet(sensor,'filter names',filterNames);
+sensor = sensorSet(sensor,'Name','IMX363');
+
+% import IR cut filter
+sensor = sensorReadFilter('infrared', sensor, irfilename);
+
+end
\ No newline at end of file
diff --git a/sensor/sensorSet.m b/sensor/sensorSet.m
index 03824c79..6f4c8d38 100644
--- a/sensor/sensorSet.m
+++ b/sensor/sensorSet.m
@@ -116,6 +116,7 @@
 %          -- TBD: Other recipe info
 %          -- Like illumination
 %      'metadata lensname'
+%      'metadata crop' -- Stored rect
 %
 % Private
 %      'editfilternames'
@@ -664,7 +665,8 @@
         sensor.metadata.scenename = val;
     case 'metadataopticsname'
         sensor.metadata.opticsname = val;
-        
+    case 'metadatacrop'
+        sensor.metadata.crop = val;
     otherwise
         error('Unknown parameter.');
 end
diff --git a/sensor/simulation/noise/noiseShot.m b/sensor/simulation/noise/noiseShot.m
index e79f5690..784a30a4 100644
--- a/sensor/simulation/noise/noiseShot.m
+++ b/sensor/simulation/noise/noiseShot.m
@@ -71,7 +71,8 @@
 % Convert the electron data into voltage signals
 conversionGain = sensorGet(sensor,'pixel conversion gain');
 
-% In volts
+% In volts, needs both conversion Gain and Sensor analog gain
+%
 noisyImage = 
conversionGain*round(electronImage + electronNoise); theNoise = conversionGain*electronNoise; diff --git a/sensor/simulation/sensorCompute.m b/sensor/simulation/sensorCompute.m index 00ca1780..42e4d15b 100644 --- a/sensor/simulation/sensorCompute.m +++ b/sensor/simulation/sensorCompute.m @@ -32,7 +32,7 @@ % % The conditions are: % -% noiseFlag | photon e-noises gain/offset clipping CDS +% noiseFlag | photon e-noises PRNU/DSNU clipping CDS % -2 | + 0 0 0 0 ('no pixel no system') % -1 | 0 0 0 0 0 ('no photon no pixel no system') % 0 | 0 0 + + + ('no photon no pixel') @@ -195,7 +195,7 @@ case 'log' % We need to keep the smallest value above zero. We also % want the read noise level to make sense with respect to - % the voltage swing. + % the voltage swing. Very little tested (BW). readNoise = sensorGet(sensor,'pixel read noise'); if readNoise == 0 warning('Invalid read noise for log response type. Using 2^16 of voltage swing'); @@ -223,7 +223,12 @@ % See sensorComputeNoise to run just this noise section when you have a % mean image and just want many noisy, clipped, quantized examples. noiseFlag = sensorGet(sensor,'noise flag'); - + + ag = sensorGet(sensor,'analogGain'); + ao = sensorGet(sensor,'analogOffset'); + sensor = sensorSet(sensor,'analog gain',1); + sensor = sensorSet(sensor,'analog offset',0); + % The noise flag rules for different integer values are described in % the header to this function. % @@ -236,6 +241,10 @@ if noiseFlag > 0 % if noiseFlag = 1, add photon noise, but not other noises % if noiseFlag = 2, add photon noise and other noises + + % At this point, the analog gain and offset have not yet + % been applied. So we over-ride the values that were sent + % in, and put them back after computing the noise. sensor = sensorAddNoise(sensor); end @@ -251,8 +260,6 @@ % Also, some people use gain as a multipler and some as a divider. % Sorry for that. Here you can see the formula. We divide by the % gain. - ag = sensorGet(sensor,'analogGain'); - ao = sensorGet(sensor,'analogOffset'); if ag ~=1 || ao ~= 0 if strcmp(responseType,'log') % We added a warning for the 'log' sensor type. Offset @@ -277,8 +284,14 @@ % volts = (volts + ao)/ag; sensor = sensorSet(sensor,'volts',volts); + + % Save the ag and ao. If we aren't in this section, we + % can just leave them as 1 and 0. + sensor = sensorSet(sensor,'analog gain',ag); + sensor = sensorSet(sensor,'analog offset',ao); end - + + %% Clipping % Applied for 0,1,2 diff --git a/tutorials/optics/t_wvfZernikeSet.m b/tutorials/optics/t_wvfZernikeSet.m index 92fb8d0b..05494f6e 100644 --- a/tutorials/optics/t_wvfZernikeSet.m +++ b/tutorials/optics/t_wvfZernikeSet.m @@ -29,7 +29,6 @@ % This method attaches the human lens to the optics [oi, wvf] = oiCreate('wvf human'); - wvfPlot(wvf,'psf','unit','um','wave',550,'plotrange',20); % PSF in micron scale oiPlot(oi,'psf'); @@ -42,16 +41,17 @@ oiWindow(oi); %% Change the defocus coefficient - +% +% Units are microns. 
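+% (These Zernike defocus coefficients are specified in microns of
+% wavefront error, not in diopters.)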
wvf = wvfCreate('wave',sceneGet(scene,'wave')); -D = [0 1 2]; % Amount of defocus +D = [0 1 2]; for ii=1:length(D) wvf = wvfSet(wvf,'zcoeffs',D(ii),{'defocus'}); wvf = wvfCompute(wvf,'human lca',true); wvfPlot(wvf,'psf','unit','um','wave',550,'plotrange',40); % PSF in micron scale oi = wvf2oi(wvf,'human lens',true); oi = oiCompute(oi,scene); - oi = oiSet(oi,'name',sprintf('Human D %.1f',D(ii))); + oi = oiSet(oi,'name',sprintf('Human defocus D %.1f microns',D(ii))); oiWindow(oi); end @@ -65,7 +65,7 @@ wvf = wvfCompute(wvf,'human lca',true); wvfPlot(wvf,'psf','unit','um','wave',550,'plotrange',40); % PSF in micron scale oi = oiCompute(wvf,scene); - oi = oiSet(oi,'name',sprintf('Human D %.1f, A %.1f',0.5,A(ii))); + oi = oiSet(oi,'name',sprintf('Human D %.1f microns, A %.1f microns',0.5,A(ii))); oiWindow(oi); end diff --git a/tutorials/sensor/t_sensorInputRefer.m b/tutorials/sensor/t_sensorInputRefer.m index 4c1eef03..710dd522 100644 --- a/tutorials/sensor/t_sensorInputRefer.m +++ b/tutorials/sensor/t_sensorInputRefer.m @@ -65,14 +65,14 @@ sensor = sensorCompute(sensor,oi); -% Get the photons from the first pixel type -photons = sensorGet(sensor,'photons'); +% Get the electrons from the first pixel type +electrons = sensorGet(sensor,'electrons'); % In the middle of the image to avoid the edges -photons = getMiddleMatrix(photons,[40,40]); +electrons = getMiddleMatrix(electrons,[40,40]); % This is the Photon absorptions per exposure (which is 1 sec) -pRate = mean(photons(:)); +pRate = mean(electrons(:)); % ieAddObject(oi); ieAddObject(sensor); % oiWindow; sensorImageWindow; @@ -90,13 +90,13 @@ fprintf('Through the optics the illuminance is %e lux\n',oiGet(oi,'mean illuminance')); sensor = sensorCompute(sensor,oi); -photons = sensorGet(sensor,'photons',1); +electrons = sensorGet(sensor,'electrons',1); % Photon absorptions per exposure -fprintf('Computed mean photon rate %e\n',mean(photons(:))) +fprintf('Computed mean photon rate %e\n',mean(electrons(:))) % Show the distribution -% vcNewGraphWin; histogram(photons(:),50) +% vcNewGraphWin; histogram(electrons(:),50) c2e = sensorGet(sensor,'integration time')/ q; @@ -113,12 +113,12 @@ nSamp = round(max(2*sqrt(tRate)*50,1000)); val = poissrnd(tRate,nSamp); -xval = min(photons(:)):max(photons(:)); +xval = min(electrons(:)):max(electrons(:)); vcNewGraphWin; n = hist(val(:),xval); bar(xval,n/sum(n(:))); hold on; -[n,c] = hist(photons(:),xval); +[n,c] = hist(electrons(:),xval); n = n/sum(n(:)); lst = (n > 0); plot(c(lst),n(lst),'ro-','linewidth',2); diff --git a/utility/file/ieWebGet.m b/utility/file/ieWebGet.m index 69e6e3f9..ee93076b 100644 --- a/utility/file/ieWebGet.m +++ b/utility/file/ieWebGet.m @@ -30,6 +30,7 @@ % remove temp files: Remove downloaded temporary file (default: true) % unzip: : Unzip the local file (default: true) % verbose : Print a report to the command window +% downloaddir : Directory for download % % Output % localFile: Name of the local download file @@ -131,6 +132,7 @@ p.addParameter('localname','',@ischar); % Defaults to remote name p.addParameter('removetempfiles', true, @islogical); p.addParameter('verbose',true,@islogical); % Tell the user what happened +p.addParameter('downloaddir','',@ischar); p.parse(varargin{:}); @@ -155,12 +157,16 @@ % s = ieWebGet('resource type','pbrtv4','resource name','kitchen'); % ISET3d must be on your path. - % The download directory is ignored by git in ISET3d. 
-    switch resourceType
-        case 'pbrtv3'
-            downloadDir = fullfile(piRootPath,'data','v3','web');
-        case 'pbrtv4'
-            downloadDir = fullfile(piRootPath,'data','scenes','web');
+    if ~isempty(p.Results.downloaddir)
+        % The user gave us a place to download to.
+        downloadDir = p.Results.downloaddir;
+    else
+        switch resourceType
+            case 'pbrtv3'
+                downloadDir = fullfile(piRootPath,'data','v3','web');
+            case 'pbrtv4'
+                downloadDir = fullfile(piRootPath,'data','scenes','web');
+        end
     end
 
     % This should never happen. The directory is part of ISET3d and is
@@ -207,9 +213,14 @@
         remoteFileName = strcat(resourceName, '.mat');
         resourceURL    = strcat(baseURL, remoteFileName);
-        downloadDir = fullfile(isetRootPath,'local','scenes', resourceType);
-        if ~isfolder(downloadDir), mkdir(downloadDir); end
 
+        if ~isempty(p.Results.downloaddir)
+            % The user gave us a place to download to.
+            downloadDir = p.Results.downloaddir;
+        else
+            downloadDir = fullfile(isetRootPath,'local','scenes', resourceType);
+        end
+        if ~isfolder(downloadDir), mkdir(downloadDir); end
         localFile = fullfile(downloadDir, remoteFileName);
 
         if askFirst
diff --git a/utility/image/imageSlantedEdge.m b/utility/image/imageSlantedEdge.m
index 3d3a24c6..002d2a62 100644
--- a/utility/image/imageSlantedEdge.m
+++ b/utility/image/imageSlantedEdge.m
@@ -1,17 +1,18 @@
 function img = imageSlantedEdge(imSize,slope, darklevel)
-% Make a slanted edge binary image
+% Make a slanted edge image - always an odd number of rows and cols
 %
-% Syntax
-%   img = imageSlantedEdge(imSize,slope,darklevel);
+% Brief
+%   This target is used for the ISO 12233 standard. By construction
+%   the image size is always returned as odd. The bright side is
+%   always 1. The dark level is a parameter.
 %
-% Description
-%   Create a slanted edge image. Useful for resolution
-%   testing.
+% Synopsis
+%   img = imageSlantedEdge(imSize,slope,darklevel);
 %
 % Inputs
-%  imSize - (row,col) (384,384) by default
-%  slope - slope of the edge (2.6 default)
-%  darklevel - The white level is 1. This sets the dark level (0 default)
+%  imSize    - (row,col) (384,384) by default
+%  slope     - slope of the edge (2.6 default)
+%  darklevel - Dark side (0 default). White side is always 1.
 %
 % Key/val pairs
 %  N/A
@@ -19,7 +20,10 @@
 % Outputs
 %   img:  A slanted edge image with a slope at the edge.
 %
-% JEF/BW 2019
+% Description
+%   The axes for an image start with (1,1) at the upper left. So a
+%   slope of 2.6 produces a line y = slope*x that becomes an image
+%   with the edge sloping downwards.
 %
 % See also
 %   sceneCreate('slanted edge')
@@ -45,7 +49,13 @@
 if numel(imSize) == 1, imSize(2) = imSize; end
 
 %% Make the image
-imSize = round(imSize/2);
+
+% We force the image size to be odd.
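+%
+% For example, imageSlantedEdge(384) halves the size (round(384/2) =
+% 192) and builds a grid over -192:192, returning a 385 x 385 image.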
+
+% imSize is now the half-width; it can be even or odd
+imSize = round(imSize/2);
+
+% The grid spans -imSize:imSize, which has 2*imSize+1 (odd) samples
 [X,Y] = meshgrid(-imSize(2):imSize(2),-imSize(1):imSize(1));
 
 img = ones(size(X))*darklevel;
diff --git a/utility/plots/plotRadiance.m b/utility/plots/plotRadiance.m
index a1ae03f3..6e80e251 100644
--- a/utility/plots/plotRadiance.m
+++ b/utility/plots/plotRadiance.m
@@ -11,6 +11,8 @@
 % Optional key/value pairs
 %   title -
 %   hdl   - Use this hdl instead of ieNewGraphWin
+%   line width
+%   color
 %
 % Returns;
 %    hdl - ieNewGraphWin handle
@@ -24,12 +26,18 @@
 %
 
 %% Parse
+
+varargin = ieParamFormat(varargin);
+
 p = inputParser;
 
 p.addRequired('wavelength',@isvector);
 p.addRequired('radiance',@isnumeric);
 p.addParameter('title','Spectral radiance',@ischar);
-p.addParameter('hdl',[],@(x)(isa(x,'matlab.ui.Figure')))
+p.addParameter('hdl',[],@(x)(isa(x,'matlab.ui.Figure')));
+p.addParameter('color','r');
+p.addParameter('linewidth',2,@isnumeric);
+
 p.parse(wavelength,radiance,varargin{:});
 
 strTitle = p.Results.title;
@@ -47,9 +55,13 @@
 % The dimension that matches wavelength is the right one
 nWave = length(wavelength);
 if nWave == size(radiance,1)
-    thisPlot = plot(wavelength(:),radiance,'LineWidth',2);
+    thisPlot = plot(wavelength(:),radiance,...
+        'LineWidth',p.Results.linewidth, ...
+        'Color',p.Results.color);
 elseif length(wavelength) == size(radiance,2)
-    thisPlot = plot(wavelength(:),radiance','LineWidth',2);
+    thisPlot = plot(wavelength(:),radiance',...
+        'LineWidth',p.Results.linewidth, ...
+        'Color',p.Results.color);
 end
 
 %% Label it
diff --git a/utility/plots/sensorPlotLine.m b/utility/plots/sensorPlotLine.m
index 76cc019c..4a30dd5d 100644
--- a/utility/plots/sensorPlotLine.m
+++ b/utility/plots/sensorPlotLine.m
@@ -51,7 +51,7 @@
 
 %%
-if ieNotDefined('sensor'), sensor = vcGetObject('sensor'); end
+if ieNotDefined('sensor'), sensor = ieGetObject('sensor'); end
 if ieNotDefined('ori'), ori = 'h'; end
 if ieNotDefined('dataType'), dataType = 'electrons'; end
 if ieNotDefined('sORt'), sORt = 'space'; end
diff --git a/utility/programming/iePTable.m b/utility/programming/iePTable.m
index e43a2f6b..cd0dd595 100644
--- a/utility/programming/iePTable.m
+++ b/utility/programming/iePTable.m
@@ -191,6 +191,7 @@
 
 end
 
+%%
 function data = tableOI(oi, format)
 % iePTable(oiCreate);
@@ -252,6 +253,7 @@
 
 end
 
+%%
 function data = tableOptics(optics,format)
 % iePTable(opticsCreate);
@@ -330,6 +332,7 @@
 
 end
 
+%%
 function data = tablePixel(pixel,format)
 % iePTable(pixel);
@@ -366,6 +369,7 @@
     end
 end
 
+%%
 function data = tableIP(ip,format)
 % iePtable(ipCreate)
@@ -420,6 +424,7 @@
     end
 end
 
+%%
 function data = tableCamera(camera,format)
 % Creates separate tables for each of the main camera components
@@ -435,6 +440,7 @@
 
 end
 
+%%
 function data = cellCombine(oData,sData)
 % Related to cellMerge, but works for matrices of cell arrays.
@@ -449,12 +455,13 @@
 
 cols = size(oData,2);
 for ii=1:cols
-    data(1:oRows,ii)       = oData(:,ii);
-    data((oRows+1):end,ii) = sData(:,ii);
+    data((sRows+1):end,ii) = oData(:,ii);
+    data(1:sRows,ii)       = sData(:,ii);
 end
 
 end
 
+%%
 function mnuExportSelected(src,event)
% allow user to export the current parameters
paramFolder = fullfile(isetRootPath, "local", "parameters");