diff --git a/doc/user_manual/PostProcessors/Validation.tex b/doc/user_manual/PostProcessors/Validation.tex
index 122a7fb40b..49f6db313d 100644
--- a/doc/user_manual/PostProcessors/Validation.tex
+++ b/doc/user_manual/PostProcessors/Validation.tex
@@ -23,6 +23,7 @@ \subsubsection{Validation PostProcessors}
 \hline
 \textbf{Validation Algorithm} & \textbf{DataObject} & \textbf{Available Metrics} \\ \hline
 Probabilistic & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet\end{tabular} & \begin{tabular}[c]{@{}c@{}}CDFAreaDifference\\ \\ PDFCommonArea\end{tabular} \\ \hline
+Representativity & \begin{tabular}[c]{@{}c@{}}PointSet \\ HistorySet \\ DataSet\end{tabular} & \begin{tabular}[c]{@{}c@{}}BiasFactor\end{tabular} \\ \hline
 PPDSS & HistorySet & DSS \\ \hline
 \end{tabular}
 \end{table}
@@ -148,3 +149,95 @@ \subsubsection{Validation PostProcessors}
   ...
 \end{lstlisting}
+
+
+\paragraph{Representativity}
+The \textbf{Representativity} post-processor is one of three \textbf{Validation} post-processors; a common
+post-processor interface acts as a gate for applying these validation algorithms
+(i.e., representativity, Physics-guided Coverage Mapping (PCM), and Dynamic System Scaling (DSS)) and
+provides a common infrastructure for users of \textbf{Validation} problems.
+The usage of this post-processor is threefold: first, to quantitatively assess whether a mock/prototype
+model/experiment is a good representation of a target model; second, to determine whether a set of
+experiments represents the target model and can claim full coverage of the design space and scenarios;
+and third, if the available experiments are insufficient to declare coverage, to identify the remaining
+experiments required to achieve full coverage and increase the representativity/bias factor.
+The representativity theory was first founded in the neutronics community \ref{} and shortly thereafter
+was extended to thermal hydraulics \ref{}.
+So far, several algorithms are implemented within this post-processor:
+%
+\ppType{Representativity}{Representativity}
+%
+
+\begin{itemize}
+  \item \xmlNode{Features}, \xmlDesc{comma separated string, required field}, specifies the names of the features, which are the measurables/observables of the mock model. \nb this nomenclature differs from the machine learning nomenclature.
+
+  \item \xmlNode{Targets}, \xmlDesc{comma separated string, required field}, contains a comma separated list of
+  targets. These are the figures of merit (FOMs) in the target model against which the mock model is being validated.
+
+  \item \xmlNode{featureParameters}, \xmlDesc{comma separated string, required field}, specifies the names of the parameters/inputs to the mock model.
+
+  \item \xmlNode{targetParameters}, \xmlDesc{comma separated string, required field}, contains a comma separated list of
+  target parameters/inputs.
+
+  \item \xmlNode{pivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable of the mock model. Default is ``time''.
+  \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation).
+  \item \xmlNode{targetPivotParameter}, \xmlDesc{string, optional field}, ID of the temporal variable in the target model. Default is ``time''.
+  \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation).
+
+  \item \xmlNode{Metric}, \xmlDesc{string, required field}, specifies the \textbf{Metric} name that is defined via
+  the \textbf{Metrics} entity. In this xml-node, the following xml attributes need to be specified:
+  \begin{itemize}
+    \item \xmlAttr{class}, \xmlDesc{required string attribute}, the class of this metric (e.g. Metrics)
+    \item \xmlAttr{type}, \xmlDesc{required string attribute}, the sub-type of this Metric (e.g. SKL, Minkowski)
+  \end{itemize}
+  The choice of the available metrics depends on the specific validation algorithm being used (see Table~\ref{tab:ValidationAlgorithms}).
+\end{itemize}
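+
+For a single measurable--FOM pair, the bias/representativity factor reported by this post-processor
+corresponds to the classical scalar definition below; it is shown here for orientation only (the notation
+follows the commented reference formula in the source code, and the implementation generalizes it to
+matrices of sensitivities):
+\begin{equation}
+  r = \frac{S_{FOM}^{T} C_{p} S_{exp}}{\sqrt{S_{FOM}^{T} C_{p} S_{FOM}}\,\sqrt{S_{exp}^{T} C_{p} S_{exp}}}
+\end{equation}
+where $S_{exp}$ and $S_{FOM}$ are the sensitivities of the mock-experiment measurables and of the target
+FOMs with respect to the shared input parameters, and $C_{p}$ is the parameter covariance matrix; $r$
+approaches one as the mock experiment responds to the parameters the way the target FOMs do.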
+
+
+The \textbf{Representativity} post-processor can make use of the \textbf{Metric} system (see Chapter \ref{sec:Metrics}),
+in conjunction with the specific algorithm chosen from the list above,
+to report validation scores for both static and time-dependent data.
+Indeed, both \textbf{PointSet} and \textbf{HistorySet} can be accepted by this post-processor.
+If the name of a given variable to be compared is unique, it can be used directly; otherwise, the variable can be specified
+with the $DataObjectName|InputOrOutput|VariableName$ nomenclature.
+
+\textbf{Example:}
+\begin{lstlisting}[style=XML,morekeywords={subType}]
+<Simulation>
+  ...
+  <Steps>
+    <MultiRun name="mcRun">
+      <Input class="DataObjects" type="PointSet">inputPlaceHolder2</Input>
+      <Model class="Models" type="ExternalModel">linModel</Model>
+      <Sampler class="Samplers" type="MonteCarlo">MC_external</Sampler>
+      <Output class="DataObjects" type="PointSet">outputDataMC1</Output>
+      <Output class="DataObjects" type="PointSet">outputDataMC2</Output>
+    </MultiRun>
+    <PostProcess name="PP1">
+      <Input class="DataObjects" type="PointSet">outputDataMC1</Input>
+      <Input class="DataObjects" type="PointSet">outputDataMC2</Input>
+      <Model class="Models" type="PostProcessor">pp1</Model>
+      <Output class="DataObjects" type="PointSet">pp1_metric</Output>
+      <Output class="OutStreams" type="Print">pp1_metric_dump</Output>
+    </PostProcess>
+  </Steps>
+  ...
+  <Models>
+    ...
+    <PostProcessor name="pp1" subType="Representativity">
+      <Features type="variable">outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3</Features>
+      <Targets type="variable">outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3</Targets>
+      <pivotParameter>simIndex</pivotParameter>
+      <featureParameters type="variable">outputDataMC1|p1,outputDataMC1|p2</featureParameters>
+      <targetParameters type="variable">outputDataMC2|p1,outputDataMC2|p2</targetParameters>
+      <pivotParameter>outputDataMC1|time</pivotParameter>
+      <targetPivotParameter>outputDataMC2|time</targetPivotParameter>
+    </PostProcessor>
+    ...
+  </Models>
+  ...
+</Simulation>
+\end{lstlisting}
diff --git a/doc/user_manual/postprocessor.tex b/doc/user_manual/postprocessor.tex
index 2f533a96fd..9382de7552 100644
--- a/doc/user_manual/postprocessor.tex
+++ b/doc/user_manual/postprocessor.tex
@@ -72,6 +72,7 @@ \subsection{PostProcessor}
 %%%%% PP ComparisonStatistics %%%%%%%
 \input{PostProcessors/ComparisonStatistics.tex}
+
 %%%%% PP ImportanceRank %%%%%%%
 \input{PostProcessors/ImportanceRank.tex}
diff --git a/framework/Models/PostProcessors/validationAlgorithms/Representativity.py b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py
new file mode 100644
index 0000000000..239d446cfc
--- /dev/null
+++ b/framework/Models/PostProcessors/validationAlgorithms/Representativity.py
@@ -0,0 +1,247 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" + Created on April 29, 2021 + + @author: Mohammad Abdo (@Jimmy-INL) + + This class represents a base class for the validation algorithms + It inherits from the PostProcessor directly + ##TODO: Recast it once the new PostProcesso API gets in place +""" + +#External Modules------------------------------------------------------------------------------------ +import numpy as np +import xarray as xr +#External Modules End-------------------------------------------------------------------------------- + +#Internal Modules------------------------------------------------------------------------------------ +#from utils import xmlUtils +from utils import InputData, InputTypes +#import Files +#import Distributions +#import MetricDistributor +from utils import utils +from ..Validation import Validation +# from utils.mathUtils import partialDerivative, derivatives +#Internal Modules End-------------------------------------------------------------------------------- + +class Representativity(Validation): + """ + Representativity is a base class for validation problems + It represents the base class for most validation problems + """ + + @classmethod + def getInputSpecification(cls): + """ + Method to get a reference to a class that specifies the input data for + class cls. + @ In, cls, the class for which we are retrieving the specification + @ Out, specs, InputData.ParameterInput, class to use for + specifying input of cls. + """ + specs = super(Representativity, cls).getInputSpecification() + parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType) + parametersInput.addParam("type", InputTypes.StringType) + specs.addSub(parametersInput) + targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType) + targetParametersInput.addParam("type", InputTypes.StringType) + specs.addSub(targetParametersInput) + targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType) + specs.addSub(targetPivotParameterInput) + return specs + + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + from Models.PostProcessors import factory as ppFactory # delay import to allow definition + self.printTag = 'POSTPROCESSOR Representativity' + self.dynamicType = ['static','dynamic'] # for now only static is available + self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics + self.name = 'Represntativity' + self.stat = ppFactory.returnInstance('BasicStatistics') + self.stat.what = ['NormalizedSensitivities'] # expected value calculation + + + # def inputToInternal(self, currentInputs): + # """ + # Method to convert an input object into the internal format that is + # understandable by this pp. 
+ # @ In, currentInputs, list or DataObject, data object or a list of data objects + # @ Out, measureList, list of (feature, target), the list of the features and targets to measure the distance between + # """ + # if type(currentInputs) != list: + # currentInputs = [currentInputs] + # hasPointSet = False + # hasHistorySet = False + # #Check for invalid types + # for currentInput in currentInputs: + # inputType = None + # if hasattr(currentInput, 'type'): + # inputType = currentInput.type + + # if isinstance(currentInput, Files.File): + # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") + # elif isinstance(currentInput, Distributions.Distribution): + # pass #Allowed type + # elif inputType == 'HDF5': + # self.raiseAnError(IOError, "Input type '", inputType, "' can not be accepted") + # elif inputType == 'PointSet': + # hasPointSet = True + # elif inputType == 'HistorySet': + # hasHistorySet = True + # if self.multiOutput == 'raw_values': + # self.dynamic = True + # if self.pivotParameter not in currentInput.getVars('indexes'): + # self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter,'has not been found in DataObject', currentInput.name) + # if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): + # self.raiseAnError(IOError, "HistorySet", currentInput.name," is not syncronized, please use Interfaced PostProcessor HistorySetSync to pre-process it") + # pivotValues = currentInput.asDataset()[self.pivotParameter].values + # if len(self.pivotValues) == 0: + # self.pivotValues = pivotValues + # elif set(self.pivotValues) != set(pivotValues): + # self.raiseAnError(IOError, "Pivot values for pivot parameter",self.pivotParameter, "in provided HistorySets are not the same") + # else: + # self.raiseAnError(IOError, "Metric cannot process "+inputType+ " of type "+str(type(currentInput))) + # if self.multiOutput == 'raw_values' and hasPointSet and hasHistorySet: + # self.multiOutput = 'mean' + # self.raiseAWarning("Reset 'multiOutput' to 'mean', since both PointSet and HistorySet are provided as Inputs. Calculation outputs will be aggregated by averaging") + + # measureList = [] + + # for cnt in range(len(self.features)): + # feature = self.features[cnt] + # target = self.targets[cnt] + # featureData = self.__getMetricSide(feature, currentInputs) + # targetData = self.__getMetricSide(target, currentInputs) + # measureList.append((featureData, targetData)) + + # return measureList + + # def initialize(self, features, targets, **kwargs): + # """ + # Set up this interface for a particular activity + # @ In, features, list, list of features + # @ In, targets, list, list of targets + # @ In, kwargs, dict, keyword arguments + # """ + # super().initialize(features, targets, **kwargs) + # self.stat.toDo = {'NormalizedSensitivity':[{'targets':set(self.targets), 'prefix':'nsen'}]} + # # self.stat.toDo = {'NormalizedSensitivity'[{'targets':set([self.targets]), 'prefix':'nsen'}]} + # fakeRunInfo = {'workingDir':'','stepName':''} + # self.stat.initialize(fakeRunInfo, self.Parameters, features, **kwargs) + + def _handleInput(self, paramInput): + """ + Function to handle the parsed paramInput for this class. + @ In, paramInput, ParameterInput, the already parsed input. 
+      @ Out, None
+    """
+    super()._handleInput(paramInput)
+    for child in paramInput.subparts:
+      if child.getName() == 'featureParameters':
+        self.Parameters = child.value
+      elif child.getName() == 'targetParameters':
+        self.targetParameters = child.value
+      elif child.getName() == 'targetPivotParameter':
+        self.targetPivotParameter = child.value
+
+  def run(self, inputIn):
+    """
+      This method executes the postprocessor action. In this case, it loads the
+      results into the specified dataObject.
+      @ In, inputIn, dict, dictionary of data to process
+      @ Out, evaluation, dict, dictionary containing the post-processed results
+    """
+    dataSets = [data for _, _, data in inputIn['Data']]
+    pivotParameter = self.pivotParameter
+    names = []
+    if isinstance(inputIn['Data'][0][-1], xr.Dataset):
+      names = [inp[-1].attrs['name'] for inp in inputIn['Data']]
+      if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None:
+        if 'dynamic' not in self.dynamicType: #self.model.dataType:
+          self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name))
+        else:
+          pivotParameter = self.pivotParameter
+    evaluation = {k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()}
+    if pivotParameter:
+      if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]):
+        self.raiseAnError(RuntimeError, "The pivotParameter '{}' has size '{}' and validation output has size '{}'".format(pivotParameter, len(dataSets[0][pivotParameter]), len(list(evaluation.values())[0])))
+      if pivotParameter not in evaluation:
+        evaluation[pivotParameter] = dataSets[0][pivotParameter]
+    return evaluation
+
+  def _evaluate(self, datasets, **kwargs):
+    """
+      Main method of this validation algorithm: computes the representativity factors.
+      @ In, datasets, list, list of datasets (data1,data2,etc.) to be used
+      @ In, kwargs, dict, keyword arguments
+      @ Out, outs, dict, dictionary containing the results {"feat"_"target"_"metric_name":value}
+    """
+    # self.stat.run({'targets':{self.target:xr.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]
+    for data in datasets:
+      sen = self.stat.run(data)
+    names = kwargs.get('dataobjectNames')
+    outs = {}
+    for feat, targ, param, targParam in zip(self.features, self.targets, self.Parameters, self.targetParameters):
+      featData = self._getDataFromDatasets(datasets, feat, names)
+      targData = self._getDataFromDatasets(datasets, targ, names)
+      Parameters = self._getDataFromDatasets(datasets, param, names)
+      targetParameters = self._getDataFromDatasets(datasets, targParam, names)
+      # senFOMs = partialDerivative(featData.data,np.atleast_2d(Parameters.data)[0,:],'x1')
+      senFOMs = np.atleast_2d(Parameters[0])#.data
+      senMeasurables = np.atleast_2d(targetParameters[0])
+      covParameters = senFOMs @ senMeasurables.T
+      for metric in self.metrics:
+        name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.name)
+        outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters)
+    return outs
+
+  def _getDataFromDatasets(self, datasets, var, names=None):
+    """
+      Utility function to retrieve the data from datasets
+      @ In, datasets, list, list of datasets (data1,data2,etc.) to search from
+      @ In, var, str, the variable to find (either in format dataobject|var or simply var)
+      @ In, names, list, optional, list of dataset names (data1,data2,etc.). If not present, the search will be done on the full list
+      @ Out, data, tuple(numpy.ndarray, numpy.ndarray or None), the retrieved data (data, probability weights (None if not present))
+    """
+    data = None
+    pw = None
+    dat = None
+    if "|" in var and names is not None:
+      do, feat = var.split("|")
+      doindex = names.index(do)
+      dat = datasets[doindex][feat]
+    else:
+      feat = var
+      for doindex, ds in enumerate(datasets):
+        if var in ds:
+          dat = ds[var]
+          break
+    if 'ProbabilityWeight-{}'.format(feat) in datasets[doindex]:
+      pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values
+    elif 'ProbabilityWeight' in datasets[doindex]:
+      pw = datasets[doindex]['ProbabilityWeight'].values
+    dim = len(dat.shape)
+    # (numRealizations, numHistorySteps) for MetricDistributor
+    dat = dat.values
+    if dim == 1:
+      # the following reshaping does not require a copy
+      dat.shape = (dat.shape[0], 1)
+    data = dat, pw
+    return data
\ No newline at end of file
diff --git a/ravenframework/Metrics/metrics/Factory.py b/ravenframework/Metrics/metrics/Factory.py
index f37d0cb576..a53c4cb70d 100644
--- a/ravenframework/Metrics/metrics/Factory.py
+++ b/ravenframework/Metrics/metrics/Factory.py
@@ -25,6 +25,7 @@
 from .CDFAreaDifference import CDFAreaDifference
 from .PDFCommonArea import PDFCommonArea
 from .ScipyMetric import ScipyMetric
+from .RepresentativityFactors import RepresentativityFactors
 from .DSS import DSS
 
 factory = EntityFactory('Metrics')
diff --git a/ravenframework/Metrics/metrics/RepresentativityFactors.py b/ravenframework/Metrics/metrics/RepresentativityFactors.py
new file mode 100644
index 0000000000..b86af5cdcb
--- /dev/null
+++ b/ravenframework/Metrics/metrics/RepresentativityFactors.py
@@ -0,0 +1,92 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Created on April 29 2021
+
+@author: Mohammad Abdo (@Jimmy-INL)
+"""
+#External Modules------------------------------------------------------------------------------------
+import numpy as np
+import scipy as sp
+from scipy.linalg import sqrtm
+import copy
+#External Modules End--------------------------------------------------------------------------------
+
+#Internal Modules------------------------------------------------------------------------------------
+from .MetricInterface import MetricInterface
+from ...utils import InputData, InputTypes
+#Internal Modules End--------------------------------------------------------------------------------
+
+class RepresentativityFactors(MetricInterface):
+  """
+    RepresentativityFactors is the metric class used to quantitatively
+    assess the representativeness of a mock experiment with respect to the target plant.
+  """
+  availScaling ={}
+
+  @classmethod
+  def getInputSpecification(cls):
+    """
+      Method to get a reference to a class that specifies the input data for
+      class cls.
+      @ In, cls, the class for which we are retrieving the specification
+      @ Out, inputSpecification, InputData.ParameterInput, class to use for
+        specifying input of cls.
+    """
+    inputSpecification = super(RepresentativityFactors, cls).getInputSpecification()
+    actionTypeInput = InputData.parameterInputFactory("actionType", contentType=InputTypes.StringType)
+    inputSpecification.addSub(actionTypeInput)
+
+    return inputSpecification
+
+  def __init__(self):
+    """
+      Constructor
+      @ In, None
+      @ Out, None
+    """
+    super().__init__()
+    # The type of given analysis
+    self.actionType = None
+    # True indicates the metric needs to be able to handle dynamic data
+    self._dynamicHandling = True
+    # True indicates the metric needs to be able to handle pairwise data
+    self._pairwiseHandling = False
+
+  def run(self, x, y, weights = None, axis = 0, **kwargs):
+    """
+      This method computes the representativity factor(s) between the two inputs x and y
+      @ In, x, numpy.ndarray, array containing data of x, if 1D array is provided,
+        the array will be reshaped via x.reshape(-1,1), shape (n_samples, ), if 2D
+        array is provided, shape (n_samples, n_time_steps)
+      @ In, y, numpy.ndarray, array containing data of y, if 1D array is provided,
+        the array will be reshaped via y.reshape(-1,1), shape (n_samples, ), if 2D
+        array is provided, shape (n_samples, n_time_steps)
+      @ In, weights, array_like (numpy.array or list), optional, weights associated
+        with input, shape (n_samples) if axis = 0, otherwise shape (n_time_steps)
+      @ In, axis, integer, optional, axis along which a metric is performed, default is 0,
+        i.e. the metric will be performed along the first dimension (the "rows").
+        If metric postprocessor is used, the first dimension is the RAVEN_sample_ID,
+        and the second dimension is the pivotParameter if HistorySet is provided.
+      @ In, kwargs, dict, dictionary of parameters characteristic of each metric
+      @ Out, r, numpy.ndarray, matrix of representativity factors
+    """
+    senMeasurables = kwargs['senMeasurables']
+    senFOMs = kwargs['senFOMs']
+    covParameters = kwargs['covParameters']
+    # r = (senFOMs.T @ covParameters @ senMeasurables)/\
+    #     np.sqrt(senFOMs.T @ covParameters @ senFOMs)/\
+    #     np.sqrt(senMeasurables.T @ covParameters @ senMeasurables)
+    # equivalent, simplified form of the original expression (since sqrtm(X) @ sqrtm(X) == X,
+    # the cross term needs no matrix square roots, which also avoids sqrtm on a
+    # possibly non-square matrix when the numbers of features and targets differ):
+    r = (sp.linalg.pinv(sqrtm(senFOMs @ covParameters @ senFOMs.T)) @ (senFOMs @ covParameters @ senMeasurables.T) @ sp.linalg.pinv(sqrtm(senMeasurables @ covParameters @ senMeasurables.T))).real
+    return r
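+
+# Editor's note -- illustrative sanity check with hypothetical numbers (not part
+# of the RAVEN test suite): for single-row sensitivities the matrix expression
+# in `run` reduces to the classical scalar factor, e.g.
+#   sF = np.array([[0.8, 0.2]])   # target FOM sensitivities (1 x nParams)
+#   sM = np.array([[0.7, 0.3]])   # mock measurable sensitivities (1 x nParams)
+#   C  = np.eye(2)                # parameter covariance
+#   r  = (sF @ C @ sM.T) / np.sqrt(sF @ C @ sF.T) / np.sqrt(sM @ C @ sM.T)
+# which gives r ~ 0.99, approaching 1 as the mock responds to the parameters
+# the way the target does.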
diff --git a/ravenframework/Models/PostProcessors/BasicStatistics.py b/ravenframework/Models/PostProcessors/BasicStatistics.py
index 3a54d20c48..b08a0f556e 100644
--- a/ravenframework/Models/PostProcessors/BasicStatistics.py
+++ b/ravenframework/Models/PostProcessors/BasicStatistics.py
@@ -27,14 +27,14 @@
 #External Modules End-----------------------------------------------------------
 
 #Internal Modules---------------------------------------------------------------
-from .PostProcessorInterface import PostProcessorInterface
+from .PostProcessorReadyInterface import PostProcessorReadyInterface
 from ...utils import utils
 from ...utils import InputData, InputTypes
 from ...utils import mathUtils
 from ... import Files
 #Internal Modules End-----------------------------------------------------------
 
-class BasicStatistics(PostProcessorInterface):
+class BasicStatistics(PostProcessorReadyInterface):
   """
     BasicStatistics filter class.
It computes all the most popular statistics """ @@ -164,92 +164,47 @@ def __init__(self): self.sampleSize = None # number of sample size self.calculations = {} self.validDataType = ['PointSet', 'HistorySet', 'DataSet'] # The list of accepted types of DataObject + self.inputDataObjectName = None # name for input data object + self.setInputDataType('xrDataset') - def inputToInternal(self, currentInp): + def inputToInternal(self, inputIn): """ - Method to convert an input object into the internal format that is + Method to select corresponding data from Data Objects and normalize the ProbabilityWeight of corresponding data understandable by this pp. - @ In, currentInp, object, an object that needs to be converted + @ In, inputIn, dict, a dictionary that contains the input Data Object information @ Out, (inputDataset, pbWeights), tuple, the dataset of inputs and the corresponding variable probability weight """ - # The BasicStatistics postprocessor only accept DataObjects - self.dynamic = False - currentInput = currentInp [-1] if type(currentInp) == list else currentInp - if len(currentInput) == 0: - self.raiseAnError(IOError, "In post-processor " +self.name+" the input "+currentInput.name+" is empty.") - + inpVars, outVars, dataSet = inputIn['Data'][0] pbWeights = None - if type(currentInput).__name__ == 'tuple': - return currentInput - # TODO: convert dict to dataset, I think this will be removed when DataSet is used by other entities that - # are currently using this Basic Statisitics PostProcessor. - if type(currentInput).__name__ == 'dict': - if 'targets' not in currentInput.keys(): - self.raiseAnError(IOError, 'Did not find targets in the input dictionary') - inputDataset = xr.Dataset() - for var, val in currentInput['targets'].items(): - inputDataset[var] = val - if 'metadata' in currentInput.keys(): - metadata = currentInput['metadata'] - self.pbPresent = True if 'ProbabilityWeight' in metadata else False - if self.pbPresent: - pbWeights = xr.Dataset() - self.realizationWeight = xr.Dataset() - self.realizationWeight['ProbabilityWeight'] = metadata['ProbabilityWeight']/metadata['ProbabilityWeight'].sum() - for target in self.parameters['targets']: - pbName = 'ProbabilityWeight-' + target - if pbName in metadata: - pbWeights[target] = metadata[pbName]/metadata[pbName].sum() - elif self.pbPresent: - pbWeights[target] = self.realizationWeight['ProbabilityWeight'] - else: - self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') - else: - self.raiseAWarning('BasicStatistics postprocessor did not detect ProbabilityWeights! Assuming unit weights instead...') - if 'RAVEN_sample_ID' not in inputDataset.sizes.keys(): - self.raiseAWarning('BasicStatisitics postprocessor did not detect RAVEN_sample_ID! Assuming the first dimension of given data...') - self.sampleTag = utils.first(inputDataset.sizes.keys()) - return inputDataset, pbWeights - - if currentInput.type not in ['PointSet','HistorySet']: - self.raiseAnError(IOError, self, 'BasicStatistics postprocessor accepts PointSet and HistorySet only! 
Got ' + currentInput.type) - # extract all required data from input DataObjects, an input dataset is constructed - dataSet = currentInput.asDataset() try: inputDataset = dataSet[self.parameters['targets']] except KeyError: missing = [var for var in self.parameters['targets'] if var not in dataSet] - self.raiseAnError(KeyError, "Variables: '{}' missing from dataset '{}'!".format(", ".join(missing),currentInput.name)) - self.sampleTag = currentInput.sampleTag + self.raiseAnError(KeyError, "Variables: '{}' missing from dataset '{}'!".format(", ".join(missing),self.inputDataObjectName)) + self.sampleTag = utils.first(dataSet.dims) - if currentInput.type == 'HistorySet': + if self.dynamic: dims = inputDataset.sizes.keys() if self.pivotParameter is None: - if len(dims) > 1: - self.raiseAnError(IOError, self, 'Time-dependent statistics is requested (HistorySet) but no pivotParameter \ - got inputted!') + self.raiseAnError(IOError, self, 'Time-dependent statistics is requested (HistorySet) but no pivotParameter \ + got inputted!') elif self.pivotParameter not in dims: self.raiseAnError(IOError, self, 'Pivot parameter', self.pivotParameter, 'is not the associated index for \ requested variables', ','.join(self.parameters['targets'])) - else: - self.dynamic = True - if not currentInput.checkIndexAlignment(indexesToCheck=self.pivotParameter): - self.raiseAnError(IOError, "The data provided by the data objects", currentInput.name, "is not synchronized!") - self.pivotValue = inputDataset[self.pivotParameter].values - if self.pivotValue.size != len(inputDataset.groupby(self.pivotParameter)): - msg = "Duplicated values were identified in pivot parameter, please use the 'HistorySetSync'" + \ - " PostProcessor to syncronize your data before running 'BasicStatistics' PostProcessor." - self.raiseAnError(IOError, msg) + self.pivotValue = dataSet[self.pivotParameter].values + if self.pivotValue.size != len(dataSet.groupby(self.pivotParameter)): + msg = "Duplicated values were identified in pivot parameter, please use the 'HistorySetSync'" + \ + " PostProcessor to syncronize your data before running 'BasicStatistics' PostProcessor." 
+ self.raiseAnError(IOError, msg) # extract all required meta data - metaVars = currentInput.getVars('meta') - self.pbPresent = True if 'ProbabilityWeight' in metaVars else False + self.pbPresent = 'ProbabilityWeight' in dataSet if self.pbPresent: pbWeights = xr.Dataset() self.realizationWeight = dataSet[['ProbabilityWeight']]/dataSet[['ProbabilityWeight']].sum() for target in self.parameters['targets']: pbName = 'ProbabilityWeight-' + target - if pbName in metaVars: + if pbName in dataSet: pbWeights[target] = dataSet[pbName]/dataSet[pbName].sum() elif self.pbPresent: pbWeights[target] = self.realizationWeight['ProbabilityWeight'] @@ -267,6 +222,12 @@ def initialize(self, runInfo, inputs, initDict): @ In, initDict, dict, dictionary with initialization options @ Out, None """ + if len(inputs)>1: + self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one DataObject') + if self.pivotParameter is not None: + if not inputs[-1].checkIndexAlignment(indexesToCheck=self.pivotParameter): + self.raiseAnError(IOError, "The data provided by the input data object is not synchronized!") + self.inputDataObjectName = inputs[-1].name #construct a list of all the parameters that have requested values into self.allUsedParams self.allUsedParams = set() for metricName in self.scalarVals + self.vectorVals: @@ -1544,6 +1505,21 @@ def spearmanCorrelation(self, featVars, targVars, featSamples, targSamples, pbWe da = xr.DataArray(spearmanMat, dims=('targets','features'), coords={'targets':targVars,'features':featVars}) return da + def _runLegacy(self, inputIn): + """ + This method executes the postprocessor action with the old data format. In this case, it computes all the requested statistical FOMs + @ In, inputIn, object, object contained the data to process. (inputToInternal output) + @ Out, outputSet, xarray.Dataset or dictionary, dataset or dictionary containing the results + """ + if type(inputIn).__name__ == 'PointSet': + merged = inputIn.asDataset() + elif 'metadata' in inputIn: + merged = xr.merge([inputIn['metadata'],inputIn['targets']]) + else: + merged = xr.merge([inputIn['targets']]) + newInputIn = {'Data':[[None,None,merged]]} + return self.run(newInputIn) + def run(self, inputIn): """ This method executes the postprocessor action. 
In this case, it computes all the requested statistical FOMs diff --git a/ravenframework/Models/PostProcessors/Factory.py b/ravenframework/Models/PostProcessors/Factory.py index 732a57cf8b..67d7841a35 100644 --- a/ravenframework/Models/PostProcessors/Factory.py +++ b/ravenframework/Models/PostProcessors/Factory.py @@ -37,6 +37,7 @@ from .EconomicRatio import EconomicRatio from .ValidationBase import ValidationBase from .Validations import Probabilistic +from .Validations import Representativity from .Validations import PPDSS from .TSACharacterizer import TSACharacterizer diff --git a/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py b/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py index 9ff0a78319..d05a94e79b 100644 --- a/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py +++ b/ravenframework/Models/PostProcessors/LimitSurfaceIntegral.py @@ -256,9 +256,9 @@ def run(self, input): f = np.vectorize(self.variableDist[varName].ppf, otypes=[np.float]) randomMatrix[:, index] = f(randomMatrix[:, index]) tempDict[varName] = randomMatrix[:, index] - pb = self.stat.run({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] + pb = self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.functionS.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target] if self.errorModel: - boundError = abs(pb-self.stat.run({'targets':{self.target:xarray.DataArray(self.errorModel.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]) + boundError = abs(pb-self.stat._runLegacy({'targets':{self.target:xarray.DataArray(self.errorModel.evaluate(tempDict)[self.target])}})[self.computationPrefix +"_"+self.target]) else: self.raiseAnError(NotImplemented, "quadrature not yet implemented") return pb, boundError diff --git a/ravenframework/Models/PostProcessors/SafestPoint.py b/ravenframework/Models/PostProcessors/SafestPoint.py index 13ba857e2f..bbce61b0a7 100644 --- a/ravenframework/Models/PostProcessors/SafestPoint.py +++ b/ravenframework/Models/PostProcessors/SafestPoint.py @@ -335,7 +335,7 @@ def run(self, input): rlz['ProbabilityWeight'][ncLine] = np.prod(probList) metadata = {'ProbabilityWeight':xarray.DataArray(rlz['ProbabilityWeight'])} targets = {tar:xarray.DataArray( rlz[tar]) for tar in self.controllableOrd} - rlz['ExpectedSafestPointCoordinates'] = self.stat.run({'metadata':metadata, 'targets':targets}) + rlz['ExpectedSafestPointCoordinates'] = self.stat._runLegacy({'metadata':metadata, 'targets':targets}) self.raiseADebug(rlz['ExpectedSafestPointCoordinates']) return rlz diff --git a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py index 7f6bdc637e..6b8bb02cbe 100644 --- a/ravenframework/Models/PostProcessors/Validations/Probabilistic.py +++ b/ravenframework/Models/PostProcessors/Validations/Probabilistic.py @@ -81,6 +81,29 @@ def run(self, inputIn): @ In, inputIn, list, dictionary of data to process @ Out, outputDict, dict, dictionary containing the post-processed results """ + # inpVars, outVars, dataSet = inputIn['Data'][0] + # dataSets = [data for _, _, data in inputIn['Data']] + dataDict = {data.attrs['name']: data for _, _, data in inputIn['Data']} + pivotParameter = self.pivotParameter + names = [inp[-1].attrs['name'] for inp in inputIn['Data']] + if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None: + if 'dynamic' not in self.dynamicType: 
#self.model.dataType: + self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name)) + # else: + # pivotParameter = self.pivotParameter + # # check if pivotParameter + # if pivotParameter: + # # in case of dataobjects we check that the dataobject is either an HistorySet or a DataSet + # if isinstance(inputIn['Data'][0][-1], xr.Dataset) and not all([True if inp.type in ['HistorySet', 'DataSet'] else False for inp in inputIn]): + # self.raiseAnError(RuntimeError, "The pivotParameter '{}' has been inputted but PointSets have been used as input of PostProcessor '{}'".format(pivotParameter, self.name)) + # if not all([True if pivotParameter in inp else False for inp in dataSets]): + # self.raiseAnError(RuntimeError, "The pivotParameter '{}' not found in datasets used as input of PostProcessor '{}'".format(pivotParameter, self.name)) + + + evaluation ={k: np.atleast_1d(val) for k, val in self._evaluate(dataDict, **{'dataobjectNames': names}).items()} + + if pivotParameter: + #if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]): dataDict = {self.getDataSetName(data): data for _, _, data in inputIn['Data']} pivotParameter = self.pivotParameter names = [self.getDataSetName(inp[-1]) for inp in inputIn['Data']] @@ -94,6 +117,7 @@ def run(self, inputIn): self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0]))) if pivotParameter not in evaluation: evaluation[pivotParameter] = inputIn['Data'][0][-1]['time'] + #evaluation[pivotParameter] = dataSets[0][pivotParameter] return evaluation ### utility functions @@ -107,7 +131,9 @@ def _evaluate(self, datasets, **kwargs): names = kwargs.get('dataobjectNames') outputDict = {} for feat, targ in zip(self.features, self.targets): + # featData = self._getDataFromDatasets(datasets, feat, names) featData = self._getDataFromDataDict(datasets, feat, names) + # targData = self._getDataFromDatasets(datasets, targ, names) targData = self._getDataFromDataDict(datasets, targ, names) for metric in self.metrics: name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name) diff --git a/ravenframework/Models/PostProcessors/Validations/Representativity.py b/ravenframework/Models/PostProcessors/Validations/Representativity.py new file mode 100644 index 0000000000..af3e8478b5 --- /dev/null +++ b/ravenframework/Models/PostProcessors/Validations/Representativity.py @@ -0,0 +1,263 @@ +# Copyright 2017 Battelle Energy Alliance, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" + Created on April 29, 2021 + + @author: Mohammad Abdo (@Jimmy-INL) + + This class represents a base class for the validation algorithms + It inherits from the PostProcessor directly + ##TODO: Recast it once the new PostProcesso API gets in place +""" + +#External Modules------------------------------------------------------------------------------------ +import numpy as np +import xarray as xr +#External Modules End-------------------------------------------------------------------------------- + +#Internal Modules------------------------------------------------------------------------------------ +from ravenframework.utils import InputData, InputTypes +from ravenframework.utils import utils +from .. import ValidationBase +#Internal Modules End-------------------------------------------------------------------------------- + +class Representativity(ValidationBase): + """ + Representativity is a base class for validation problems + It represents the base class for most validation problems + """ + + @classmethod + def getInputSpecification(cls): + """ + Method to get a reference to a class that specifies the input data for + class cls. + @ In, cls, the class for which we are retrieving the specification + @ Out, specs, InputData.ParameterInput, class to use for + specifying input of cls. + """ + specs = super(Representativity, cls).getInputSpecification() + parametersInput = InputData.parameterInputFactory("featureParameters", contentType=InputTypes.StringListType, + descr=r"""mock model parameters/inputs""") + parametersInput.addParam("type", InputTypes.StringType) + specs.addSub(parametersInput) + targetParametersInput = InputData.parameterInputFactory("targetParameters", contentType=InputTypes.StringListType, + descr=r"""Target model parameters/inputs""") + targetParametersInput.addParam("type", InputTypes.StringType) + specs.addSub(targetParametersInput) + targetPivotParameterInput = InputData.parameterInputFactory("targetPivotParameter", contentType=InputTypes.StringType, + descr=r"""ID of the temporal variable of the target model. Default is ``time''. + \nb Used just in case the \xmlNode{pivotValue}-based operation is requested (i.e., time dependent validation).""") + specs.addSub(targetPivotParameterInput) + return specs + + def __init__(self): + """ + Constructor + @ In, None + @ Out, None + """ + super().__init__() + self.printTag = 'POSTPROCESSOR Representativity' + self.dynamicType = ['static','dynamic'] # for now only static is available + self.acceptableMetrics = ["RepresentativityFactors"] # acceptable metrics + self.name = 'Representativity' + self.stat = [None, None] + self.featureDataObject = None + self.targetDataObject = None + self.senPrefix = 'nsen' + + def getBasicStat(self): + """ + Get Basic Statistic PostProcessor + @ In, None + @ Out, stat, object, Basic Statistic PostProcessor Object + """ + from .. import factory as ppFactory # delay import to allow definition + stat = ppFactory.returnInstance('BasicStatistics') + stat.what = ['sensitivity'] # expected value calculation + return stat + + def initialize(self, runInfo, inputs, initDict): + """ + Method to initialize the DataMining pp. + @ In, runInfo, dict, dictionary of run info (e.g. 
+  def initialize(self, runInfo, inputs, initDict):
+    """
+      Method to initialize the Representativity pp.
+      @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
+      @ In, inputs, list, list of inputs
+      @ In, initDict, dict, dictionary with initialization options
+      @ Out, None
+    """
+    super().initialize(runInfo, inputs, initDict)
+    if len(inputs) != 2:
+      self.raiseAnError(IOError, "PostProcessor", self.name, "can only accept two DataObjects, but got {}!".format(str(len(inputs))))
+    params = self.features+self.targets+self.featureParameters+self.targetParameters
+    validParams = [True if "|" in x else False for x in params]
+    if not all(validParams):
+      notValid = list(np.asarray(params)[np.where(np.asarray(validParams)==False)[0]])
+      self.raiseAnError(IOError, "'Features', 'Targets', 'featureParameters', and 'targetParameters' should use 'DataObjectName|variable' format, but variables {} do not follow this rule.".format(','.join(notValid)))
+    # Assume features and targets are in the format of: DataObjectName|Variables
+    names = set([x.split("|")[0] for x in self.features] + [x.split("|")[0] for x in self.featureParameters])
+    if len(names) != 1:
+      self.raiseAnError(IOError, "'Features' and 'featureParameters' should come from the same DataObject, but they are present in different DataObjects: {}".format(','.join(names)))
+    featDataObject = list(names)[0]
+    names = set([x.split("|")[0] for x in self.targets] + [x.split("|")[0] for x in self.targetParameters])
+    if len(names) != 1:
+      self.raiseAnError(IOError, "'Targets' and 'targetParameters' should come from the same DataObject, but they are present in different DataObjects: {}".format(','.join(names)))
+    targetDataObject = list(names)[0]
+    featVars = [x.split("|")[-1] for x in self.features] + [x.split("|")[1] for x in self.featureParameters]
+    targVars = [x.split("|")[-1] for x in self.targets] + [x.split("|")[1] for x in self.targetParameters]
+
+    for i, inp in enumerate(inputs):
+      if inp.name == featDataObject:
+        self.featureDataObject = (inp, i)
+      else:
+        self.targetDataObject = (inp, i)
+
+    vars = self.featureDataObject[0].vars + self.featureDataObject[0].indexes
+    if not set(featVars).issubset(set(vars)):
+      missing = set(featVars) - set(vars)
+      self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.featureDataObject[0].name))
+    vars = self.targetDataObject[0].vars + self.targetDataObject[0].indexes
+    if not set(targVars).issubset(set(vars)):
+      missing = set(targVars) - set(vars)
+      self.raiseAnError(IOError, "Variables {} are missing from DataObject {}".format(','.join(missing), self.targetDataObject[0].name))
+
+    featStat = self.getBasicStat()
+    featStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.features]), 'features':set([x.split("|")[-1] for x in self.featureParameters]),'prefix':self.senPrefix}]}
+    featStat.initialize(runInfo, [self.featureDataObject[0]], initDict)
+    self.stat[self.featureDataObject[-1]] = featStat
+    tartStat = self.getBasicStat()
+    tartStat.toDo = {'sensitivity':[{'targets':set([x.split("|")[-1] for x in self.targets]), 'features':set([x.split("|")[-1] for x in self.targetParameters]),'prefix':self.senPrefix}]}
+    tartStat.initialize(runInfo, [self.targetDataObject[0]], initDict)
+    self.stat[self.targetDataObject[-1]] = tartStat
+
+
+  def _handleInput(self, paramInput):
+    """
+      Function to handle the parsed paramInput for this class.
+      @ In, paramInput, ParameterInput, the already parsed input.
+      @ Out, None
+    """
+    super()._handleInput(paramInput)
+    for child in paramInput.subparts:
+      if child.getName() == 'featureParameters':
+        self.featureParameters = child.value
+      elif child.getName() == 'targetParameters':
+        self.targetParameters = child.value
+      elif child.getName() == 'targetPivotParameter':
+        self.targetPivotParameter = child.value
+
+  def run(self, inputIn):
+    """
+      This method executes the postprocessor action. In this case, it computes the
+      representativity factors and loads the results into the specified dataObject.
+      @ In, inputIn, dict, dictionary of data to process
+      @ Out, evaluation, dict, dictionary containing the post-processed results
+    """
+    dataSets = [data for _, _, data in inputIn['Data']]
+    pivotParameter = self.pivotParameter
+    names = []
+    if isinstance(inputIn['Data'][0][-1], xr.Dataset):
+      names = [self.getDataSetName(inp[-1]) for inp in inputIn['Data']]
+      if len(inputIn['Data'][0][-1].indexes) and self.pivotParameter is None:
+        if 'dynamic' not in self.dynamicType: #self.model.dataType:
+          self.raiseAnError(IOError, "The validation algorithm '{}' is not a dynamic model but time-dependent data has been inputted in object {}".format(self._type, inputIn['Data'][0][-1].name))
+        else:
+          pivotParameter = self.pivotParameter
+    evaluation = {k: np.atleast_1d(val) for k, val in self._evaluate(dataSets, **{'dataobjectNames': names}).items()}
+    ## TODO: This is a placeholder to remember the time dependent case
+    # if pivotParameter:
+    #   # Uncomment this to cause crash: print(dataSets[0], pivotParameter)
+    #   if len(dataSets[0][pivotParameter]) != len(list(evaluation.values())[0]):
+    #     self.raiseAnError(RuntimeError, "The pivotParameter value '{}' has size '{}' and validation output has size '{}'".format( len(dataSets[0][self.pivotParameter]), len(evaluation.values()[0])))
+    #   if pivotParameter not in evaluation:
+    #     evaluation[pivotParameter] = dataSets[0][pivotParameter]
+    return evaluation
+
+  def _evaluate(self, datasets, **kwargs):
+    """
+      Main method of this validation algorithm: computes the representativity factors.
+      @ In, datasets, list, list of datasets (data1,data2,etc.) to be used
+      @ In, kwargs, dict, keyword arguments
+      @ Out, outs, dict, dictionary containing the results {"feat"_"target"_"metric_name":value}
+    """
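+    # Outline (editor's summary of the steps below): (1) run the BasicStatistics
+    # post-processor on each DataObject to obtain the normalized sensitivities,
+    # (2) assemble the sensitivity matrices of the mock measurables and of the
+    # target FOMs, (3) build the parameter second-moment matrix (used as
+    # covParameters) from the sampled inputs, and (4) pass all three to the
+    # RepresentativityFactors metric.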
+    sens = self.stat[self.featureDataObject[-1]].run({"Data":[[None, None, datasets[self.featureDataObject[-1]]]]})
+    senMeasurables = self._generateSensitivityMatrix(self.features, self.featureParameters, sens)
+    sens = self.stat[self.targetDataObject[-1]].run({"Data":[[None, None, datasets[self.targetDataObject[-1]]]]})
+    senFOMs = self._generateSensitivityMatrix(self.targets, self.targetParameters, sens)
+    c = np.zeros((datasets[0].dims['RAVEN_sample_ID'], len(self.featureParameters)))
+    names = kwargs.get('dataobjectNames')
+    outs = {}
+    ## TODO: this loop is not needed
+    for feat, targ, param, targParam in zip(self.features, self.targets, self.featureParameters, self.targetParameters):
+      featData = self._getDataFromDatasets(datasets, feat, names)
+      targData = self._getDataFromDatasets(datasets, targ, names)
+      parameters = self._getDataFromDatasets(datasets, param, names)
+      targetParameters = self._getDataFromDatasets(datasets, targParam, names)
+      for ind, var in enumerate(self.featureParameters):
+        c[:,ind] = np.squeeze(self._getDataFromDatasets(datasets, var, names)[0])
+      covParameters = c.T @ c
+      for metric in self.metrics:
+        name = "{}_{}_{}".format(feat.split("|")[-1], targ.split("|")[-1], metric.estimator.name)
+        outs[name] = metric.evaluate((featData, targData), senFOMs = senFOMs, senMeasurables=senMeasurables, covParameters=covParameters)
+    return outs
+
+  def _generateSensitivityMatrix(self, outputs, inputs, sensDict):
+    """
+      Reconstruct the sensitivity matrix from the BasicStatistics calculation
+      @ In, outputs, list, list of output variables
+      @ In, inputs, list, list of input variables
+      @ In, sensDict, dict, dictionary containing the sensitivities
+      @ Out, sensMatr, numpy.array, 2-D array of the reconstructed sensitivity matrix
+    """
+    sensMatr = np.zeros((len(outputs), len(inputs)))
+    inputVars = [x.split("|")[-1] for x in inputs]
+    outputVars = [x.split("|")[-1] for x in outputs]
+    for i, outVar in enumerate(outputVars):
+      for j, inpVar in enumerate(inputVars):
+        senName = "{}_{}_{}".format(self.senPrefix, outVar, inpVar)
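+        # e.g. with the default prefix 'nsen', output 'F1', and input 'p1', this
+        # looks up the BasicStatistics entry named 'nsen_F1_p1' (illustrative names)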
+        # Assume static data (PointSets are provided as input)
+        sensMatr[i, j] = sensDict[senName][0]
+    return sensMatr
+
+  def _getDataFromDatasets(self, datasets, var, names=None):
+    """
+      Utility function to retrieve the data from datasets
+      @ In, datasets, list, list of datasets (data1,data2,etc.) to search from
+      @ In, var, str, the variable to find (either in format dataobject|var or simply var)
+      @ In, names, list, optional, list of dataset names (data1,data2,etc.). If not present, the search will be done on the full list
+      @ Out, data, tuple(numpy.ndarray, numpy.ndarray or None), the retrieved data (data, probability weights (None if not present))
+    """
+    data = None
+    pw = None
+    dat = None
+    if "|" in var and names is not None:
+      do, feat = var.split("|")
+      doindex = names.index(do)
+      dat = datasets[doindex][feat]
+    else:
+      feat = var
+      for doindex, ds in enumerate(datasets):
+        if var in ds:
+          dat = ds[var]
+          break
+    if 'ProbabilityWeight-{}'.format(feat) in datasets[doindex]:
+      pw = datasets[doindex]['ProbabilityWeight-{}'.format(feat)].values
+    elif 'ProbabilityWeight' in datasets[doindex]:
+      pw = datasets[doindex]['ProbabilityWeight'].values
+    dim = len(dat.shape)
+    dat = dat.values
+    if dim == 1:
+      # the following reshaping does not require a copy
+      dat.shape = (dat.shape[0], 1)
+    data = dat, pw
+    return data
diff --git a/ravenframework/Models/PostProcessors/Validations/__init__.py b/ravenframework/Models/PostProcessors/Validations/__init__.py
index 88d55b68bf..3d1bc5c2ba 100644
--- a/ravenframework/Models/PostProcessors/Validations/__init__.py
+++ b/ravenframework/Models/PostProcessors/Validations/__init__.py
@@ -19,4 +19,5 @@
 @author: wangc
 """
 from .Probabilistic import Probabilistic
+from .Representativity import Representativity
 from .PPDSS import PPDSS
diff --git a/ravenframework/Samplers/AdaptiveMonteCarlo.py b/ravenframework/Samplers/AdaptiveMonteCarlo.py
index 39cbc89551..89ee6bf16a 100644
--- a/ravenframework/Samplers/AdaptiveMonteCarlo.py
+++ b/ravenframework/Samplers/AdaptiveMonteCarlo.py
@@ -186,7 +186,7 @@ def localFinalizeActualSampling(self,jobObject,model,myInput):
       @ Out, None
     """
     if self.counter > 1:
-      output = self.basicStatPP.run(self._targetEvaluation)
+      output = self.basicStatPP._runLegacy(self._targetEvaluation)
       output['solutionUpdate'] = np.asarray([self.counter - 1])
       self._solutionExport.addRealization(output)
       self.checkConvergence(output)
diff --git a/tests/framework/AnalyticModels/linModel.py b/tests/framework/AnalyticModels/linModel.py
new file mode 100644
index 0000000000..56cc6cc188
--- /dev/null
+++ b/tests/framework/AnalyticModels/linModel.py
@@ -0,0 +1,46 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#***************************************
+#* Simple analytic test ExternalModule *
+#***************************************
+#
+# Simulates a steady-state linear model that maps $J$ parameters (i.e., $\mathbb{R}^J$) to $k$ responses
+#
+# External Modules
+import numpy as np
+##################
+
+A = np.array([[2, -3],[1,8],[-5, -5]])
+b = np.array([[0],[0],[0]])
+
+def run(self,Input):
+  """
+    Method required by RAVEN to run this as an external model.
+    @ In, self, object, object to store members on
+    @ In, Input, dict, dictionary containing inputs from RAVEN
+    @ Out, None
+  """
+  self.F1,self.F2,self.F3 = main(Input)
+
+def main(Input):
+  y = A @ np.array(list(Input.values())).reshape(-1,1) + b
+  return y[:]
+
+
+if __name__ == '__main__':
+  Input = {}
+  Input['x1'] = 5.5
+  Input['x2'] = 8
+  a,b,c = main(Input)
+  print(a,b,c)
diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml
new file mode 100644
index 0000000000..046b0ec8ea
--- /dev/null
+++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity.xml
@@ -0,0 +1,109 @@
 + + + Representativity + mcRun, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_gate_representativity + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation + + This test assesses the mechanics of the representativity workflow, one of the validation algorithms used in RAVEN. + This test uses a toy 1D slab reflective model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. + + + Added Modification for new PP API + + + + + + time,phi_0,x,a_tilde,phi + + + outputDataMC1|ans + outputDataMC2|ans2 + simIndex + outputDataMC1|x1,outputDataMC1|x2 + outputDataMC2|x1,outputDataMC2|x2 + outputDataMC1|time + outputDataMC2|time + + + + + + + + + + 1 + 0.5 + + + -1 + 1 + + + + + + + 10 + + + dist1 + + + dist2 + + + + + + + inputPlaceHolder2 + slab + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + x1,x2 + OutputPlaceHolder + + + x1,x2 + ans + + + x1,x2 + ans2 + + + InputPlaceHolder + + + + + + csv + pp1_metric + + + + 
diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml
new file mode 100644
index 0000000000..33e96e5611
--- /dev/null
+++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativity2.xml
@@ -0,0 +1,112 @@
 + + + Representativity + mcRun, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_gate_representativity + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation.Representativity + + This test aims to show how to use the mechanics of the Validation Post-Processor. For simplicity, + this test is using the attenuation model (analytical) and simple representativity factors metrics. + The output name convention is ``feature name''\underscore``target name''\underscore``metric name''.
 + + Added Modification for new PP API + + + + + + x1, x2 + ans, ans2 + + + outputDataMC1|ans + outputDataMC2|ans2 + simIndex + outputDataMC1|x1,outputDataMC1|x2 + outputDataMC2|x1,outputDataMC2|x2 + outputDataMC1|time + outputDataMC2|time + + + + + + + + + + 1 + 0.5 + + + -1 + 1 + + + + + + + 10 + + + dist1 + + + dist2 + + + + + + + inputPlaceHolder2 + poly + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + x1,x2 + OutputPlaceHolder + + + x1,x2 + ans + + + x1,x2 + ans2 + + + InputPlaceHolder + ans_ans2_simIndex + + + + + + csv + pp1_metric + + + + 
diff --git a/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
new file mode 100644
index 0000000000..1587557a83
--- /dev/null
+++ b/tests/framework/PostProcessors/Validation/test_validation_gate_representativityLinModel.xml
@@ -0,0 +1,109 @@
 + + + Representativity + mcRun, PP1 + 1 + + + + framework/PostProcessors/Validation/test_validation_gate_representativity + Mohammad Abdo (@Jimmy-INL) + 2021-04-29 + PostProcessors.Validation + + This test assesses the mechanics of the representativity workflow, one of the validation algorithms used in RAVEN. + This test uses a linear model as both the mock experiment and the target plant models. The expected representativity factor should be close to one for each measurable F_i and figure of merit FOM_i. Currently the test utilizes the bias factor metric to compute the representativity factors. + + + Added Modification for new PP API + + + + + + p1,p2,F1, F2, F3 + + + outputDataMC1|F1, outputDataMC1|F2, outputDataMC1|F3 + outputDataMC2|F1, outputDataMC2|F2, outputDataMC2|F3 + simIndex + outputDataMC1|p1,outputDataMC1|p2 + outputDataMC2|p1,outputDataMC2|p2 + outputDataMC1|time + outputDataMC2|time + + + + + + + + + + 5.5 + 0.55 + + + 8 + 0.8 + + + + + + + 100 + + + dist1 + + + dist2 + + + + + + + inputPlaceHolder2 + linModel + MC_external + outputDataMC1 + outputDataMC2 + + + outputDataMC1 + outputDataMC2 + pp1 + pp1_metric + pp1_metric_dump + + + + + + p1,p2 + OutputPlaceHolder + + + p1,p2 + F1, F2, F3 + + + p1,p2 + F1, F2, F3 + + + InputPlaceHolder + + + + + + csv + pp1_metric + + + + 