[NeoML] Remove code copy-paste, add comments (#1083)
* [NeoML] CBaseLayer::sequentialModeIfRecurrent

Signed-off-by: Kirill Golikov <[email protected]>

* [NeoML] CBlobDesc clarify

Signed-off-by: Kirill Golikov <[email protected]>

* [NeoML] CDnnBlob remove copy-paste

Signed-off-by: Kirill Golikov <[email protected]>

* [NeoML] CDnnBlob update comments

Signed-off-by: Kirill Golikov <[email protected]>

* [NeoML] Apply comments

Signed-off-by: Kirill Golikov <[email protected]>

---------

Signed-off-by: Kirill Golikov <[email protected]>
favorart authored Aug 26, 2024
1 parent a5f6b51 commit 99d2f6e
Showing 7 changed files with 112 additions and 142 deletions.
6 changes: 4 additions & 2 deletions NeoML/include/NeoML/Dnn/Dnn.h
@@ -216,8 +216,8 @@ class NEOML_API CBaseLayer : public virtual IObject {
// Releases all temporary resources allocated for the layer
virtual void CleanUp( bool totalCleanUp = false );

// Returns the total size of trainable parameters in this layer
// Returns the total size of trainable parameters of its internal layers, if layer is composite or recurrent
// Returns the number of trainable parameters (floats or ints) in all of this layer's parameter blobs
// Returns the number of trainable parameters of its internal layers, if the layer is composite or recurrent
virtual size_t GetTrainableParametersSize() const;

// Enable profile timer for RunOnce
@@ -404,6 +404,8 @@ class NEOML_API CBaseLayer : public virtual IObject {
void transferParamsBlob(CBaseLayer& dist) const;
// Technical method for recursion in GetPath( CArray<CString>& path )
void getPath( CArray<CString>& path ) const;
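// Switch this layer's blobs into/out of sequential processing mode if the dnn is in recurrent mode (no-op otherwise)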
void sequentialModeIfRecurrent();
void nonSequentialModeIfRecurrent();
// Switches the specified blobs into sequence processing mode
void switchBlobsToSequentialMode(CObjectArray<CDnnBlob>& blobs, TBlobCacheType cacheType, bool storeParent);
void switchBlobsToNonSequentialMode(CObjectArray<CDnnBlob>& blobs, TBlobCacheType cacheType, bool clear);
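
A minimal sketch of the counting rule the new comment describes, assuming `paramBlobs` is the layer's array of parameter blobs and `GetDataSize()` returns a blob's element count (both names are used elsewhere in NeoML); this illustrates the contract, not the actual implementation of GetTrainableParametersSize:

size_t total = 0;
for( int i = 0; i < paramBlobs.Size(); ++i ) {
    if( paramBlobs[i] != nullptr ) {
        // An element count (floats or ints), not a byte size
        total += static_cast<size_t>( paramBlobs[i]->GetDataSize() );
    }
}
// A composite or recurrent layer would additionally recurse into its internal layers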
11 changes: 9 additions & 2 deletions NeoML/include/NeoML/Dnn/DnnBlob.h
@@ -224,12 +224,19 @@ class NEOML_API CDnnBlob : public IObject {
}

private:
// The math engine that owns the blob's memory
IMathEngine& mathEngine;
// Typed description of the dimensions of the allocated data storage
CBlobDesc desc;
// Pointer to the allocated data storage
CMemoryHandle data;
// Ownership of `data`: the owner has full access to write to and to free the allocated data storage.
// Either `dataOwned` is true and `parent` is 0,
// or `dataOwned` is false and `parent` points to the blob that owns the allocated data storage
bool dataOwned;

CPtr<CDnnBlob> parent; // parent blob
// Pointer to the blob that holds the data in sequential recurrent mode, or to the reference dnn's paramBlobs
CPtr<CDnnBlob> parent;
// Offset in the `parent` blob in sequential recurrent mode; the window is moved along the BatchLength axis of the parent blob
int parentPos;

void initializeBlob(TBlobType _type, int batchLength, int batchWidth, int listSize, int height, int width,
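
The `parent`/`parentPos` pair is the mechanism behind window blobs in sequential recurrent mode. A usage sketch built on the public CreateWindowBlob and SetParentPos helpers, assuming `mathEngine` is an available IMathEngine reference and the dimensions are arbitrary:

// The parent owns the storage (dataOwned == true); the window only references it
CPtr<CDnnBlob> full = CDnnBlob::CreateDataBlob( mathEngine, CT_Float,
    10 /*batchLength*/, 1 /*batchWidth*/, 8 /*channels*/ );
CPtr<CDnnBlob> step = CDnnBlob::CreateWindowBlob( full, 1 ); // dataOwned == false, parent == full
for( int pos = 0; pos < full->GetBatchLength(); ++pos ) {
    step->SetParentPos( pos ); // slides the window along BatchLength; no data is copied
    // ... process one step of the sequence through `step` ...
}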
2 changes: 1 addition & 1 deletion NeoML/include/NeoML/Dnn/Layers/CompositeLayer.h
@@ -91,7 +91,7 @@ class NEOML_API CCompositeLayer : public CBaseLayer, public CDnnLayerGraph {
// It does not allocate outputBlobs in CBaseLayer in runOnce, because they are not used for inference.
// The outputBlobs for CCompositeLayer are the sinkLayer->GetBlob() of its internalDnn.
// NOTE: All class children do not allocate outputBlobs; for normal use cases this is ok
// For special cases (like CRecurentLayer) it should be reinitializated.
// For special cases (like CRecurrentLayer) it should be reinitialized.
void AllocateOutputBlobs() override {}

// The network object for the internal layers
2 changes: 1 addition & 1 deletion NeoML/include/NeoML/Dnn/Layers/RecurrentLayer.h
@@ -39,7 +39,7 @@ class NEOML_API CRecurrentLayer : public CCompositeLayer {
void DeleteAllBackLinks();
void DeleteAllLayersAndBackLinks();
// CCompositeLayer does not allocate outputBlobs in CBaseLayer in runOnce, because they are not used for inference.
// CRecurentLayer used its outputBlob to concatenate recurent outputs for its internalDnn.
// CRecurrentLayer uses its outputBlob to concatenate recurrent outputs for its internalDnn.
void AllocateOutputBlobs() override { CBaseLayer::AllocateOutputBlobs(); }

// Retrieves or sets the recurrent layer state
67 changes: 33 additions & 34 deletions NeoML/src/Dnn/BaseLayer.cpp
@@ -330,6 +330,33 @@ void CBaseLayer::transferParamsBlob( CBaseLayer& dist ) const
}
}

void CBaseLayer::sequentialModeIfRecurrent()
{
if( !dnn->IsRecurrentMode() ) {
return;
}
// Switch the input and output blobs to sequential mode (to the current position in sequence)
switchBlobsToSequentialMode( inputBlobs, BCT_Input, GetDnn()->isReuseMemoryMode );
switchBlobsToSequentialMode( outputBlobs, BCT_Output, GetDnn()->isReuseMemoryMode );
switchBlobsToSequentialMode( runtimeBlobs, BCT_Runtime, false );
for( int i = 0; i < runtimeBlobs.Size(); i++ ) {
*runtimeBlobPtrs[i] = runtimeBlobs[i];
}
}

void CBaseLayer::nonSequentialModeIfRecurrent()
{
if( !dnn->IsRecurrentMode() ) {
return;
}
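// Switch the input and output blobs back to whole-sequence mode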
switchBlobsToNonSequentialMode( inputBlobs, BCT_Input, GetDnn()->isReuseMemoryMode );
switchBlobsToNonSequentialMode( outputBlobs, BCT_Output, GetDnn()->isReuseMemoryMode );
switchBlobsToNonSequentialMode( runtimeBlobs, BCT_Runtime, false );
for( int i = 0; i < runtimeBlobs.Size(); i++ ) {
*runtimeBlobPtrs[i] = runtimeBlobs[i];
}
}

void CBaseLayer::switchBlobsToSequentialMode(CObjectArray<CDnnBlob>& blobs, TBlobCacheType cacheType, bool storeParent)
{
CObjectArray<CDnnBlob>& cache = blobCache[cacheType];
@@ -534,28 +561,14 @@ void CBaseLayer::runOnce()
allocatedBlobs = TInputBlobs | TOutputBlobs;

// Create window blobs for the inputs and outputs
if( dnn->IsRecurrentMode() ) {
switchBlobsToSequentialMode(inputBlobs, BCT_Input, GetDnn()->isReuseMemoryMode );
switchBlobsToSequentialMode(outputBlobs, BCT_Output, GetDnn()->isReuseMemoryMode );
switchBlobsToSequentialMode(runtimeBlobs, BCT_Runtime, false);
for(int i = 0; i < runtimeBlobs.Size(); i++) {
*runtimeBlobPtrs[i] = runtimeBlobs[i];
}
}
sequentialModeIfRecurrent();

{
CRunOnceTimer timer( useTimer, MathEngine(), runOnceCount, runOnceTime );
RunOnce();
}

if( dnn->IsRecurrentMode() ) {
switchBlobsToNonSequentialMode(inputBlobs, BCT_Input, GetDnn()->isReuseMemoryMode );
switchBlobsToNonSequentialMode(outputBlobs, BCT_Output, GetDnn()->isReuseMemoryMode );
switchBlobsToNonSequentialMode(runtimeBlobs, BCT_Runtime, false);
for(int i = 0; i < runtimeBlobs.Size(); i++) {
*runtimeBlobPtrs[i] = runtimeBlobs[i];
}
}
nonSequentialModeIfRecurrent();

if( GetDnn()->isReuseMemoryMode ) {
setAllocatedBlobs( TOutputBlobs | blobsNeededForBackward );
@@ -604,16 +617,8 @@ void CBaseLayer::backwardRunAndLearnOnce()
return; // not enough diff blobs for the output
}
}

if( dnn->IsRecurrentMode() ) {
// Switch the input and output blobs to sequential mode (to the current position in sequence)
switchBlobsToSequentialMode(inputBlobs, BCT_Input, GetDnn()->isReuseMemoryMode);
switchBlobsToSequentialMode(outputBlobs, BCT_Output, GetDnn()->isReuseMemoryMode);
switchBlobsToSequentialMode(runtimeBlobs, BCT_Runtime, false);
for(int i = 0; i < runtimeBlobs.Size(); i++) {
*runtimeBlobPtrs[i] = runtimeBlobs[i];
}
}

sequentialModeIfRecurrent();

// Start backward run and learning
if( IsBackwardPerformed() ) {
Expand Down Expand Up @@ -674,14 +679,8 @@ void CBaseLayer::backwardRunAndLearnOnce()
for( int out = 0; out < readyOutputDiffs.Size(); ++out ) {
readyOutputDiffs[out] = 0;
}
if( dnn->IsRecurrentMode() ) {
switchBlobsToNonSequentialMode(inputBlobs, BCT_Input, GetDnn()->isReuseMemoryMode);
switchBlobsToNonSequentialMode(outputBlobs, BCT_Output, GetDnn()->isReuseMemoryMode);
switchBlobsToNonSequentialMode(runtimeBlobs, BCT_Runtime, false);
for(int i = 0; i < runtimeBlobs.Size(); i++) {
*runtimeBlobPtrs[i] = runtimeBlobs[i];
}
}

nonSequentialModeIfRecurrent();

// If layer needs its inputs or outputs for training
// then it needs them for all the steps of the recurrent part
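
The two helpers must always run as a pair around RunOnce and the backward pass. A hypothetical alternative, not part of this commit (and ignoring member access for brevity), would be a scope guard that restores non-sequential mode even on an early return:

class CSequentialModeGuard final {
public:
    explicit CSequentialModeGuard( CBaseLayer& layer ) : layer( layer )
        { layer.sequentialModeIfRecurrent(); } // no-op unless the dnn is recurrent
    ~CSequentialModeGuard()
        { layer.nonSequentialModeIfRecurrent(); } // always paired with the call above
private:
    CBaseLayer& layer;
};

The commit keeps explicit paired calls instead, which matches the surrounding style and keeps the mode switches visible at each call site.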
61 changes: 11 additions & 50 deletions NeoML/src/Dnn/DnnBlob.cpp
@@ -137,55 +137,18 @@ CDnnBlob* CDnnBlob::CreateBlob(IMathEngine& mathEngine, TBlobType type, const CB
void CDnnBlob::initializeBlob(TBlobType type,
int batchLength, int batchWidth, int listSize, int height, int width, int depth, int channels)
{
NeoAssert(desc.GetDataType() == CT_Invalid);

int allocSize = batchLength * batchWidth * listSize * height * width * depth * channels;

switch(type) {
case CT_Float:
desc.SetDataType( CT_Float );
data = mathEngine.HeapAllocTyped<float>( allocSize );
break;
case CT_Int:
desc.SetDataType( CT_Int );
data = mathEngine.HeapAllocTyped<int>( allocSize );
break;
default:
NeoAssert( false );
}
desc.SetDimSize(BD_BatchLength, batchLength);
desc.SetDimSize(BD_BatchWidth, batchWidth);
desc.SetDimSize(BD_ListSize, listSize);
desc.SetDimSize(BD_Height, height);
desc.SetDimSize(BD_Width, width);
desc.SetDimSize(BD_Depth, depth);
desc.SetDimSize(BD_Channels, channels);
CBlobDesc pattern( { batchLength, batchWidth, listSize, height, width, depth, channels } );
initializeByPattern( type, pattern );
}

void CDnnBlob::initializeTensor(TBlobType type, std::initializer_list<int> dimensions)
{
NeoAssert(desc.GetDataType() == CT_Invalid);
NeoAssert(dimensions.size() <= CBlobDesc::MaxDimensions);

int allocSize = 1;
for( int i = 0; i < static_cast<int>(dimensions.size()); i++) {
allocSize *= dimensions.begin()[i];
}
switch(type) {
case CT_Float:
desc.SetDataType( CT_Float );
data = mathEngine.HeapAllocTyped<float>( allocSize );
break;
case CT_Int:
desc.SetDataType( CT_Int );
data = mathEngine.HeapAllocTyped<int>( allocSize );
break;
default:
NeoAssert( false );
}
for( int i = 0; i < static_cast<int>(dimensions.size()); i++) {
desc.SetDimSize(i, dimensions.begin()[i]);
CBlobDesc pattern( type );
for( int i = 0; i < static_cast<int>( dimensions.size() ); ++i ) {
pattern.SetDimSize( i, dimensions.begin()[i] );
}
initializeByPattern( type, pattern );
}

void CDnnBlob::initializeWindow(const CPtr<CDnnBlob>& _parent, int windowSize)
@@ -203,22 +166,20 @@ void CDnnBlob::initializeWindow(const CPtr<CDnnBlob>& _parent, int windowSize)
void CDnnBlob::initializeByPattern(TBlobType type, const CBlobDesc& pattern)
{
NeoAssert(desc.GetDataType() == CT_Invalid);
CBlobDesc newPattern = pattern;

const int size = pattern.BlobSize();
switch(type) {
case CT_Float:
desc = newPattern;
desc.SetDataType( type );
data = mathEngine.HeapAllocTyped<float>( newPattern.BlobSize() );
data = mathEngine.HeapAllocTyped<float>( size );
break;
case CT_Int:
desc = newPattern;
desc.SetDataType( type );
data = mathEngine.HeapAllocTyped<int>( newPattern.BlobSize() );
data = mathEngine.HeapAllocTyped<int>( size );
break;
default:
NeoAssert( false );
}
desc = pattern;
desc.SetDataType( type );
}

CDnnBlob::~CDnnBlob()
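
With all three initializers funneling through initializeByPattern, the data-type switch and the HeapAllocTyped call now exist in exactly one place. A hypothetical equivalence check, assuming CDnnBlob::CreateTensor and CBlobDesc::HasEqualDimensions with their usual NeoML signatures and arbitrary dimension values:

// Both construction paths should produce identical descriptors
CPtr<CDnnBlob> a = CDnnBlob::CreateBlob( mathEngine, CT_Float,
    CBlobDesc( { 2, 3, 1, 4, 5, 1, 8 } ) );
CPtr<CDnnBlob> b = CDnnBlob::CreateTensor( mathEngine, CT_Float,
    { 2, 3, 1, 4, 5, 1, 8 } );
NeoAssert( a->GetDesc().HasEqualDimensions( b->GetDesc() ) );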
